GB_unop__round_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__round_fc32_fc32)
// op(A') function: GB (_unop_tran__round_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_croundf (aij)
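// For context, a hedged sketch of how this kernel is reached from the user
// level (not part of the generated code): GrB_apply with the built-in
// GxB_ROUND_FC32 unary operator dispatches here, e.g.
//
//      GrB_Matrix_apply (C, NULL, NULL, GxB_ROUND_FC32, A, NULL) ;
//
// which rounds each stored entry of the single-complex matrix A into C.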
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_croundf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_croundf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FC32)
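// (GxB_NO_ROUND and GxB_NO_FC32 are compile-time switches from GB_control.h;
// defining them lets a build omit this kernel, in which case callers fall
// back to the generic apply path when GrB_NO_VALUE is returned below.)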
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__round_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__round_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_csr_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
 * Matvec functions for the hypre_ParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvec
*--------------------------------------------------------------------------*/
// y = alpha*A*x + beta*b
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *b,
hypre_ParVector *y )
{
hypre_ParCSRCommHandle **comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
hypre_Vector *x_tmp;
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, jv;
HYPRE_Int vecstride = hypre_VectorVectorStride( x_local );
HYPRE_Int idxstride = hypre_VectorIndexStride( x_local );
HYPRE_Complex *x_tmp_data, **x_buf_data;
HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0);
/*---------------------------------------------------------------------
* Check for size compatibility. ParMatvec returns ierr = 11 if
* length of X doesn't equal the number of columns of A,
* ierr = 12 if the length of Y doesn't equal the number of rows
* of A, and ierr = 13 if both are true.
*
* Because temporary vectors are often used in ParMatvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( idxstride>0 );
if (num_cols != x_size)
{
ierr = 11;
}
if (num_rows != y_size || num_rows != b_size)
{
ierr = 12;
}
if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
{
ierr = 13;
}
hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors );
hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
if ( num_vectors == 1 )
{
x_tmp = hypre_SeqVectorCreate( num_cols_offd );
}
else
{
hypre_assert( num_vectors > 1 );
x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
}
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
use_persistent_comm = num_vectors == 1;
// JSP TODO: we can use persistent communication for multi-vectors,
// but then we need different communication handles for different
// num_vectors.
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
#endif
}
else
{
comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
}
/* x_tmp */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */
if (num_vectors == 1)
{
if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
{
hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
}
hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
hypre_SeqVectorSetDataOwner(x_tmp, 0);
}
#else
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
}
#endif
hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE);
x_tmp_data = hypre_VectorData(x_tmp);
/* x_buff_data */
x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
for (jv = 0; jv < num_vectors; ++jv)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
if (!hypre_ParCSRCommPkgBufData(comm_pkg))
{
hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
continue;
}
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
continue;
#endif
}
x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
/* The assert is because the following loop only works for 'column'
   storage of a multivector. This needs to be fixed to work more generally,
   at least for 'row' storage. That, in turn, means either changing CommPkg
   so that num_sends is num_zones*num_vectors (not num_zones) or, less
   dangerously, putting a stride in the logic of CommHandleCreate (a stride
   either from a new argument or a new variable inside CommPkg), or moving
   the num_vectors iteration inside CommHandleCreate (perhaps as a new
   multivector variant of it).
*/
hypre_assert( idxstride == 1 );
hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE);
/* send_map_elmts on device */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
for (jv = 0; jv < num_vectors; ++jv)
{
HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv];
HYPRE_Complex *locl_data = x_local_data + jv * vecstride;
/* if on device, no need to Sync: send_data is on device memory */
#if defined(HYPRE_USING_CUDA)
/* pack send data on device */
HYPRE_THRUST_CALL( gather,
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) +
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
locl_data,
send_data );
#elif defined(HYPRE_USING_DEVICE_OPENMP)
/* pack send data on device */
HYPRE_Int i;
HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
#pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts)
for (i = start; i < end; i++)
{
send_data[i] = locl_data[device_send_map_elmts[i]];
}
#else
HYPRE_Int i;
/* pack send data on host */
#if defined(HYPRE_USING_OPENMP)
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
i ++)
{
send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
}
#endif
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication starts */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv],
HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] );
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* overlapped local computation */
hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication ends */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
comm_handle[jv] = NULL;
}
hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* computation offd part */
if (num_cols_offd)
{
hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local );
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL;
if (!use_persistent_comm)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
continue;
}
#endif
hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
}
hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
}
hypre_HandleCudaComputeStreamSyncPop(hypre_handle);
hypre_SyncCudaComputeStream(hypre_handle);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
return ierr;
}
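/* Summary of the overlap pattern above (a descriptive note, not hypre
   documentation): (1) pack the x entries listed in the comm-pkg send map
   into x_buf_data; (2) start nonblocking exchanges that deliver remote x
   entries into x_tmp; (3) overlap the communication with the local product
   y_local = alpha*diag*x_local + beta*b_local; (4) wait for the exchanges
   to complete; (5) accumulate y_local += alpha*offd*x_tmp. */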
HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y )
{
return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}
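/* A minimal call sketch (hypothetical setup; assumes A, x, and y have
   already been created with consistent partitionings):

       HYPRE_Complex one = 1.0, zero = 0.0;
       hypre_ParCSRMatrixMatvec(one, A, x, zero, y);   // y = A*x
*/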
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvecT
*
* Performs y <- alpha * A^T * x + beta * y
*
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y )
{
hypre_ParCSRCommHandle **comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);
hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
hypre_Vector *y_tmp;
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, jv;
HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
HYPRE_Int idxstride = hypre_VectorIndexStride(y_local);
HYPRE_Complex *y_tmp_data, **y_buf_data;
HYPRE_Complex *y_local_data = hypre_VectorData(y_local);
hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0);
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_rows != x_size)
{
ierr = 1;
}
if (num_cols != y_size)
{
ierr = 2;
}
if (num_rows != x_size && num_cols != y_size)
{
ierr = 3;
}
hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors );
hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
if ( num_vectors == 1 )
{
y_tmp = hypre_SeqVectorCreate(num_cols_offd);
}
else
{
hypre_assert( num_vectors > 1 );
y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors);
}
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
use_persistent_comm = num_vectors == 1;
// JSP TODO: we can use persistent communication for multi-vectors,
// but then we need different communication handles for different
// num_vectors.
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
#endif
}
else
{
comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
}
/* y_tmp */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
/* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */
if (num_vectors == 1)
{
if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
{
hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
}
hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
}
#else
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
}
#endif
hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE);
y_tmp_data = hypre_VectorData(y_tmp);
/* y_buf_data */
y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
for (jv = 0; jv < num_vectors; ++jv)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
if (!hypre_ParCSRCommPkgBufData(comm_pkg))
{
hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
continue;
}
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
continue;
#endif
}
y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
if (num_cols_offd)
{
if (offdT)
{
// offdT is optional. Used only if it's present
hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp);
}
else
{
hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
/* this is where we assume multivectors are 'column' storage */
comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd],
HYPRE_MEMORY_DEVICE, y_buf_data[jv] );
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* overlapped local computation */
if (diagT)
{
// diagT is optional. Used only if it's present.
hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local);
}
else
{
hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication ends */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
comm_handle[jv] = NULL;
}
hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
/* The assert is because the following loop only works for 'column'
   storage of a multivector. This needs to be fixed to work more generally,
   at least for 'row' storage. That, in turn, means either changing CommPkg
   so that num_sends is num_zones*num_vectors (not num_zones) or, less
   dangerously, putting a stride in the logic of CommHandleCreate (a stride
   either from a new argument or a new variable inside CommPkg), or moving
   the num_vectors iteration inside CommHandleCreate (perhaps as a new
   multivector variant of it).
*/
hypre_assert( idxstride == 1 );
/* send_map_elmts on device */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
for (jv = 0; jv < num_vectors; ++jv)
{
HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv];
HYPRE_Complex *locl_data = y_local_data + jv * vecstride;
#if defined(HYPRE_USING_CUDA)
/* unpack recv data on device */
hypreDevice_GenScatterAdd(locl_data,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
recv_data);
#elif defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_Int i, j;
/* unpack recv data on device */
for (i = 0; i < num_sends; i++)
{
HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
#pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts)
for (j = start; j < end; j++)
{
locl_data[device_send_map_elmts[j]] += recv_data[j];
}
}
#else
HYPRE_Int i;
/* unpack recv data on host, TODO OMP? */
for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
i ++)
{
locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i];
}
#endif
}
hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL;
if (!use_persistent_comm)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP)
if (jv == 0)
{
continue;
}
#endif
hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE);
}
hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST);
}
hypre_HandleCudaComputeStreamSyncPop(hypre_handle);
hypre_SyncCudaComputeStream(hypre_handle);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
return ierr;
}
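/* Note how MatvecT reverses the halo pattern of Matvec: the off-diagonal
   transpose product is computed first into y_tmp, y_tmp is communicated
   back to the owning processes (job 2 in CommHandleCreate), and the
   received values are scatter-added into y_local through the same send map
   that Matvec uses for packing. */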
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y,
HYPRE_Int *CF_marker,
HYPRE_Int fpt )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_Vector *x_tmp;
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, i, j, index, start, num_procs;
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Complex *x_tmp_data = NULL;
HYPRE_Complex *x_buf_data = NULL;
HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
/*---------------------------------------------------------------------
* Check for size compatibility. ParMatvec returns ierr = 11 if
* length of X doesn't equal the number of columns of A,
* ierr = 12 if the length of Y doesn't equal the number of rows
* of A, and ierr = 13 if both are true.
*
* Because temporary vectors are often used in ParMatvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
if (num_cols != x_size)
ierr = 11;
if (num_rows != y_size)
ierr = 12;
if (num_cols != x_size && num_rows != y_size)
ierr = 13;
if (num_procs > 1)
{
if (num_cols_offd)
{
x_tmp = hypre_SeqVectorCreate( num_cols_offd );
hypre_SeqVectorInitialize(x_tmp);
x_tmp_data = hypre_VectorData(x_tmp);
}
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_sends)
x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
x_buf_data[index++]
= x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle =
hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data );
}
hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker,
CF_marker, fpt);
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
if (num_sends)
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart
(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle =
hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd );
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local,
CF_marker, CF_marker_offd, fpt);
hypre_SeqVectorDestroy(x_tmp);
x_tmp = NULL;
hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
}
return ierr;
}
|
ft_ao.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <assert.h>
#include "config.h"
#include "cint.h"
#include "gto/ft_ao.h"
#include "vhf/fblas.h"
#define INTBUFMAX 16000
#define IMGBLK 80
#define OF_CMPLX 2
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define MAX(X,Y) ((X)>(Y)?(X):(Y))
int PBCsizeof_env(int *shls_slice,
int *atm, int natm, int *bas, int nbas, double *env);
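/* Translate one atom's coordinates in env_loc by the lattice vector
 * Ls[iL]; ptr indexes the x,y,z slots of that atom in the env array. */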
static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL)
{
env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0];
env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1];
env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2];
}
/*
* Multiple k-points
*/
static void _ft_fill_k(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
void (*fsort)(), double complex *out, int nkpts,
int comp, int nimgs, int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const char TRANS_N = 'N';
const double complex Z1 = 1;
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex *bufk = buf;
double complex *bufL = buf + dij*blksize * comp * nkpts;
double complex *pbuf;
int gs0, gs1, dg, dijg;
int jL0, jLcount, jL;
int i;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
for (i = 0; i < dijg*nkpts; i++) {
bufk[i] = 0;
}
for (jL0 = 0; jL0 < nimgs; jL0 += IMGBLK) {
jLcount = MIN(IMGBLK, nimgs-jL0);
pbuf = bufL;
for (jL = jL0; jL < jL0+jLcount; jL++) {
shift_bas(env_loc, env, Ls, jptrxyz, jL);
                if (!(*intor)(pbuf, shls, dims, eval_aopair, eval_gz,
                              Z1, sGv, b, sgxyz, gs, dg,
                              atm, natm, bas, nbas, env_loc)) {
                    /* intor found no nonzero integrals for this image:
                     * clear the block */
                    for (i = 0; i < dijg; i++) {
                        pbuf[i] = 0;
                    }
                }
pbuf += dijg;
}
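            /* Accumulate this block of lattice images into the k-point
             * buffers: bufk(:,k) += sum_L bufL(:,L) * expkL(L,k), i.e. a
             * (dijg x nkpts) += (dijg x jLcount) * (jLcount x nkpts)
             * complex GEMM, with expkL stored with leading dimension nimgs. */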
zgemm_(&TRANS_N, &TRANS_N, &dijg, &nkpts, &jLcount,
&Z1, bufL, &dijg, expkL+jL0, &nimgs,
&Z1, bufk, &dijg);
}
(*fsort)(out, bufk, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
/*
* Single k-point
*/
static void _ft_fill_nk1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
void (*fsort)(), double complex *out, int nkpts,
int comp, int nimgs, int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int jsh0 = shls_slice[2];
ish += ish0;
jsh += jsh0;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
int shls[2] = {ish, jsh};
int dims[2] = {di, dj};
double complex *bufk = buf;
double complex *bufL = buf + dij*blksize * comp;
int gs0, gs1, dg, jL, i;
size_t dijg;
for (gs0 = 0; gs0 < nGv; gs0 += blksize) {
gs1 = MIN(gs0+blksize, nGv);
dg = gs1 - gs0;
dijg = dij * dg * comp;
for (i = 0; i < dijg; i++) {
bufk[i] = 0;
}
for (jL = 0; jL < nimgs; jL++) {
shift_bas(env_loc, env, Ls, jptrxyz, jL);
if ((*intor)(bufL, shls, dims, eval_aopair, eval_gz,
expkL[jL], sGv, b, sgxyz, gs, dg,
atm, natm, bas, nbas, env_loc)) {
for (i = 0; i < dijg; i++) {
bufk[i] += bufL[i];
}
}
}
(*fsort)(out, bufk, shls_slice, ao_loc,
nkpts, comp, nGv, ish, jsh, gs0, gs1);
sGv += dg * 3;
if (sgxyz != NULL) {
sgxyz += dg * 3;
}
}
}
static void sort_s1(double complex *out, double complex *in,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int nGv, int ish, int jsh, int gs0, int gs1)
{
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
const size_t nijg = naoi * naoj * NGv;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int ip = ao_loc[ish] - ao_loc[ish0];
const int jp = ao_loc[jsh] - ao_loc[jsh0];
const int dg = gs1 - gs0;
const size_t dijg = di * dj * dg;
out += (ip * naoj + jp) * NGv + gs0;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
for (j = 0; j < dj; j++) {
for (i = 0; i < di; i++) {
pout = out + (i*naoj+j) * NGv;
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[n] = pin[n];
}
} }
out += nijg;
in += dijg;
} }
}
static void sort_s2_igtj(double complex *out, double complex *in,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int nGv, int ish, int jsh, int gs0, int gs1)
{
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijg = nij * NGv;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dg = gs1 - gs0;
const size_t dijg = dij * dg;
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
const int ip1 = ao_loc[ish] + 1;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
pout = out;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[j*NGv+n] = pin[n];
}
}
pout += (ip1 + i) * NGv;
}
out += nijg;
in += dijg;
} }
}
static void sort_s2_ieqj(double complex *out, double complex *in,
int *shls_slice, int *ao_loc, int nkpts, int comp,
int nGv, int ish, int jsh, int gs0, int gs1)
{
const size_t NGv = nGv;
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
const size_t nijg = nij * NGv;
const int di = ao_loc[ish+1] - ao_loc[ish];
const int dj = ao_loc[jsh+1] - ao_loc[jsh];
const int dij = di * dj;
const int dg = gs1 - gs0;
const size_t dijg = dij * dg;
const int jp = ao_loc[jsh] - ao_loc[jsh0];
out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * NGv + gs0;
const int ip1 = ao_loc[ish] + 1;
int i, j, n, ic, kk;
double complex *pin, *pout;
for (kk = 0; kk < nkpts; kk++) {
for (ic = 0; ic < comp; ic++) {
pout = out;
for (i = 0; i < di; i++) {
for (j = 0; j <= i; j++) {
pin = in + (j*di+i) * dg;
for (n = 0; n < dg; n++) {
pout[j*NGv+n] = pin[n];
}
}
pout += (ip1 + i) * NGv;
}
out += nijg;
in += dijg;
} }
}
void PBC_ft_fill_ks1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
_ft_fill_k(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
void PBC_ft_fill_ks2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip > jp) {
_ft_fill_k(intor, eval_aopair, eval_gz, &sort_s2_igtj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
} else if (ip == jp) {
_ft_fill_k(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_fill_nk1s1(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
void PBC_ft_fill_nk1s1hermi(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip >= jp) {
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s1,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
void PBC_ft_fill_nk1s2(int (*intor)(), int (*eval_aopair)(), void (*eval_gz)(),
double complex *out, int nkpts, int comp, int nimgs,
int blksize, int ish, int jsh,
double complex *buf, double *env_loc, double *Ls,
double complex *expkL, int *shls_slice, int *ao_loc,
double *sGv, double *b, int *sgxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
int jp = jsh + shls_slice[2] - nbas;
if (ip > jp) {
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s2_igtj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
} else if (ip == jp) {
_ft_fill_nk1(intor, eval_aopair, eval_gz, &sort_s2_ieqj,
out, nkpts, comp, nimgs, blksize, ish, jsh,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
}
static int subgroupGv(double *sGv, int *sgxyz, double *Gv, int *gxyz,
int nGv, int bufsize, int *shls_slice, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
int i;
int dimax = 0;
int djmax = 0;
for (i = shls_slice[0]; i < shls_slice[1]; i++) {
dimax = MAX(dimax, ao_loc[i+1]-ao_loc[i]);
}
for (i = shls_slice[2]; i < shls_slice[3]; i++) {
djmax = MAX(djmax, ao_loc[i+1]-ao_loc[i]);
}
int dij = dimax * djmax;
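    /* Number of G vectors per block: bufsize/dij entries, rounded down to
     * a multiple of 8 (0xfffffff8 clears the low three bits). */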
int gblksize = 0xfffffff8 & (bufsize / dij);
int gs0, dg;
for (gs0 = 0; gs0 < nGv; gs0 += gblksize) {
dg = MIN(nGv-gs0, gblksize);
for (i = 0; i < 3; i++) {
memcpy(sGv+dg*i, Gv+nGv*i+gs0, sizeof(double)*dg);
}
sGv += dg * 3;
if (gxyz != NULL) {
for (i = 0; i < 3; i++) {
memcpy(sgxyz+dg*i, gxyz+nGv*i+gs0, sizeof(int)*dg);
}
sgxyz += dg * 3;
}
}
return gblksize;
}
void PBC_ft_latsum_drv(int (*intor)(), void (*eval_gz)(), void (*fill)(),
double complex *out, int nkpts, int comp, int nimgs,
double *Ls, double complex *expkL,
int *shls_slice, int *ao_loc,
double *Gv, double *b, int *gxyz, int *gs, int nGv,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int ish0 = shls_slice[0];
const int ish1 = shls_slice[1];
const int jsh0 = shls_slice[2];
const int jsh1 = shls_slice[3];
const int nish = ish1 - ish0;
const int njsh = jsh1 - jsh0;
double *sGv = malloc(sizeof(double) * nGv * 3);
int *sgxyz = NULL;
if (gxyz != NULL) {
sgxyz = malloc(sizeof(int) * nGv * 3);
}
int blksize;
if (fill == &PBC_ft_fill_nk1s1 || fill == &PBC_ft_fill_nk1s2 ||
fill == &PBC_ft_fill_nk1s1hermi) {
blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX*IMGBLK/2,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
} else {
blksize = subgroupGv(sGv, sgxyz, Gv, gxyz, nGv, INTBUFMAX,
shls_slice, ao_loc, atm, natm, bas, nbas, env);
}
int (*eval_aopair)() = NULL;
    if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) {
        eval_aopair = &GTO_aopair_lazy_contract;
}
#pragma omp parallel
{
int i, j, ij;
int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
double *env_loc = malloc(sizeof(double)*nenv);
memcpy(env_loc, env, sizeof(double)*nenv);
size_t count = nkpts + IMGBLK;
double complex *buf = malloc(sizeof(double complex)*count*INTBUFMAX*comp);
#pragma omp for schedule(dynamic)
for (ij = 0; ij < nish*njsh; ij++) {
i = ij / njsh;
j = ij % njsh;
(*fill)(intor, eval_aopair, eval_gz,
out, nkpts, comp, nimgs, blksize, i, j,
buf, env_loc, Ls, expkL, shls_slice, ao_loc,
sGv, b, sgxyz, gs, nGv, atm, natm, bas, nbas, env);
}
free(buf);
free(env_loc);
}
free(sGv);
if (sgxyz != NULL) {
free(sgxyz);
}
}
|
GB_unop__identity_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_bool)
// op(A') function: GB (_unop_tran__identity_fp32_bool)
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp32_bool)
(
float *Cx, // Cx and Ax may be aliased
const bool *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
bool aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_bool)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
master.c | //////////////////////////////////////////////////////////////
//
// master.c
//
// Copyright (c) 2017, Hassan Salehe Matar
// All rights reserved.
//
// This file is part of Clanomp. For details, see
// https://github.com/hassansalehe/Clanomp. Please also
// see the LICENSE file for additional BSD notice
//
// Redistribution and use in source and binary forms, with
// or without modification, are permitted provided that
// the following conditions are met:
//
// * Redistributions of source code must retain the above
// copyright notice, this list of conditions and the
// following disclaimer.
//
// * Redistributions in binary form must reproduce the
// above copyright notice, this list of conditions and
// the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names
// of its contributors may be used to endorse or promote
// products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
// CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE.
//
//////////////////////////////////////////////////////////////
// This program demonstrates the "master" construct: only the master
// thread executes the enclosed block, so the shared counter ends up as 1.
//
// References:
// 1. http://www.openmp.org/wp-content/uploads/openmp-examples-4.5.0.pdf
// 2. http://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
#include <stdio.h>
#include <omp.h>
int main() {
int count = 0;
#pragma omp parallel shared(count)
{
#pragma omp master
{
count++;
}
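    // A hedged aside (not in the original example): to also report which
    // thread ran the master block, one could add inside it
    //   printf("master is thread %d\n", omp_get_thread_num());
    // which prints 0, since the master construct binds to thread 0.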
}
printf("Value of count: %d, construct: <master>\n", count);
return 0;
}
|
3_2_1.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 500000000
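// Note: with 4-byte ints, the four arrays of N = 5*10^8 elements below
// need about 8 GB in total; shrink N on machines with less memory.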
double cal_time(struct timespec *t_end, struct timespec *t_start)
{
double elapsedTime;
elapsedTime = (t_end->tv_sec - t_start->tv_sec) * 1000.0;
elapsedTime += (t_end->tv_nsec - t_start->tv_nsec) / 1000000.0;
return elapsedTime;
}
int main()
{
struct timespec t_start, t_end;
double elapsedTime;
int i;
int *A, *B, *C, *CC;
A = (int *)malloc(N * sizeof(int));
B = (int *)malloc(N * sizeof(int));
C = (int *)malloc(N * sizeof(int));
CC = (int *)malloc(N * sizeof(int));
for (i = 0; i < N; i++)
{
A[i] = rand() % 256;
B[i] = rand() % 256;
}
// start time
clock_gettime(CLOCK_REALTIME, &t_start);
for (i = 0; i < N; i++)
{
C[i] = A[i] + B[i];
}
// stop time
clock_gettime(CLOCK_REALTIME, &t_end);
printf("Sequential time: %lf ms\n", cal_time(&t_end, &t_start));
// start time
clock_gettime(CLOCK_REALTIME, &t_start);
// "#pragma omp for" without "parallel" would leave only the master thread
// executing this loop
#pragma omp parallel for
for (i = 0; i < N; i++)
{
CC[i] = A[i] + B[i];
}
// stop time
clock_gettime(CLOCK_REALTIME, &t_end);
printf("Parallel elapsedTime: %lf ms\n", cal_time(&t_end, &t_start));
// Verify the parallel result against the sequential one
for (i = 0; i < N; i++)
{
if (CC[i] != C[i])
break;
}
    if (i == N)
        printf("Test pass!!!\n");
    free(A);
    free(B);
    free(C);
    free(CC);
    return 0;
} |
DeclOpenMP.h | //===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
namespace clang {
/// This is a basic class for representing a single OpenMP declarative directive.
///
template <typename U> class OMPDeclarativeDirective : public U {
friend class ASTDeclReader;
friend class ASTDeclWriter;
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
if (!Data)
return llvm::None;
return Data->getClauses();
}
protected:
/// Data, associated with the directive.
OMPChildren *Data = nullptr;
/// Build instance of directive.
///
/// \param StartLoc Starting location of the directive (directive keyword).
///
template <typename... Params>
OMPDeclarativeDirective(Params &&... P) : U(std::forward<Params>(P)...) {}
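  /// Allocate a directive of type \p T with trailing storage: the OMPChildren
  /// block holding the clauses and children is constructed immediately after
  /// the \p T object itself (hence the \c Inst + 1 placement below).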
template <typename T, typename... Params>
static T *createDirective(const ASTContext &C, DeclContext *DC,
ArrayRef<OMPClause *> Clauses, unsigned NumChildren,
Params &&... P) {
auto *Inst = new (C, DC, size(Clauses.size(), NumChildren))
T(DC, std::forward<Params>(P)...);
Inst->Data = OMPChildren::Create(Inst + 1, Clauses,
/*AssociatedStmt=*/nullptr, NumChildren);
Inst->Data->setClauses(Clauses);
return Inst;
}
template <typename T, typename... Params>
static T *createEmptyDirective(const ASTContext &C, unsigned ID,
unsigned NumClauses, unsigned NumChildren,
Params &&... P) {
auto *Inst = new (C, ID, size(NumClauses, NumChildren))
T(nullptr, std::forward<Params>(P)...);
Inst->Data = OMPChildren::CreateEmpty(
Inst + 1, NumClauses, /*HasAssociatedStmt=*/false, NumChildren);
return Inst;
}
static size_t size(unsigned NumClauses, unsigned NumChildren) {
return OMPChildren::size(NumClauses, /*HasAssociatedStmt=*/false,
NumChildren);
}
public:
/// Get number of clauses.
unsigned getNumClauses() const {
if (!Data)
return 0;
return Data->getNumClauses();
}
/// Returns specified clause.
///
/// \param I Number of clause.
///
OMPClause *getClause(unsigned I) const { return clauses()[I]; }
ArrayRef<OMPClause *> clauses() const {
if (!Data)
return llvm::None;
return Data->getClauses();
}
};
/// This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final : public OMPDeclarativeDirective<Decl> {
friend class OMPDeclarativeDirective<Decl>;
virtual void anchor();
OMPThreadPrivateDecl(DeclContext *DC = nullptr,
SourceLocation L = SourceLocation())
: OMPDeclarativeDirective<Decl>(OMPThreadPrivate, DC, L) {}
ArrayRef<const Expr *> getVars() const {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
return llvm::makeArrayRef(Storage, Data->getNumChildren());
}
MutableArrayRef<Expr *> getVars() {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
return llvm::makeMutableArrayRef(Storage, Data->getNumChildren());
}
void setVars(ArrayRef<Expr *> VL);
public:
static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL);
static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
unsigned ID, unsigned N);
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
unsigned varlist_size() const { return Data->getNumChildren(); }
bool varlist_empty() const { return Data->getChildren().empty(); }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVars().begin(); }
varlist_iterator varlist_end() { return getVars().end(); }
varlist_const_iterator varlist_begin() const { return getVars().begin(); }
varlist_const_iterator varlist_end() const { return getVars().end(); }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
/// This represents '#pragma omp declare reduction ...' directive.
/// For example, the following declares a reduction 'foo' for the types 'int'
/// and 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
/// initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
// This class stores some data in DeclContext::OMPDeclareReductionDeclBits
// to save some space. Use the provided accessors to access it.
public:
enum InitKind {
CallInit, // Initialized by function call.
DirectInit, // omp_priv(<expr>)
CopyInit // omp_priv = <expr>
};
private:
friend class ASTDeclReader;
/// Combiner for declare reduction construct.
Expr *Combiner = nullptr;
/// Initializer for declare reduction construct.
Expr *Initializer = nullptr;
/// In parameter of the combiner.
Expr *In = nullptr;
/// Out parameter of the combiner.
Expr *Out = nullptr;
/// Priv parameter of the initializer.
Expr *Priv = nullptr;
/// Orig parameter of the initializer.
Expr *Orig = nullptr;
/// Reference to the previous declare reduction construct in the same
/// scope with the same name. Required for proper template instantiation if
/// the declare reduction construct is declared inside a compound statement.
LazyDeclPtr PrevDeclInScope;
void anchor() override;
OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
OMPDeclareReductionDecl *PrevDeclInScope);
void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
PrevDeclInScope = Prev;
}
public:
/// Create declare reduction node.
static OMPDeclareReductionDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
/// Create deserialized declare reduction node.
static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
unsigned ID);
/// Get combiner expression of the declare reduction construct.
Expr *getCombiner() { return Combiner; }
const Expr *getCombiner() const { return Combiner; }
/// Get In variable of the combiner.
Expr *getCombinerIn() { return In; }
const Expr *getCombinerIn() const { return In; }
/// Get Out variable of the combiner.
Expr *getCombinerOut() { return Out; }
const Expr *getCombinerOut() const { return Out; }
/// Set combiner expression for the declare reduction construct.
void setCombiner(Expr *E) { Combiner = E; }
/// Set combiner In and Out vars.
void setCombinerData(Expr *InE, Expr *OutE) {
In = InE;
Out = OutE;
}
/// Get initializer expression (if specified) of the declare reduction
/// construct.
Expr *getInitializer() { return Initializer; }
const Expr *getInitializer() const { return Initializer; }
/// Get initializer kind.
InitKind getInitializerKind() const {
return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
}
/// Get Orig variable of the initializer.
Expr *getInitOrig() { return Orig; }
const Expr *getInitOrig() const { return Orig; }
/// Get Priv variable of the initializer.
Expr *getInitPriv() { return Priv; }
const Expr *getInitPriv() const { return Priv; }
/// Set initializer expression for the declare reduction construct.
void setInitializer(Expr *E, InitKind IK) {
Initializer = E;
OMPDeclareReductionDeclBits.InitializerKind = IK;
}
/// Set initializer Orig and Priv vars.
void setInitializerData(Expr *OrigE, Expr *PrivE) {
Orig = OrigE;
Priv = PrivE;
}
/// Get reference to previous declare reduction construct in the same
/// scope with the same name.
OMPDeclareReductionDecl *getPrevDeclInScope();
const OMPDeclareReductionDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
}
static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareReductionDecl *>(
const_cast<DeclContext *>(DC));
}
};
/// This represents '#pragma omp declare mapper ...' directive. Map clauses
/// may be used with this directive. The following example declares a
/// user-defined mapper for the type 'struct vec'; it instructs that the
/// fields 'len' and 'data' be mapped when mapping instances of 'struct vec'.
///
/// \code
/// #pragma omp declare mapper(mid: struct vec v) map(v.len, v.data[0:N])
/// \endcode
class OMPDeclareMapperDecl final : public OMPDeclarativeDirective<ValueDecl>,
public DeclContext {
friend class OMPDeclarativeDirective<ValueDecl>;
friend class ASTDeclReader;
friend class ASTDeclWriter;
/// Mapper variable, which is 'v' in the example above
Expr *MapperVarRef = nullptr;
/// Name of the mapper variable
DeclarationName VarName;
LazyDeclPtr PrevDeclInScope;
void anchor() override;
OMPDeclareMapperDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
QualType Ty, DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope)
: OMPDeclarativeDirective<ValueDecl>(OMPDeclareMapper, DC, L, Name, Ty),
DeclContext(OMPDeclareMapper), VarName(VarName),
PrevDeclInScope(PrevDeclInScope) {}
void setPrevDeclInScope(OMPDeclareMapperDecl *Prev) {
PrevDeclInScope = Prev;
}
public:
/// Creates declare mapper node.
static OMPDeclareMapperDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
QualType T, DeclarationName VarName,
ArrayRef<OMPClause *> Clauses,
OMPDeclareMapperDecl *PrevDeclInScope);
/// Creates deserialized declare mapper node.
static OMPDeclareMapperDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range =
llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return Data->getNumClauses(); }
bool clauselist_empty() const { return Data->getClauses().empty(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return Data->getClauses().begin(); }
clauselist_iterator clauselist_end() { return Data->getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return Data->getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return Data->getClauses().end();
}
/// Get the variable declared in the mapper
Expr *getMapperVarRef() { return cast_or_null<Expr>(Data->getChildren()[0]); }
const Expr *getMapperVarRef() const {
return cast_or_null<Expr>(Data->getChildren()[0]);
}
/// Set the variable declared in the mapper
void setMapperVarRef(Expr *MapperVarRefE) {
Data->getChildren()[0] = MapperVarRefE;
}
/// Get the name of the variable declared in the mapper
DeclarationName getVarName() { return VarName; }
/// Get reference to previous declare mapper construct in the same
/// scope with the same name.
OMPDeclareMapperDecl *getPrevDeclInScope();
const OMPDeclareMapperDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareMapper; }
static DeclContext *castToDeclContext(const OMPDeclareMapperDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareMapperDecl *>(D));
}
static OMPDeclareMapperDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareMapperDecl *>(const_cast<DeclContext *>(DC));
}
};
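/// For illustration, a use of the mapper declared in the example above
/// (OpenMP 5.0 'mapper' map-type modifier):
///
/// \code
/// struct vec v;
/// #pragma omp target map(mapper(mid), tofrom: v)
/// \endcode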
/// Pseudo declaration for capturing expressions. It is also used for capturing
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows
/// privatizing non-static members of the current class in non-static member
/// functions. This pseudo-declaration makes it possible to handle this kind of
/// capture properly by wrapping the captured expression in a variable-like
/// declaration.
class OMPCapturedExprDecl final : public VarDecl {
friend class ASTDeclReader;
void anchor() override;
OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
QualType Type, TypeSourceInfo *TInfo,
SourceLocation StartLoc)
: VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo,
SC_None) {
setImplicit();
}
public:
static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id, QualType T,
SourceLocation StartLoc);
static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};
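/// For illustration, in the following the reference to the non-static data
/// member 'a' inside the 'firstprivate' clause is wrapped in an
/// OMPCapturedExprDecl so that it can be privatized like an ordinary variable:
///
/// \code
/// struct S {
///   int a;
///   void foo() {
/// #pragma omp parallel firstprivate(a)
///     ++a;
///   }
/// };
/// \endcode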
/// This represents '#pragma omp requires...' directive.
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final : public OMPDeclarativeDirective<Decl> {
friend class OMPDeclarativeDirective<Decl>;
friend class ASTDeclReader;
virtual void anchor();
OMPRequiresDecl(DeclContext *DC, SourceLocation L)
: OMPDeclarativeDirective<Decl>(OMPRequires, DC, L) {}
public:
/// Create requires node.
static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<OMPClause *> CL);
/// Create deserialized requires node.
static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return Data->getNumClauses(); }
bool clauselist_empty() const { return Data->getClauses().empty(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return Data->getClauses().begin(); }
clauselist_iterator clauselist_end() { return Data->getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return Data->getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return Data->getClauses().end();
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPRequires; }
};
/// This represents '#pragma omp allocate ...' directive.
/// For example, in the following, the default allocator is used for both 'a'
/// and 'A::b':
///
/// \code
/// int a;
/// #pragma omp allocate(a)
/// struct A {
/// static int b;
/// #pragma omp allocate(b)
/// };
/// \endcode
///
class OMPAllocateDecl final : public OMPDeclarativeDirective<Decl> {
friend class OMPDeclarativeDirective<Decl>;
friend class ASTDeclReader;
virtual void anchor();
OMPAllocateDecl(DeclContext *DC, SourceLocation L)
: OMPDeclarativeDirective<Decl>(OMPAllocate, DC, L) {}
ArrayRef<const Expr *> getVars() const {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
return llvm::makeArrayRef(Storage, Data->getNumChildren());
}
MutableArrayRef<Expr *> getVars() {
auto **Storage = reinterpret_cast<Expr **>(Data->getChildren().data());
return llvm::makeMutableArrayRef(Storage, Data->getNumChildren());
}
void setVars(ArrayRef<Expr *> VL);
public:
static OMPAllocateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<Expr *> VL,
ArrayRef<OMPClause *> CL);
static OMPAllocateDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NVars, unsigned NClauses);
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;
unsigned varlist_size() const { return Data->getNumChildren(); }
bool varlist_empty() const { return Data->getChildren().empty(); }
unsigned clauselist_size() const { return Data->getNumClauses(); }
bool clauselist_empty() const { return Data->getClauses().empty(); }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVars().begin(); }
varlist_iterator varlist_end() { return getVars().end(); }
varlist_const_iterator varlist_begin() const { return getVars().begin(); }
varlist_const_iterator varlist_end() const { return getVars().end(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return Data->getClauses().begin(); }
clauselist_iterator clauselist_end() { return Data->getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return Data->getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return Data->getClauses().end();
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPAllocate; }
};
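/// For illustration, the directive may also carry clauses (kept in this
/// node's clause list), e.g. a predefined allocator:
///
/// \code
/// int a;
/// #pragma omp allocate(a) allocator(omp_high_bw_mem_alloc)
/// \endcode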
} // end namespace clang
#endif
|
AutoCo_Parta.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main() {
int Size;
float * Array;
float * Sums;
FILE * fp;
int i,shift;
fp = fopen( "signal.txt", "r" );
if( fp == NULL )
{
fprintf( stderr, "Cannot open file 'signal.txt'\n" );
exit( 1 );
}
fscanf( fp, "%d", &Size );
Array = (float *)malloc( 2 * Size * sizeof(float) );
Sums = (float *)malloc( 1 * Size * sizeof(float) );
for( i = 0; i < Size; i++ )
{
fscanf( fp, "%f", &Array[i] );
Array[i+Size] = Array[i]; // duplicate the array
}
fclose( fp );
i=0;
omp_set_num_threads(2);
double time0=omp_get_wtime();
#pragma omp parallel for default(none) private(shift,i) shared (Array,Sums,Size)
for(shift = 0; shift < Size; shift++ )
{
float sum = 0.;
for( i = 0; i < Size; i++ )
{
sum += Array[i] * Array[i + shift];
}
Sums[shift] = sum; // single write per shift; the thread-local accumulator avoids false sharing ("fix #2") under OpenMP
}
double time1=omp_get_wtime();
double Execution_Time=time1-time0;
printf("Performance = %lf\n",(Size*Size)/Execution_Time/1000000);
fp = fopen("AutoCor_Parta.csv", "w+");
for( i = 0; i < 512 && i < Size; i++ ) { // guard against Size < 512
//printf("Sum[%d] = %lf \n",i,Sums[i]);
fprintf(fp,"%lf",Sums[i]);
fprintf(fp,"\n");
}
fclose(fp);
free(Array);
free(Sums);
return 0;
}
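/* For contrast, a sketch of the false-sharing-prone variant that the
   thread-local 'sum' above avoids: writing Sums[shift] on every inner
   iteration makes threads repeatedly invalidate each other's cache lines
   holding adjacent Sums elements.

   #pragma omp parallel for default(none) private(shift,i) shared(Array,Sums,Size)
   for( shift = 0; shift < Size; shift++ )
       for( i = 0; i < Size; i++ )
           Sums[shift] += Array[i] * Array[i + shift];
*/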
|
TransferOP.h | /*
* TransferOP.h
*
* Created on: Jul 20, 2016
* Author: mason
*/
#ifndef TransferOP_H_
#define TransferOP_H_
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
class TransferParams {
public:
vector<Param> W;
PAlphabet elems;
int nVSize;
int nInSize;
int nOutSize;
public:
TransferParams() {
nVSize = 0;
}
inline void exportAdaParams(ModelUpdate& ada) {
for(int idx = 0; idx < nVSize; idx++) {
ada.addParam(&(W[idx]));
}
}
inline void initial(PAlphabet alpha, int nOSize, int nISize) {
elems = alpha;
nVSize = elems->size();
nInSize = nISize;
nOutSize = nOSize;
W.resize(nVSize);
for(int idx = 0; idx < nVSize; idx++) {
W[idx].initial(nOSize, nISize);
}
}
inline int getElemId(const string& strFeat) {
return elems->from_string(strFeat);
}
// serialization: to be implemented
inline void save(std::ofstream &os) const {
}
// deserialization: to be implemented
inline void load(std::ifstream &is) {
}
};
class TransferNode : public Node {
public:
PNode in;
int xid;
TransferParams* param;
public:
TransferNode() : Node() {
in = NULL;
xid = -1;
param = NULL;
node_type = "transfer";
}
inline void setParam(TransferParams* paramInit) {
param = paramInit;
}
inline void clearValue() {
Node::clearValue();
in = NULL;
xid = -1;
}
public:
void forward(Graph *cg, PNode x, const string& strNorm) {
in = x;
xid = param->getElemId(strNorm);
if (xid < 0) {
std::cout << "TransferNode warning: could find the label: " << strNorm << std::endl;
}
degree = 0;
in->addParent(this);
cg->addNode(this);
}
public:
void compute() {
if (xid >= 0) {
val.mat() = param->W[xid].val.mat() * in->val.mat();
}
}
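// backward() implements the gradient of val = W[xid] * in->val:
//   grad(W[xid]) += loss * in->val^T  and  in->loss += W[xid]^T * loss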
void backward() {
if(xid >= 0) {
param->W[xid].grad.mat() += loss.mat() * in->val.tmat();
in->loss.mat() += param->W[xid].val.mat().transpose() * loss.mat();
}
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to override this in derived node types for a thorough equality check
inline bool typeEqual(PNode other) {
bool result = Node::typeEqual(other);
if (!result) return false;
TransferNode* conv_other = (TransferNode*)other;
if (param != conv_other->param) {
return false;
}
if (xid != conv_other->xid) {
return false;
}
return true;
}
};
class TransferExecute :public Execute {
public:
bool bTrain;
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
inline PExecute TransferNode::generate(bool bTrain, dtype cur_drop_factor) {
TransferExecute* exec = new TransferExecute();
exec->batch.push_back(this);
exec->bTrain = bTrain;
exec->drop_factor = cur_drop_factor;
return exec;
}
#endif /* TransferOP_H_ */
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This makes it possible to avoid updating the type on hot paths in the
/// parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: it stores a
/// function_ref, so all calls to get() with the same location must happen
/// while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
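/// For illustration (hypothetical call site; 'PreferredType' is assumed to be
/// a PreferredTypeBuilder instance owned by the parser):
///
/// \code
/// PreferredType.enterReturn(S, Tok.getLocation());
/// QualType Expected = PreferredType.get(Tok.getLocation()); // same token
/// \endcode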
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See the comment in the overload below for why it's safe to compute the
// linkage of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
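// For illustration: '#pragma pack(push, 8)' maps to PSK_Push_Set with value 8,
// '#pragma pack(pop)' to PSK_Pop, and '#pragma pack()' to PSK_Reset.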
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system
/// headers to define functions that occur in multiple standards to call the
/// version in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to look up file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
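/// For illustration: in 'sizeof(f())' the call 'f()' occurs in an Unevaluated
/// context, a case value such as 'case f():' (with constexpr 'f') is
/// ConstantEvaluated, and the initializer in 'int x = f();' is
/// PotentiallyEvaluated.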
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if this should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// addresses are eventually taken.
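/// A rough sketch of the pattern being checked:
/// \code
///   int __attribute__((noderef)) *p;
///   int *q = &*p; // OK: the dereference is immediately re-addressed
///   int x = *p;   // warned: a noderef pointer is actually dereferenced
/// \endcode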
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
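// A sketch of the variadic overload above: extra arguments are streamed into
// the diagnostic by BoundTypeDiagnoser (the diagnostic ID and range below are
// placeholders), e.g.
//   RequireCompleteType(Loc, T, diag::err_placeholder, SomeRange);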
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
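// A worked example of the summing rule above (a sketch, not normative): an
// attribute inferred from another platform under '#pragma clang attribute'
// would carry priority 1 + 2 = 3, so an explicitly written attribute
// (priority 0) for the same platform takes precedence over it.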
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
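/// Illustrative sketch of the intended usage: callers derive a diagnoser and
/// hand it to PerformContextualImplicitConversion below. The diagnostic ID is
/// a placeholder and the remaining pure virtuals are elided, so this is an
/// abbreviated sketch rather than compilable code.
/// \code
///   struct ArraySizeDiagnoser : ICEConvertDiagnoser {
///     ArraySizeDiagnoser()
///         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
///                               /*Suppress=*/false,
///                               /*SuppressConversion=*/false) {}
///     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
///                                          QualType T) override {
///       return S.Diag(Loc, diag::err_placeholder) << T; // placeholder ID
///     }
///     // ... the other pure virtuals are overridden analogously ...
///   } Diagnoser;
///   ExprResult Converted =
///       PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);
/// \endcode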
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base, Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
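/// Illustrative sketch: redeclaration lookups are typically set up with this
/// helper (Name and NameLoc are assumed surrounding context):
/// \code
///   LookupResult Previous(*this, Name, NameLoc, LookupOrdinaryName,
///                         forRedeclarationInCurContext());
///   LookupName(Previous, S);
/// \endcode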
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
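/// Illustrative sketch, assuming DefaultFilterCCC accepts any candidate and
/// err_undeclared_var_use_suggest is a suitable diagnostic for the caller:
/// \code
///   DefaultFilterCCC CCC;
///   if (TypoCorrection Corrected =
///           CorrectTypo(DeclarationNameInfo(Name, NameLoc),
///                       LookupOrdinaryName, S, &SS, CCC, CTK_ErrorRecovery))
///     diagnoseTypo(Corrected,
///                  PDiag(diag::err_undeclared_var_use_suggest) << Name);
/// \endcode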
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if all
/// typos were successfully corrected, or ExprError if one or more typos
/// could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
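/// Illustrative sketch: a caller that only accepts certain corrected forms
/// can reject candidates from the filter (isAcceptable is a hypothetical
/// predicate); rejected combinations cause the next one to be tried:
/// \code
///   ExprResult Res = CorrectDelayedTyposInExpr(
///       E, /*InitDecl=*/nullptr, [](Expr *Candidate) -> ExprResult {
///         return isAcceptable(Candidate) ? ExprResult(Candidate)
///                                        : ExprError(); // hypothetical check
///       });
/// \endcode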
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and the property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via a
/// warning) that an atomic property must have both or neither of its
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method, or only one method, is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
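/// Illustrative sketch: the parser wraps the third part of a for statement as
/// a discarded-value full expression before handing it to ActOnForStmt
/// (declared below); the Loc/Stmt/Expr variables are assumed context:
/// \code
///   StmtResult For =
///       ActOnForStmt(ForLoc, LParenLoc, InitStmt, Cond,
///                    MakeFullDiscardedValueExpr(ThirdExpr), RParenLoc, Body);
/// \endcode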
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
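/// Illustrative sketch: keep the RAII active so error paths pop the pushed
/// scope automatically, and disable it once the scope is consumed on success:
/// \code
///   PushFunctionScope();
///   FunctionScopeRAII PopScope(*this);
///   // ... analysis that may return early on error ...
///   PopScope.disable(); // success: do not pop; the scope is handled later
/// \endcode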
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
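/// Illustrative sketch: return-statement processing can first look for an
/// NRVO candidate under the strict rules; FnRetType and RetValExp are assumed
/// surrounding context:
/// \code
///   VarDecl *NRVOCandidate =
///       getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
/// \endcode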
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
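/// Illustrative sketch: a "can this be captured?" query uses the full form
/// with BuildAndDiagnose=false, so nothing is captured and nothing is
/// diagnosed:
/// \code
///   QualType CaptureType, DeclRefType;
///   bool CannotCapture = tryCaptureVariable(
///       Var, Loc, TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode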
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
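/// Illustrative sketch, assuming warn_null_arg is an appropriate diagnostic
/// for the caller; the warning is only emitted if the statement is
/// potentially reachable:
/// \code
///   DiagRuntimeBehavior(ArgExpr->getExprLoc(), ArgExpr,
///                       PDiag(diag::warn_null_arg)
///                           << ArgExpr->getSourceRange());
/// \endcode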
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after changing the
// access operator from a '.' to a '->' (to see if that is the change needed
// to fix an error about an unknown member, e.g. when the class defines a
// custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
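// For illustration (not part of the API): the recovery this enables is
// retrying a failed '.' access as '->' when the base defines operator->:
//
//   struct S { int field; };
//   struct Ptr { S *operator->(); };
//   Ptr p;
//   int n = p.field;   // error in Ptr; recovered by reinvoking as p->field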
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
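// For illustration, assuming the grammar above: __builtin_offsetof(S, a.b[2])
// is decomposed into three components:
//
//   { isBrackets = false, U.IdentInfo = "a" }
//   { isBrackets = false, U.IdentInfo = "b" }
//   { isBrackets = true,  U.E = <integer-constant 2> }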
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
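// For illustration, a sketch of the Microsoft extension these model:
//
//   __if_exists(N::f) { /* tokens parsed only if N::f is found */ }
//   __if_not_exists(N::g) { /* tokens parsed only if N::g is not found */ }
//
// When the name is dependent (e.g. inside a template), the result is
// IER_Dependent and the check is deferred until instantiation.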
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Look up the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \returns The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
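// For illustration, per [dcl.init.list]p2 both constructors below are
// initializer-list constructors:
//
//   struct X {
//     X(std::initializer_list<int> il);
//     X(std::initializer_list<int> il, int n = 0);
//   };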
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
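// For illustration, a sketch of the ordering described above: starting from
// the most restrictive specification, each CalledDecl/CalledExpr can only
// loosen the computed result:
//
//   noexcept (C++11)  ->  throw()  ->  throw(E1, E2, ...)
//                                  ->  noexcept(false) / throw(...)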
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
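// For illustration, the four fold-expression forms handled here:
//
//   (pack op ...)          // unary right fold
//   (... op pack)          // unary left fold
//   (pack op ... op init)  // binary right fold
//   (init op ... op pack)  // binary left fold
//
// A unary fold over an empty pack is valid only for &&, || and ',', which is
// the case BuildEmptyCXXFoldExpr handles (yielding true, false, and void()
// respectively).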
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
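// A minimal usage sketch (SemaRef and RD are placeholder names): allow
// 'this' with a const-qualified type while processing part of class RD:
//
//   CXXThisScopeRAII ThisScope(SemaRef, RD,
//                              Qualifiers::fromCVRMask(Qualifiers::Const));
//   // ... within this scope, 'this' has type 'const RD *' ...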
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
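// For illustration: given
//
//   struct A { void *operator new(std::size_t); };
//
// 'new A'   searches the class scope first, then the global scope (AFS_Both);
// '::new A' considers only the global scope (AFS_Global); AFS_Class restricts
// lookup to A's scope alone.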
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
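// For illustration, the pseudo-destructor forms these handle:
//
//   typedef int I;
//   int n;
//   n.~I();              // '.' form, scalar type named by an identifier
//   (&n)->~I();          // '->' form
//   n.~decltype(n)();    // decltype form (the DeclSpec overload above)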
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
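// For illustration: while parsing 'A::B::m', each 'identifier::' step yields
// one NestedNameSpecInfo; for the first step, Identifier names 'A',
// IdentifierLoc is the location of 'A', and CCLoc is the location of the
// following '::'. ObjectType is non-null only within a member access such as
// 'x.A::m'.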
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in that case, no diagnostics are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, applying any
/// implicit conversions (such as an lvalue-to-rvalue conversion) when the
/// initializer is not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
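// For illustration, the init-captures these build (the last form requires
// EllipsisLoc/NumExpansions to be set):
//
//   [x = 42] {};                    // copy init-capture (DirectInit = false)
//   [&r = x] {};                    // reference init-capture (ByRef = true)
//   [p = std::move(q)] {};          // capture by move
//   [...xs = std::move(args)] {};   // pack init-capture (C++2a)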
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it gives
/// the lambda's static invoker a dummy implementation (IR generation
/// supplies the actual details) and builds a conversion body that simply
/// returns the address of that invoker.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later, when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
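/// For example (illustrative):
/// \code
///   struct B { virtual B *clone(); };
///   struct D : B { D *clone() override; }; // covariant return type
/// \endcode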
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
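/// For example (illustrative):
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); }; // error: overrides a 'final' function
/// \endcode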
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
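/// Convenience overload; a typical call (illustrative) pairs a diagnostic
/// ID with an \c AbstractDiagSelID selector:
/// \code
///   RequireNonAbstractType(Loc, T, diag::err_abstract_type_in_decl,
///                          AbstractReturnType);
/// \endcode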
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
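/// For example (illustrative), given
/// \code
///   vectr<int> v; // no 'vectr' is declared anywhere
/// \endcode
/// the name may be typo-corrected to a visible type template such as
/// 'vector'.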
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
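/// For example (illustrative), 'X' names the deduction guide in
/// \code
///   template<typename T> struct X { X(T); };
///   X(int) -> X<int>; // deduction-guide declaration
/// \endcode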
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
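/// For example (illustrative):
/// \code
///   template<typename T, unsigned N> void f(T (&arr)[N]);
///   int a[3];
///   // f(a) deduces N = 3 from the array bound
/// \endcode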
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
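/// For example (illustrative):
/// \code
///   template<typename T> void f() {
///     typename T::type x; // 'typename' specifier handled here
///   }
/// \endcode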
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
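/// For example (illustrative):
/// \code
///   template<typename ...Ts> struct tuple {};
///   template<typename ...Us> using rebound = tuple<Us...>; // 'Us...' here
/// \endcode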
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
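/// For example (illustrative), the expansion 'pair<Ts, Us>...' below can
/// only be expanded when the packs \c Ts and \c Us have the same length:
/// \code
///   template<typename ...Ts> struct tuple {};
///   template<typename T, typename U> struct pair {};
///   template<typename ...Ts, typename ...Us>
///   tuple<pair<Ts, Us>...> zip();
/// \endcode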
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
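/// For example (illustrative):
/// \code
///   template<typename ...Ts> constexpr unsigned count() {
///     return sizeof...(Ts); // answerable without expanding the pack
///   }
/// \endcode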
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
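/// Deduce the type written as \p AutoType from \p Initializer, e.g.
/// (illustrative):
/// \code
///   auto x = 42;  // deduces int
///   auto &r = x;  // deduces int; r has type int &
/// \endcode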
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting a template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
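///
/// A minimal usage sketch (illustrative; \c S, \c Loc, and \c D are
/// hypothetical): construct the guard, then bail out if the depth limit was
/// exceeded or this specialization is already being instantiated.
/// \code
///   InstantiatingTemplate Inst(S, Loc, D);
///   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
///     return;
///   // ... perform the instantiation; the destructor pops the context ...
/// \endcode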
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
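///
/// For illustration, a sketch of interpreting the result:
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // In a SFINAE context; *Info may still be null when there is no
///     // enclosing deduction into which diagnostics can be captured.
///   }
/// \endcode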
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
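///
/// Illustrative sketch (hypothetical \c SemaRef; not a verbatim excerpt):
/// \code
///   SFINAETrap Trap(SemaRef);
///   // ... perform checks whose diagnostics must not escape ...
///   if (Trap.hasErrorOccurred())
///     return true; // report failure without having emitted diagnostics
/// \endcode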
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
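///
/// A sketch of typical use (\c SemaRef and \c wouldBeValid are hypothetical):
/// \code
///   bool Valid;
///   {
///     TentativeAnalysisScope Tentative(SemaRef);
///     Valid = wouldBeValid(E); // diagnostics and typo-correction suppressed
///   } // previous state restored
/// \endcode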
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
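/// Illustrative use of the scope below (a sketch, not a verbatim excerpt):
/// capture the instantiations triggered while processing some entity,
/// perform them, and let the destructor swap the saved queues back in.
/// \code
///   {
///     GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
///     // ... work that may append to S.PendingInstantiations / S.VTableUses ...
///     GlobalInstantiations.perform();
///   } // saved queues are restored by the destructor
/// \endcode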
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
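/// The local analogue of the global scope above, sketched under the same
/// caveats (illustrative only):
/// \code
///   {
///     LocalEagerInstantiationScope LocalInstantiations(S);
///     // ... instantiate a function body; local classes enqueue members ...
///     LocalInstantiations.perform(); // drain the local queue in this scope
///   }
/// \endcode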
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
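///
/// For illustration (a sketch; \c getInfoFor, \c NumParams, and \c EPI are
/// hypothetical):
/// \code
///   ExtParameterInfoBuilder InfoBuilder;
///   for (unsigned I = 0; I != NumParams; ++I)
///     if (auto Info = getInfoFor(I)) // indices must be set in increasing order
///       InfoBuilder.set(I, *Info);
///   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
/// \endcode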
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
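///
/// For illustration (a sketch; \c CallArgs and \c TemplateArgs are assumed
/// to be in scope):
/// \code
///   SmallVector<Expr *, 8> SubstArgs;
///   if (SubstExprs(CallArgs, /*IsCall=*/true, TemplateArgs, SubstArgs))
///     return ExprError(); // substitution failed
/// \endcode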
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a declare target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true when inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true when inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Called on a clause that takes a single expression argument; dispatches
/// to the corresponding clause-specific handler.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a clause that takes a single enumeration-style argument;
/// dispatches to the corresponding clause-specific handler.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on a clause that takes both a list of arguments and an
/// expression; dispatches to the corresponding clause-specific handler.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on a clause that takes no arguments; dispatches to the
/// corresponding clause-specific handler.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on a clause that takes a list of variables; dispatches to the
/// corresponding clause-specific handler.
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind VK.
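///
/// Example usage (a sketch; the type and cast kind are illustrative):
///
///   ExprResult Res = ImpCastExprToType(E, Context.IntTy, CK_IntegralCast);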
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used by DefaultVariadicArgumentPromotion for emitting the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
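///
/// Example usage (a sketch; the assignment action is illustrative):
///
///   AssignConvertType ConvTy =
///       CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (RHS.isInvalid())
///     return ExprError();
///   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                                RHS.get()->getType(), RHS.get(),
///                                AA_Assigning))
///     return ExprError();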
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
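// Example usage (a sketch): for a simple assignment,
//   QualType Result = CheckAssignmentOperands(LHS.get(), RHS, Loc,
//                                             /*CompoundType=*/QualType());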
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
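/// Example usage (a sketch; the condition kind is illustrative):
///
///   ConditionResult Cond = ActOnCondition(getCurScope(), IfLoc, CondExpr,
///                                         ConditionKind::Boolean);
///   if (Cond.isInvalid())
///     return StmtError();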
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted condition expression, or an invalid result if
/// there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the conversion to bool, returning an
/// invalid result if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns an invalid result on
/// failure. Can optionally return the value of the expression.
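///
/// Example usage (a sketch):
///
///   llvm::APSInt Value;
///   ExprResult ICE = VerifyIntegerConstantExpression(E, &Value);
///   if (ICE.isInvalid())
///     return ExprError();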
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid result on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
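//
// Example usage (a sketch; the error handling is illustrative):
//
//   PushForceCUDAHostDevice();
//   ... // handle the forced-__host__ __device__ region
//   if (!PopForceCUDAHostDevice())
//     ... // emit an error about the unmatched pop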
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller The function which needs the address of \p Callee;
/// nullptr in the case of a global context.
/// \param Callee The target function.
///
/// \returns The preference value for the particular Caller/Callee
/// combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it from the set.
/// This is used when we do not want to diagnose such misaligned access
/// (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize methods take a 24-bit image and reduce
% the number of colors so it can be displayed on a raster device with
% fewer bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinates of two opposite vertices (the
% vertex nearest the origin in RGB space and the vertex farthest from the
% origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal is
% to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node
% to allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is 1 +
% sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255:
% 1 + sum(i=1, 8, 8^i) = 1 + (8^9-8)/7 = 19,173,961.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together (a standalone C sketch of this loop appears just
% after this header comment).
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixels' colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of pixels within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
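/*
  A minimal standalone sketch (not part of MagickCore) of the reduction
  loop described in the header comment above.  SketchNode, its fields, and
  the prune() callback are hypothetical names used for illustration only;
  the real implementation walks the octree recursively (see
  ReduceImageColors() and PruneLevel() below).
*/
#include <float.h>
#include <stddef.h>

typedef struct _SketchNode
{
  size_t
    n2;    /* pixels uniquely represented by this node */

  double
    E;     /* accumulated quantization error for this node */
} SketchNode;

static size_t CountLiveSketchNodes(const SketchNode *nodes,size_t count)
{
  size_t
    i,
    live;

  live=0;
  for (i=0; i < count; i++)
    if (nodes[i].n2 > 0)
      live++;
  return(live);
}

static void ReduceSketch(SketchNode *nodes,size_t count,size_t max_colors,
  void (*prune)(SketchNode *))
{
  double
    Ep;

  Ep=0.0;
  while (CountLiveSketchNodes(nodes,count) > max_colors)
  {
    double
      next_Ep;

    size_t
      i;

    /*
      Prune all nodes such that E <= Ep, then set Ep to the minimum E
      among the remaining nodes, exactly as in the pseudocode above; the
      prune() callback is assumed to fold a node's n2/E into its parent.
    */
    next_Ep=DBL_MAX;
    for (i=0; i < count; i++)
      if (nodes[i].E <= Ep)
        prune(nodes+i);
      else
        if (nodes[i].E < next_Ep)
          next_Ep=nodes[i].E;
    if (next_Ep == DBL_MAX)
      break;
    Ep=next_Ep;
  }
}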
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
Typedef declarations.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
MagickRealType
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
MagickRealType
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
MagickRealType
diffusion,
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
if (quantize_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither=image_info->dither;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
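/*
  Usage sketch (illustration only; error handling elided): quantize an image
  to at most 256 colors with the public API declared in magick/quantize.h.
  QuantizeImage() and DestroyQuantizeInfo() come from that header; this
  helper is not part of this file.
*/
static MagickBooleanType SketchQuantizeTo256Colors(Image *image)
{
  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=256;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}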
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
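/*
  AssociateAlphaPixel() converts a pixel to associated (premultiplied)
  alpha: opacity is mapped to a normalized alpha in [0,1] and each color
  channel is scaled by it, so nearly transparent pixels contribute
  proportionally less color to the tree's running sums.
*/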
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
MagickRealType
alpha;
alpha_pixel->index=0;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->opacity == OpaqueOpacity))
{
alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
return;
}
alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
alpha_pixel->red=alpha*GetPixelRed(pixel);
alpha_pixel->green=alpha*GetPixelGreen(pixel);
alpha_pixel->blue=alpha*GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
0x1) << 3;
return(id);
}
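/*
  Worked example (8-bit channels, opaque pixel, illustration only): for
  (R,G,B)=(0xC8,0x64,0x32), descending from index=7 yields id=1 (only the
  red bit set), then id=3 at index=6 (red and green), then id=6 at index=5
  (green and blue); each id selects one of the node's 8 (or 16, with
  alpha) children.
*/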
static inline MagickBooleanType IsSameColor(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
if ((GetPixelRed(p) != GetPixelRed(q)) ||
(GetPixelGreen(p) != GetPixelGreen(q)) ||
(GetPixelBlue(p) != GetPixelBlue(q)))
return(MagickFalse);
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) != GetPixelOpacity(q)))
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
size_t
number_colors;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
number_colors=MagickMax(cube_info->colors,cube_info->maximum_colors);
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
const NodeInfo
*node_info;
ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=GetPixelLuma(image,image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1,k,8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, classification scans downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the node's center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->matte;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
MagickRealType
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if (cube_info->quantize_info->colorspace != image->colorspace)
{
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
}
midpoint.red=(MagickRealType) QuantumRange/2.0;
midpoint.green=(MagickRealType) QuantumRange/2.0;
midpoint.blue=(MagickRealType) QuantumRange/2.0;
midpoint.opacity=(MagickRealType) QuantumRange/2.0;
midpoint.index=(MagickRealType) QuantumRange/2.0;
error.opacity=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
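/*
  Move the running cube midpoint half an edge toward the child selected
  by id (bit 0 = red, bit 1 = green, bit 2 = blue, bit 3 = opacity).
*/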
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
for (y++; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if (cube_info->quantize_info->colorspace != image->colorspace)
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL, a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
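% As an illustrative usage sketch (hypothetical caller; error handling
% elided), a clone acquired here is later released with
% DestroyQuantizeInfo():
%
% QuantizeInfo
% *clone_info;
%
% clone_info=CloneQuantizeInfo(quantize_info);
% clone_info->number_colors=64;
% (void) QuantizeImage(clone_info,image);
% clone_info=DestroyQuantizeInfo(clone_info);
%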
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither=quantize_info->dither;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be searched.
%
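% The comparison below short-circuits on the alpha-weighted squared
% Euclidean distance, accumulated one channel at a time:
%
% distance = (alpha*r_p-beta*r_q)^2+(alpha*g_p-beta*g_q)^2+
% (alpha*b_p-beta*b_q)^2 [+ (a_p-a_q)^2 when alpha is associated]
%
% where alpha and beta are the QuantumScale-normalized alpha values of the
% colormap entry and the target color, respectively.
%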
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
pixel;
DoublePixelPacket
*magick_restrict q;
MagickRealType
alpha,
beta,
distance;
PixelPacket
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
}
pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
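% As a usage sketch (hypothetical caller), compressing a palette image is a
% single call:
%
% if (CompressImageColormap(image) == MagickFalse)
% { /* not a palette image, or quantization failed */ }
%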
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image,&image->exception) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be traversed.
%
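% Each colormap entry is the mean color of the pixels its node represents.
% Because total_color accumulates QuantumScale-scaled sums, the opaque case
% reduces, per channel, to:
%
% mean = QuantumRange*total_color/number_unique
%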
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
alpha;
PixelPacket
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
SetPixelOpacity(q,OpaqueOpacity);
}
else
{
MagickRealType
opacity;
opacity=(MagickRealType) (alpha*QuantumRange*
node_info->total_color.opacity);
SetPixelOpacity(q,ClampToQuantum(opacity));
if (q->opacity == OpaqueOpacity)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
}
else
{
double
gamma;
gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.blue)));
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% its color-reduced counterpart to neighboring pixels, using either
% serpentine-scan Floyd-Steinberg error diffusion or Riemersma dithering
% along a Hilbert curve, depending on the requested dither method.
% DitherImage returns MagickTrue if the image is dithered, otherwise
% MagickFalse.
%
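% For reference, the error weights gathered by the Floyd-Steinberg code
% below (scaled by the diffusion amount) are the classic stencil, relative
% to the current pixel '*' in scan order:
%
%          *    7/16
%   3/16  5/16  1/16
%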
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
size_t
number_threads;
ssize_t
i;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
2*sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
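/*
Pack the top (8-CacheShift) bits of each 8-bit channel into one table
index: red in the low bits, then green, blue, and (optionally) alpha.
CacheShift is defined earlier in this file (not shown here); the cache
itself holds 1 << (4*(8-CacheShift)) entries (see GetCubeInfo()).
*/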
ssize_t
offset;
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
return(offset);
}
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
DoublePixelPacket
**pixels;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
exception=(&image->exception);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
size_t
index;
ssize_t
x,
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
ssize_t
i;
ssize_t
u;
u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(&cube,q+u,&pixel);
if (x > 0)
{
pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=7.0*cube_info->diffusion*current[u-v].opacity/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=cube_info->diffusion*previous[u+v].red/16;
pixel.green+=cube_info->diffusion*previous[u+v].green/16;
pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=cube_info->diffusion*previous[u+v].opacity/16;
}
pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=5.0*cube_info->diffusion*previous[u].opacity/16;
if (x > 0)
{
pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=3.0*cube_info->diffusion*
previous[u-v].opacity/16;
}
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
NodeInfo
*node_info;
size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+u,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q+u,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q+u,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixel(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].opacity=pixel.opacity-color.opacity;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
return(status);
}
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
CubeInfo *cube_info,const size_t level,const unsigned int direction)
{
MagickStatusType
status;
status=MagickTrue;
if (level == 1)
switch (direction)
{
case WestGravity:
{
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
break;
}
case EastGravity:
{
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
break;
}
case NorthGravity:
{
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
break;
}
case SouthGravity:
{
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
break;
}
case EastGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
break;
}
case NorthGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
break;
}
case SouthGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
break;
}
default:
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
CubeInfo
*p;
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
ExceptionInfo
*exception;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
i;
/*
Distribute error.
*/
exception=(&image->exception);
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
AssociateAlphaPixel(cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].red;
pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].green;
pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].opacity;
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
NodeInfo
*node_info;
size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
*indexes=(IndexPacket) index;
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube_info->associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixel(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
size_t
extent,
level;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
extent=MagickMax(image->columns,image->rows);
level=(size_t) log2((double) extent);
if ((1UL << level) < extent)
level++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,&image->exception);
status=MagickTrue;
if (level > 0)
status=Riemersma(image,image_view,cube_info,level,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
% CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
MagickRealType
weight;
size_t
length;
ssize_t
i;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither == MagickFalse)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
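/*
Equivalently, in closed form:
weights[i] = ErrorRelativeWeight^(i/(ErrorQueueLength-1)),
so weights[0] == 1 and weights[ErrorQueueLength-1] == ErrorRelativeWeight.
*/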
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]=PerceptibleReciprocal(weight);
weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
}
cube_info->diffusion=1.0;
return(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: GetNodeInfo returns a pointer to the newly allocated node.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree where the node
% resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
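% In terms of the per-channel differences d accumulated by the code over
% all pixels, with area = 3*columns*rows:
%
% mean_error_per_pixel = sum(|d|)/area
% normalized_mean_error = sum(d^2)/(area*QuantumRange^2)
% normalized_maximum_error = max(|d|)/QuantumRange
%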
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
const IndexPacket
*indexes;
MagickRealType
alpha,
area,
beta,
distance,
gamma,
maximum_error,
mean_error,
mean_error_per_pixel;
ssize_t
index,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) GetPixelIndex(indexes+x);
if (image->matte != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
beta=(MagickRealType) (QuantumScale*(QuantumRange-
image->colormap[index].opacity));
}
distance=fabs((double) (alpha*GetPixelRed(p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p++;
}
}
image_view=DestroyCacheView(image_view);
gamma=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither=MagickTrue;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this value to MagickTrue to dither the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const MagickBooleanType dither)
{
MagickBooleanType
status;
status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
return(status);
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
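/*
For example, with levels == 4 and (assuming an 8-bit quantum) a
QuantumRange of 255, a pixel value of 100 maps to
ClampToQuantum(255*MagickRound(100/255*3)/3), i.e. round(1.18) == 1,
giving 255*1/3 == 85.
*/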
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
ssize_t
i,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((channel & RedChannel) != 0)
image->colormap[i].red=PosterizePixel(image->colormap[i].red);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=PosterizePixel(image->colormap[i].green);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither=dither;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.opacity+=node_info->total_color.opacity;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
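% As a usage sketch (hypothetical caller), reducing an image to at most 16
% colors with the default dithering:
%
% QuantizeInfo
% quantize_info;
%
% GetQuantizeInfo(&quantize_info);
% quantize_info.number_colors=16;
% if (QuantizeImage(&quantize_info,image) == MagickFalse)
% { /* quantization failed */ }
%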
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->matte == MagickFalse)
{
if (SetImageGray(image,&image->exception) != MagickFalse)
(void) SetGrayscaleImage(image);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither != MagickFalse) && (depth > 2))
depth--;
if ((image->matte != MagickFalse) && (depth > 5))
depth--;
if (SetImageGray(image,&image->exception) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
size_t
depth,
maximum_colors,
number_images;
ssize_t
i;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither != MagickFalse)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(&images->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the quantization
% error into a sorted 1D array. This accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to the current node in the color cube tree.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,
MagickRealType *quantize_error)
{
size_t
n,
number_children;
ssize_t
i;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
MagickRealType
*p,
*q;
p=(MagickRealType *) error_p;
q=(MagickRealType *) error_q;
if (*p > *q)
return(1);
if (fabs((double) (*q-*p)) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
MagickRealType
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
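/*
Heuristic: flatten the per-node quantization errors, sort them
ascending, and seed the pruning threshold with the error at the rank
that leaves roughly 110% of the requested maximum number of colors
above it.  The reduction loop below then converges in fewer passes.
*/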
quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (MagickRealType *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
MagickRealTypeCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(MagickRealType *) RelinquishMagickMemory(
quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
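% As a usage sketch (hypothetical caller; palette_image is an illustrative
% name), remapping an image to the palette of a reference image:
%
% QuantizeInfo
% quantize_info;
%
% GetQuantizeInfo(&quantize_info);
% if (RemapImage(&quantize_info,image,palette_image) == MagickFalse)
% { /* remap failed */ }
%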
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Assign the closest reference colors to the image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
        Assign the classified colors to each image in the sequence.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
double
intensity;
PixelPacket
*color_1,
*color_2;
color_1=(PixelPacket *) x;
color_2=(PixelPacket *) y;
intensity=PixelPacketIntensity(color_1)-PixelPacketIntensity(color_2);
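  /* clamp to the representable int range before the cast below; the raw
     intensity difference can exceed INT_MAX or fall below INT_MIN */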
if (intensity < (double) INT_MIN)
intensity=(double) INT_MIN;
if (intensity > (double) INT_MAX)
intensity=(double) INT_MAX;
return((int) intensity);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
size_t
extent;
ssize_t
*colormap_index,
i,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
exception=(&image->exception);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(Quantum) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
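  /*
    Compact the sorted colormap: collapse runs of identical colors, and use
    the opacity field (which holds each entry's pre-sort index) to build a
    map from original colormap indexes to compacted ones.
  */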
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
lake_opt.c | /**************************************
* Author: Rahul Krishna
* unity: rkrish11
**************************************/
/*************************************
* lake.c
*
* Models pebbles on a lake
* Description:
*
* This program uses centered finite differencing to
* solve the wave equation with sources.
*
* The interface is given as
*
* lake [grid_size] [# of pebbles] [end time] [# threads]
*
* where
*
* grid_size - integer, size of one edge of the square grid;
* so the true size of the computational grid will
* be grid_size * grid_size
*
* # of pebbles - number of simulated "pebbles" to start with
*
* end time - the simulation starts from t=0.0 and goes to
* t=[end time]
*
* # threads - the number of threads the simulation uses
*
**************************************/
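/* Example invocation (values are illustrative only):
 *
 *    ./lake 128 5 1.0 8
 *
 * simulates a 128 x 128 grid seeded with 5 pebbles, from t = 0.0 to
 * t = 1.0, using 8 threads. */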
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>   /* for time(), used to seed srand() */
#include "omp.h"
// #include "openacc.h"
#include "./lake.h"
#include "./lake_util.h"
/* Probably not necessary but doesn't hurt */
#define _USE_MATH_DEFINES
int main(int argc, char *argv[])
{
if(argc != 5)
{
fprintf(stdout, "Usage: %s npoints npebs time_finish nthreads \n",argv[0]);
return 0;
}
/* grab the arguments and setup some vars */
int npoints = atoi(argv[1]);
int npebs = atoi(argv[2]);
double end_time = (double)atof(argv[3]);
int nthreads = atoi(argv[4]);
int narea = npoints * npoints;
/* check input params for restrictions */
if ( npoints % nthreads != 0 )
{
fprintf(stderr, "BONK! npoints must be evenly divisible by nthreads\n Try again!");
return 0;
}
/* get the program directory */
set_wrkdir(argv[0]);
/* main simulation arrays */
double *u_i0, *u_i1;
double *u_cpu, *pebs;
/* u_err is used when calculating the
* error between one version of the code
* and another. */
double *u_err;
/* h is the size of each grid cell */
double h;
/* used for error analysis */
double avgerr;
/* used for time analysis */
double elapsed_cpu;
struct timeval cpu_start, cpu_end;
/* allocate arrays */
u_i0 = (double*)malloc(sizeof(double) * narea);
u_i1 = (double*)malloc(sizeof(double) * narea);
pebs = (double*)malloc(sizeof(double) * narea);
u_cpu = (double*)malloc(sizeof(double) * narea);
start_lake_log("lake.log");
lake_log("running %s with (%d x %d) grid, until %f, with %d threads\n", argv[0], npoints, npoints, end_time, nthreads);
printf("running %s with (%d x %d) grid, until %f, with %d threads\n", argv[0], npoints, npoints, end_time, nthreads);
/* initialize the simulation */
h = (XMAX - XMIN)/npoints;
lake_log("grid step size is %f\n",h);
#ifdef __DEBUG
lake_log("initializing pebbles\n");
#endif
init_pebbles(pebs, npebs, npoints);
#ifdef __DEBUG
lake_log("initializing u0, u1\n");
#endif
init(u_i0, pebs, npoints);
init(u_i1, pebs, npoints);
/* print the initial configuration */
#ifdef __DEBUG
lake_log("printing initial configuration file\n");
#endif
print_heatmap("lake_i.dat", u_i0, npoints, h);
/* time, run the simulation */
#ifdef __DEBUG
lake_log("beginning simulation\n");
#endif
gettimeofday(&cpu_start, NULL);
run_sim(u_cpu, u_i0, u_i1, pebs, npoints, h, end_time, nthreads);
gettimeofday(&cpu_end, NULL);
elapsed_cpu = ((cpu_end.tv_sec + cpu_end.tv_usec * 1e-6)-(
cpu_start.tv_sec + cpu_start.tv_usec * 1e-6));
lake_log("\nSimulation took %f seconds\n", elapsed_cpu);
printf("Simulation took %f seconds\n", elapsed_cpu);
/* print the final configuration */
#ifdef __DEBUG
lake_log("printing final configuration file\n");
#endif
print_heatmap("lake_f.dat", u_cpu, npoints, h);
#ifdef __DEBUG
lake_log("freeing memory\n");
#endif
/* free memory */
free(u_i0);
free(u_i1);
free(pebs);
free(u_cpu);
stop_lake_log();
return 0;
}
/*****************************
* run_sim
*
* Input
* ----------
 * double *u0 - the initial configuration
 * double *u1 - the initial + 1 configuration
* double *pebbles - the array of pebbles
* int n - the grid size
* double h - the grid step size
* double end_time - the final time
* int nthreads - the number of threads to use
*
* Output
* ----------
* double *u - the final configuration
*
* Description
* ----------
 * run_sim is the main driver of the program. It takes in the initial
* configuration and parameters, and runs them until end_time is reached.
*
*******************************/
void run_sim(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
/*
Note To Graders:
I have the #pragmas sorted. The top pragma is always openmp and the bottom is for openACC. Please comment/uncomment as necessary.
*/
/* arrays used in the calculation */
double un[n][n], uc[n][n], uo[n][n], pebs[n][n];
/* time vars */
double t, dt;
int i, j;
/* the calculation arrays are stack VLAs declared above;
   copy the initial configurations into them */
memcpy(uo, u0, sizeof(double) * n * n);
memcpy(uc, u1, sizeof(double) * n * n);
memcpy(pebs, pebbles, sizeof(double) * n * n);
/* start at t=0.0 */
t = 0.;
/* this is probably not ideal. In principle, we should
* keep the time-step at the size determined by the
* CFL condition
*
* dt = h / vel_max
*
* where vel_max is the maximum velocity in the current
* model. The condition dt = h/2. should suffice, but
* be aware the possibility exists for madness and mayhem */
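  /* For example, with the (hypothetical) lake.h values XMIN = 0.0 and
     XMAX = 1.0 and npoints = 128, h = 1/128 and so dt = 1/256. */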
dt = h / 2.;
/* loop until time >= end_time */
// omp_set_num_threads(nthreads);
  // #pragma omp parallel num_threads(nthreads) // disabled: the loops below
  // open their own "omp parallel for" regions; enabling this would run the
  // whole while loop redundantly on every thread
  // #pragma acc data copy(uc, uo, un, pebs) // Move data to GPU (copy-out is implicit.)
while(1)
{
    /* run a central finite differencing scheme to solve
* the wave equation in 2D */
#pragma omp parallel for schedule(static) private(i,j) shared(un, uc, uo, pebs, n) num_threads(nthreads/2)
for( i = 0; i < n; i++)
{
#pragma omp parallel for schedule(static, 2*n/nthreads) shared(un, uc, uo, pebs, n) num_threads(nthreads/2) private(j)
for( j = 0; j < n; j++)
{
/* impose the u|_s = 0 boundary conditions */
if( i == 0 || i == n - 1 || j == 0 || j == n - 1)
{
un[i][j] = 0.;
}
/* otherwise do the FD scheme */
else
{
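          /* nine-point stencil: edge neighbors weighted 1, corner
             neighbors 0.25, center -5; the weights sum to zero, as a
             discrete Laplacian requires */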
un[i][j] = 2*uc[i][j] - uo[i][j] + VSQR *(dt * dt) *((uc[i][j-1] + uc[i][j+1] + uc[i+1][j] + uc[i-1][j] + 0.25 * (uc[i-1][j-1] + uc[i+1][j-1]+ uc[i-1][j+1] + uc[i+1][j+1]) - 5 * uc[i][j])/(h * h) + f(pebs[i][j],t));
}
}
}
#pragma omp parallel for schedule(static) private(i,j) shared(un, uc, uo, n) num_threads(nthreads/2)
/* update the calculation arrays for the next time step */
// #pragma acc parallel loop
for( i = 0; i < n; i++ )
{
#pragma omp parallel for schedule(static, 2*n/nthreads) private(j) shared(n, un, uc, uo)
for ( j = 0; j < n; j++ )
{
uo[i][j] = uc[i][j];
uc[i][j] = un[i][j];
}
}
/* have we reached the end? */
if(!tpdt(&t,dt,end_time)) break;
}
/* cpy the last updated to the output array */
memcpy(u, un, sizeof(double) * n * n);
}
/*****************************
* init_pebbles
*
* Input
* ----------
* int pn - the number of pebbles
* int n - the grid size
*
* Output
* ----------
* double *p - an array (dimensioned same as the grid) that
 * gives the initial pebble size.
*
* Description
* ----------
 * init_pebbles creates a random scattering of pn pebbles,
 * each with a random size. The range of sizes can be adjusted by changing
* the constant MAX_PSZ.
*
*******************************/
void init_pebbles(double *p, int pn, int n)
{
int i, j, k, idx;
int sz;
srand( time(NULL) );
/* set to zero */
memset(p, 0, sizeof(double) * n * n);
for( k = 0; k < pn ; k++ )
{
/* the offset is to ensure that no pebbles
* are spawned on the very edge of the grid */
i = rand() % (n - 4) + 2;
j = rand() % (n - 4) + 2;
sz = rand() % MAX_PSZ;
idx = j + i * n;
p[idx] = (double) sz;
}
}
/*****************************
* f
*
* Input
* ----------
 * double p - the initial pebble value
* double t - the current time
* Returns
* ----------
* the value of the "pebble" source term at time t
*
* Description
* ----------
 * Each pebble's influence on the surface will "fade" as
* time marches forward (they may sink away, for instance).
* This function models that - at large t ("large" defined
* relative to the constant TSCALE) the pebble will have
* little to no effect.
*
* NB: this function can be updated to model whatever behavior
* you wish the pebbles to have - they could continually jump
 * up and down on the surface, driving more energetic waves, for
* example.
******************************/
double f(double p, double t)
{
  return -exp(-TSCALE * t) * p; /* exp, not expf: p and t are doubles */
}
int tpdt(double *t, double dt, double tf)
{
if((*t) + dt > tf) return 0;
(*t) = (*t) + dt;
return 1;
}
void init(double *u, double *pebbles, int n)
{
int i, j, idx;
for(i = 0; i < n ; i++)
{
for(j = 0; j < n ; j++)
{
idx = j + i * n;
u[idx] = f(pebbles[idx], 0.0);
}
}
}
/*****************************
* error_u
*
* Input
* ----------
 * double *ua - the first solution array
 * double *ub - the second solution array
* int n - array extent
*
* Output
* ----------
* double *uerr - array of errors
* double *avgerr - pointer to the average error
*
* Description
* ----------
* Calculates the relative error between ua and ub
*
********************************/
void error_u(double *uerr, double *avgerr, double *ua, double *ub, int n)
{
int i, j, idx;
(*avgerr) = 0.;
for (i = 0; i < n; i++ )
{
for (j = 0; j < n; j++ )
{
idx = j + i * n;
uerr[idx] = fabs((ua[idx]-ub[idx])/ua[idx]);
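      /* streaming mean: avg <- avg * m/(m+1) + err/(m+1) with m = idx,
         so the average is maintained without a separate sum pass */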
(*avgerr) = (*avgerr) * ((double)idx/(double)(idx + 1)) + uerr[idx] / (double)(idx + 1);
}
}
}
/*****************************
* print_heatmap
*
* Input
* ----------
* char *filename - the output file name
* double *u - the array to output
 * int n - the edge extent of u (i.e., u is (n x n))
* double h - the step size in u
* Output
* ----------
* None
*
* Description
* ----------
* Outputs the array u to the file filename
********************************/
void print_heatmap(char *filename, double *u, int n, double h)
{
char full_filename[64];
int i, j, idx;
dir_string(filename, full_filename);
FILE *fp = fopen(full_filename, "w");
for( i = 0; i < n; i++ )
{
for( j = 0; j < n; j++ )
{
idx = j + i * n;
fprintf(fp, "%f %f %f\n", i*h, j*h, u[idx]);
}
}
fclose(fp);
}
|
7744.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
   Can also be used to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
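  /* 3mm computes G = (A*B)*(C*D) through two temporaries:
     E := A*B (NI x NJ), F := C*D (NJ x NL), then G := E*F (NI x NL). */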
#pragma scop
#pragma omp parallel private (i, j, k) num_threads(#P11)
{
/* E := A*B */
#pragma omp target teams distribute
for (i = 0; i < _PB_NI; i++)
{
#pragma omp parallel for simd num_threads(8)
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
#pragma omp target teams distribute
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp parallel for simd num_threads(8)
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
#pragma omp target teams distribute
for (i = 0; i < _PB_NI; i++)
{
#pragma omp parallel for simd num_threads(8)
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
GB_binop__ne_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fp32)
// A*D function (colscale): GB (_AxD__ne_fp32)
// D*A function (rowscale): GB (_DxB__ne_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fp32)
// C=scalar+B GB (_bind1st__ne_fp32)
// C=scalar+B' GB (_bind1st_tran__ne_fp32)
// C=A+scalar GB (_bind2nd__ne_fp32)
// C=A'+scalar GB (_bind2nd_tran__ne_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij != bij)
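// For orientation, a user-level call that would reach the bind1st kernel in
// this file might look like the following sketch (assuming the GrB v1.3
// typed apply variants available in SuiteSparse:GraphBLAS v4 and later):
//
//      // C = (x != A), with the scalar x bound as the first operand:
//      // GrB_Matrix_apply_BinaryOp1st_FP32 (C, NULL, NULL, GrB_NE_FP32,
//      //     0.5f, A, NULL) ;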
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_FP32 || GxB_NO_NE_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__ne_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__ne_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__ne_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__ne_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = (x != bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__ne_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = (aij != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__ne_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__ne_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
quantize.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE %
% Q Q U U A A NN N T I ZZ E %
% Q Q U U AAAAA N N N T I ZZZ EEEEE %
% Q QQ U U A A N NN T I ZZ E %
% QQQQ UUU A A N N T IIIII ZZZZZ EEEEE %
% %
% %
% MagickCore Methods to Reduce the Number of Unique Colors in an Image %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Realism in computer graphics typically requires using 24 bits/pixel to
% generate an image. Yet many graphic display devices do not contain the
% amount of memory necessary to match the spatial and color resolution of
% the human eye. The Quantize method takes a 24-bit image and reduces
% the number of colors so it can be displayed on a raster device with
% fewer bits per pixel. In most instances, the quantized image closely
% resembles the original reference image.
%
% A reduction of colors in an image is also desirable for image
% transmission and real-time animation.
%
% QuantizeImage() takes standard RGB or monochrome images and quantizes
% them down to some fixed number of colors.
%
% For purposes of color allocation, an image is a set of n pixels, where
% each pixel is a point in RGB space. RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color. Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain. In the following discussion
% these cubes are defined by the coordinates of two opposite vertices (the
% nearest the origin in RGB space and the vertex farthest from the origin).
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax). Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment. Classification builds a color description
% tree for the image. Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image. Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree. Our goal
% is to minimize
% the numerical discrepancies between the original colors and quantized
% colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax. If
% color components in the input image are quantized to k-bit precision,
% so that Cmax = 2^k-1, the tree would need k levels below the root node
% to allow representing each possible input color in a leaf. This becomes
% prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
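% (Check: sum(i=1, 8, 8^i) = (8^9 - 8)/7 = 19,173,960 descendant nodes;
% adding the root gives 19,173,961.)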
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, the classification phase scans
% downward from
% the root of the color description tree. At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color. It updates the following data for each
% such node:
%
% n1: Number of pixels whose color is contained in the RGB cube which
% this node represents;
%
% n2: Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb: Sums of the red, green, and blue component values for all
% pixels not classified at a lower depth. The combination of these sums
% and n2 will ultimately characterize the mean color of a set of pixels
% represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the
% quantization error for a node.
%
% Reduction repeatedly prunes the tree until the number of nodes with n2
% > 0 is less than or equal to the maximum number of colors allowed in
% the output image. On any given iteration over the tree, it selects
% those nodes whose E count is minimal for pruning and merges their color
% statistics upward. It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents. This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree. The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
/*
Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift 2
#else
#define CacheShift 3
#endif
#define ErrorQueueLength 16
#define ErrorRelativeWeight PerceptibleReciprocal(16)
#define MaxNodes 266817
#define MaxTreeDepth 8
#define NodesInAList 1920
/*
  Typedef declarations.
*/
typedef struct _NodeInfo
{
struct _NodeInfo
*parent,
*child[16];
MagickSizeType
number_unique;
DoublePixelPacket
total_color;
MagickRealType
quantize_error;
size_t
color_number,
id,
level;
} NodeInfo;
typedef struct _Nodes
{
NodeInfo
*nodes;
struct _Nodes
*next;
} Nodes;
typedef struct _CubeInfo
{
NodeInfo
*root;
size_t
colors,
maximum_colors;
ssize_t
transparent_index;
MagickSizeType
transparent_pixels;
DoublePixelPacket
target;
MagickRealType
distance,
pruning_threshold,
next_threshold;
size_t
nodes,
free_nodes,
color_number;
NodeInfo
*next_node;
Nodes
*node_queue;
MemoryInfo
*memory_info;
ssize_t
*cache;
DoublePixelPacket
error[ErrorQueueLength];
MagickRealType
diffusion,
weights[ErrorQueueLength];
QuantizeInfo
*quantize_info;
MagickBooleanType
associate_alpha;
ssize_t
x,
y;
size_t
depth;
MagickOffsetType
offset;
MagickSizeType
span;
} CubeInfo;
/*
Method prototypes.
*/
static CubeInfo
*GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);
static NodeInfo
*GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);
static MagickBooleanType
AssignImageColors(Image *,CubeInfo *),
ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
DitherImage(Image *,CubeInfo *),
SetGrayscaleImage(Image *);
static void
ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
DefineImageColormap(Image *,CubeInfo *,NodeInfo *),
DestroyCubeInfo(CubeInfo *),
PruneLevel(CubeInfo *,const NodeInfo *),
PruneToCubeDepth(CubeInfo *,const NodeInfo *),
ReduceImageColors(const Image *,CubeInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
% The format of the AcquireQuantizeInfo method is:
%
% QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
QuantizeInfo
*quantize_info;
quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
if (quantize_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(quantize_info);
if (image_info != (ImageInfo *) NULL)
{
const char
*option;
quantize_info->dither=image_info->dither;
option=GetImageOption(image_info,"dither");
if (option != (const char *) NULL)
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,option);
quantize_info->measure_error=image_info->verbose;
}
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A s s i g n I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AssignImageColors() generates the output image from the pruned tree. The
% output image consists of two parts: (1) A color map, which is an array
% of color descriptions (RGB triples) for each color present in the
% output image; (2) A pixel array, which represents each pixel as an
% index into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map. For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2. This produces the mean
% color of all pixels that classify no lower than this node. Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color. The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% The format of the AssignImageColors() method is:
%
% MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
const PixelPacket *pixel,DoublePixelPacket *alpha_pixel)
{
MagickRealType
alpha;
alpha_pixel->index=0;
if ((cube_info->associate_alpha == MagickFalse) ||
(pixel->opacity == OpaqueOpacity))
{
alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
return;
}
alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
alpha_pixel->red=alpha*GetPixelRed(pixel);
alpha_pixel->green=alpha*GetPixelGreen(pixel);
alpha_pixel->blue=alpha*GetPixelBlue(pixel);
alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}
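/*
  ColorToNodeId() packs bit "index" of each channel's 8-bit-scaled value
  into a child id: red supplies bit 0, green bit 1, blue bit 2, and, when
  alpha is associated, opacity supplies bit 3.
*/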
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
const DoublePixelPacket *pixel,size_t index)
{
size_t
id;
id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) &
0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) &
0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >>
index) & 0x01) << 2);
if (cube_info->associate_alpha != MagickFalse)
id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) &
0x1) << 3;
return(id);
}
static inline MagickBooleanType IsSameColor(const Image *image,
const PixelPacket *p,const PixelPacket *q)
{
if ((GetPixelRed(p) != GetPixelRed(q)) ||
(GetPixelGreen(p) != GetPixelGreen(q)) ||
(GetPixelBlue(p) != GetPixelBlue(q)))
return(MagickFalse);
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) != GetPixelOpacity(q)))
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag "Assign/Image"
ColorspaceType
colorspace;
ssize_t
y;
size_t
number_colors;
/*
Allocate image colormap.
*/
colorspace=image->colorspace;
if (cube_info->quantize_info->colorspace != UndefinedColorspace)
(void) TransformImageColorspace(image,cube_info->quantize_info->colorspace);
number_colors=MagickMax(cube_info->colors,cube_info->maximum_colors);
if (AcquireImageColormap(image,number_colors) == MagickFalse)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
image->colors=0;
cube_info->transparent_pixels=0;
cube_info->transparent_index=(-1);
DefineImageColormap(image,cube_info,cube_info->root);
/*
Create a reduced color image.
*/
if ((cube_info->quantize_info->dither != MagickFalse) &&
(cube_info->quantize_info->dither_method != NoDitherMethod))
(void) DitherImage(image,cube_info);
else
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CubeInfo
cube;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
ssize_t
count;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
for (x=0; x < (ssize_t) image->columns; x+=count)
{
DoublePixelPacket
pixel;
const NodeInfo
*node_info;
ssize_t
i;
size_t
id,
index;
/*
Identify the deepest node containing the pixel's color.
*/
for (count=1; (x+count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,q,q+count) == MagickFalse)
break;
AssociateAlphaPixel(&cube,q,&pixel);
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
(QuantumRange+1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
index=cube.color_number;
for (i=0; i < (ssize_t) count; i++)
{
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+x+i,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
q++;
}
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
}
if (cube_info->quantize_info->measure_error != MagickFalse)
(void) GetImageQuantizeError(image);
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
{
double
intensity;
/*
Monochrome image.
*/
intensity=GetPixelLuma(image,image->colormap+0) < QuantumRange/2.0 ? 0.0 :
QuantumRange;
if ((image->colors > 1) &&
(GetPixelLuma(image,image->colormap+0) >
GetPixelLuma(image,image->colormap+1)))
intensity=(double) QuantumRange;
image->colormap[0].red=intensity;
image->colormap[0].green=intensity;
image->colormap[0].blue=intensity;
if (image->colors > 1)
{
image->colormap[1].red=(double) QuantumRange-intensity;
image->colormap[1].green=(double) QuantumRange-intensity;
image->colormap[1].blue=(double) QuantumRange-intensity;
}
}
(void) SyncImage(image);
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(IssRGBCompatibleColorspace(colorspace) == MagickFalse))
(void) TransformImageColorspace(image,colorspace);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClassifyImageColors() begins by initializing a color description tree
% of sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color
% description tree in the classification phase for realistic values of
% Cmax. If color components in the input image are quantized to k-bit
% precision, so that Cmax = 2^k-1, the tree would need k levels below the
% root node to allow representing each possible input color in a leaf.
% This becomes prohibitive because the tree's total number of nodes is
% 1 + sum(i=1, k, 8^i).
%
% A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
% Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
% Initializes data structures for nodes only as they are needed; (2)
% Chooses a maximum depth for the tree as a function of the desired
% number of colors in the output image (currently log2(colormap size)).
%
% For each pixel in the input image, the classification phase scans
% downward from the root of the color description tree. At each level of
% the tree it identifies the single node which represents a cube in RGB
% space containing the pixel's color. It updates the following data for
% each such node:
%
% n1 : Number of pixels whose color is contained in the RGB cube
% which this node represents;
%
% n2 : Number of pixels whose color is not represented in a node at
% lower depth in the tree; initially, n2 = 0 for all nodes except
% leaves of the tree.
%
% Sr, Sg, Sb : Sums of the red, green, and blue component values for
% all pixels not classified at a lower depth. The combination of
% these sums and n2 will ultimately characterize the mean color of a
% set of pixels represented by this node.
%
% E: the distance squared in RGB space between each pixel contained
% within a node and the nodes' center. This represents the quantization
% error for a node.
%
% The format of the ClassifyImageColors() method is:
%
% MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
% const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o image: the image.
%
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
MagickBooleanType
associate_alpha;
associate_alpha=image->matte;
if ((cube_info->quantize_info->number_colors == 2) &&
((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
(cube_info->quantize_info->colorspace == GRAYColorspace)))
associate_alpha=MagickFalse;
cube_info->associate_alpha=associate_alpha;
}
static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag "Classify/Image"
CacheView
*image_view;
DoublePixelPacket
error,
mid,
midpoint,
pixel;
MagickBooleanType
proceed;
MagickRealType
bisect;
NodeInfo
*node_info;
size_t
count,
id,
index,
level;
ssize_t
y;
/*
Classify the first cube_info->maximum_colors colors to a tree depth of 8.
*/
SetAssociatedAlpha(image,cube_info);
if (cube_info->quantize_info->colorspace != image->colorspace)
{
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,
cube_info->quantize_info->colorspace);
else
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
}
midpoint.red=(MagickRealType) QuantumRange/2.0;
midpoint.green=(MagickRealType) QuantumRange/2.0;
midpoint.blue=(MagickRealType) QuantumRange/2.0;
midpoint.opacity=(MagickRealType) QuantumRange/2.0;
midpoint.index=(MagickRealType) QuantumRange/2.0;
error.opacity=0.0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= MaxTreeDepth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
continue;
}
if (level == MaxTreeDepth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
if (cube_info->colors > cube_info->maximum_colors)
{
PruneToCubeDepth(cube_info,cube_info->root);
break;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
for (y++; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
if (cube_info->nodes > MaxNodes)
{
/*
Prune one level if the color tree is too large.
*/
PruneLevel(cube_info,cube_info->root);
cube_info->depth--;
}
for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
{
/*
Start at the root and descend the color cube tree.
*/
for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
if (IsSameColor(image,p,p+count) == MagickFalse)
break;
AssociateAlphaPixel(cube_info,p,&pixel);
index=MaxTreeDepth-1;
bisect=((MagickRealType) QuantumRange+1.0)/2.0;
mid=midpoint;
node_info=cube_info->root;
for (level=1; level <= cube_info->depth; level++)
{
double
distance;
bisect*=0.5;
id=ColorToNodeId(cube_info,&pixel,index);
mid.red+=(id & 1) != 0 ? bisect : -bisect;
mid.green+=(id & 2) != 0 ? bisect : -bisect;
mid.blue+=(id & 4) != 0 ? bisect : -bisect;
mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
if (node_info->child[id] == (NodeInfo *) NULL)
{
/*
Set colors of new node to contain pixel.
*/
node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
if (node_info->child[id] == (NodeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","%s",
image->filename);
continue;
}
if (level == cube_info->depth)
cube_info->colors++;
}
/*
Approximate the quantization error represented by this node.
*/
node_info=node_info->child[id];
error.red=QuantumScale*(pixel.red-mid.red);
error.green=QuantumScale*(pixel.green-mid.green);
error.blue=QuantumScale*(pixel.blue-mid.blue);
if (cube_info->associate_alpha != MagickFalse)
error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
distance=(double) (error.red*error.red+error.green*error.green+
error.blue*error.blue+error.opacity*error.opacity);
if (IsNaN(distance) != 0)
distance=0.0;
node_info->quantize_error+=count*sqrt(distance);
cube_info->root->quantize_error+=node_info->quantize_error;
index--;
}
/*
Sum RGB for this leaf for later derivation of the mean cube color.
*/
node_info->number_unique+=count;
node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
pixel.opacity);
else
node_info->total_color.opacity+=count*QuantumScale*
ClampPixel(OpaqueOpacity);
p+=count;
}
proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
break;
}
image_view=DestroyCacheView(image_view);
if (cube_info->quantize_info->colorspace != image->colorspace)
if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
(cube_info->quantize_info->colorspace != CMYKColorspace))
(void) TransformImageColorspace((Image *) image,sRGBColorspace);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
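/*
  Illustrative sketch (not part of MagickCore; ExampleColorToNodeId is a
  hypothetical stand-in for the ColorToNodeId() used above): the child index
  at a given tree depth takes one bit per channel, matching the (id & 1),
  (id & 2), (id & 4) midpoint updates in the descent loop.  With an
  associated alpha channel, a fourth bit (id & 8) is added the same way.
*/
static inline size_t ExampleColorToNodeId(const unsigned char red,
  const unsigned char green,const unsigned char blue,const size_t index)
{
  return((size_t) ((((red >> index) & 0x01) << 0) |
    (((green >> index) & 0x01) << 1) |
    (((blue >> index) & 0x01) << 2)));
}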
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
% or if quantize info is NULL, a new one.
%
% The format of the CloneQuantizeInfo method is:
%
% QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
% quantize info, or if quantize info is NULL, a new one.
%
% o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
QuantizeInfo
*clone_info;
clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
if (clone_info == (QuantizeInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
GetQuantizeInfo(clone_info);
if (quantize_info == (QuantizeInfo *) NULL)
return(clone_info);
clone_info->number_colors=quantize_info->number_colors;
clone_info->tree_depth=quantize_info->tree_depth;
clone_info->dither=quantize_info->dither;
clone_info->dither_method=quantize_info->dither_method;
clone_info->colorspace=quantize_info->colorspace;
clone_info->measure_error=quantize_info->measure_error;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o s e s t C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClosestColor() traverses the color cube tree at a particular node and
% determines which colormap entry best represents the input color.
%
% The format of the ClosestColor method is:
%
% void ClosestColor(const Image *image,CubeInfo *cube_info,
% const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be searched.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
const NodeInfo *node_info)
{
ssize_t
i;
size_t
number_children;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
ClosestColor(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
pixel;
DoublePixelPacket
*magick_restrict q;
MagickRealType
alpha,
beta,
distance;
PixelPacket
*magick_restrict p;
/*
Determine if this color is "closest".
*/
p=image->colormap+node_info->color_number;
q=(&cube_info->target);
alpha=1.0;
beta=1.0;
if (cube_info->associate_alpha != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
}
pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
distance=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
distance+=pixel*pixel;
if (distance <= cube_info->distance)
{
if (cube_info->associate_alpha != MagickFalse)
{
pixel=GetPixelAlpha(p)-GetPixelAlpha(q);
distance+=pixel*pixel;
}
if (distance <= cube_info->distance)
{
cube_info->distance=distance;
cube_info->color_number=node_info->color_number;
}
}
}
}
}
}
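/*
  Illustrative note (not part of MagickCore): before calling ClosestColor(),
  callers seed cube_info->distance with a value larger than any possible
  squared distance, as the dither code later in this file does:

    cube.target=pixel;
    cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
      (QuantumRange+1.0)+1.0);
    ClosestColor(image,&cube,node_info->parent);

  The nested comparisons above then tighten cube_info->distance channel by
  channel, abandoning a candidate as soon as a partial sum exceeds the best
  distance found so far.
*/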
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p r e s s I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompressImageColormap() compresses an image colormap by removing any
% duplicate or unused color entries.
%
% The format of the CompressImageColormap method is:
%
% MagickBooleanType CompressImageColormap(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
QuantizeInfo
quantize_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsPaletteImage(image,&image->exception) == MagickFalse)
return(MagickFalse);
GetQuantizeInfo(&quantize_info);
quantize_info.number_colors=image->colors;
quantize_info.tree_depth=MaxTreeDepth;
return(QuantizeImage(&quantize_info,image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e I m a g e C o l o r m a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageColormap() traverses the color cube tree and notes each colormap
% entry. A colormap entry is any node in the color cube tree where the
% number of unique colors is not zero.
%
% The format of the DefineImageColormap method is:
%
% void DefineImageColormap(Image *image,CubeInfo *cube_info,
% NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: the address of a structure of type NodeInfo which points to a
% node in the color cube tree that is to be traversed.
%
*/
static void DefineImageColormap(Image *image,CubeInfo *cube_info,
NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
DefineImageColormap(image,cube_info,node_info->child[i]);
if (node_info->number_unique != 0)
{
MagickRealType
alpha;
PixelPacket
*magick_restrict q;
/*
Colormap entry is defined by the mean color in this cube.
*/
q=image->colormap+image->colors;
alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
alpha=PerceptibleReciprocal(alpha);
if (cube_info->associate_alpha == MagickFalse)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
SetPixelOpacity(q,OpaqueOpacity);
}
else
{
MagickRealType
opacity;
opacity=(MagickRealType) (alpha*QuantumRange*
node_info->total_color.opacity);
SetPixelOpacity(q,ClampToQuantum(opacity));
if (q->opacity == OpaqueOpacity)
{
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
QuantumRange*node_info->total_color.blue)));
}
else
{
double
gamma;
gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
gamma=PerceptibleReciprocal(gamma);
SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
gamma*QuantumRange*node_info->total_color.blue)));
if (node_info->number_unique > cube_info->transparent_pixels)
{
cube_info->transparent_pixels=node_info->number_unique;
cube_info->transparent_index=(ssize_t) image->colors;
}
}
}
node_info->color_number=image->colors++;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyCubeInfo() deallocates memory associated with a CubeInfo structure.
%
% The format of the DestroyCubeInfo method is:
%
% DestroyCubeInfo(CubeInfo *cube_info)
%
% A description of each parameter follows:
%
% o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
Nodes
*nodes;
/*
Release color cube tree storage.
*/
do
{
nodes=cube_info->node_queue->next;
cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
cube_info->node_queue->nodes);
cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
cube_info->node_queue);
cube_info->node_queue=nodes;
} while (cube_info->node_queue != (Nodes *) NULL);
if (cube_info->memory_info != (MemoryInfo *) NULL)
cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
% structure.
%
% The format of the DestroyQuantizeInfo method is:
%
% QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
quantize_info->signature=(~MagickCoreSignature);
quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
return(quantize_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DitherImage() distributes the difference between an original image and
% the corresponding color-reduced image to neighboring pixels using
% serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
% MagickTrue if the image is dithered, otherwise MagickFalse.
%
% The format of the DitherImage method is:
%
% MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
ssize_t
i;
assert(pixels != (DoublePixelPacket **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (DoublePixelPacket *) NULL)
pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
return(pixels);
}
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
DoublePixelPacket
**pixels;
size_t
number_threads;
ssize_t
i;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (DoublePixelPacket **) NULL)
return((DoublePixelPacket **) NULL);
(void) memset(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,
2*sizeof(**pixels));
if (pixels[i] == (DoublePixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
}
return(pixels);
}
static inline ssize_t CacheOffset(CubeInfo *cube_info,
const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))
ssize_t
offset;
offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
if (cube_info->associate_alpha != MagickFalse)
offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
return(offset);
}
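/*
  Illustrative sketch (not part of MagickCore; ExampleDitherCacheLength is a
  hypothetical name): CacheOffset() keeps the top (8-CacheShift) bits of
  each 8-bit channel and packs up to four channels side by side, so the
  dither cache needs 1 << (4*(8-CacheShift)) entries -- the exact length
  allocated in GetCubeInfo() below.
*/
static inline size_t ExampleDitherCacheLength(const size_t cache_shift)
{
  return((size_t) (1UL << (4*(8-cache_shift))));
}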
static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag "Dither/Image"
CacheView
*image_view;
DoublePixelPacket
**pixels;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
/*
Distribute quantization error using Floyd-Steinberg.
*/
pixels=AcquirePixelThreadSet(image->columns);
if (pixels == (DoublePixelPacket **) NULL)
return(MagickFalse);
exception=(&image->exception);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
CubeInfo
cube;
DoublePixelPacket
*current,
*previous;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
size_t
index;
ssize_t
x,
v;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
cube=(*cube_info);
current=pixels[id]+(y & 0x01)*image->columns;
previous=pixels[id]+((y+1) & 0x01)*image->columns;
v=(ssize_t) ((y & 0x01) ? -1 : 1);
for (x=0; x < (ssize_t) image->columns; x++)
{
DoublePixelPacket
color,
pixel;
ssize_t
i;
ssize_t
u;
u=(y & 0x01) ? (ssize_t) image->columns-1-x : x;
AssociateAlphaPixel(&cube,q+u,&pixel);
if (x > 0)
{
pixel.red+=7.0*cube_info->diffusion*current[u-v].red/16;
pixel.green+=7.0*cube_info->diffusion*current[u-v].green/16;
pixel.blue+=7.0*cube_info->diffusion*current[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=7.0*cube_info->diffusion*current[u-v].opacity/16;
}
if (y > 0)
{
if (x < (ssize_t) (image->columns-1))
{
pixel.red+=cube_info->diffusion*previous[u+v].red/16;
pixel.green+=cube_info->diffusion*previous[u+v].green/16;
pixel.blue+=cube_info->diffusion*previous[u+v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=cube_info->diffusion*previous[u+v].opacity/16;
}
pixel.red+=5.0*cube_info->diffusion*previous[u].red/16;
pixel.green+=5.0*cube_info->diffusion*previous[u].green/16;
pixel.blue+=5.0*cube_info->diffusion*previous[u].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=5.0*cube_info->diffusion*previous[u].opacity/16;
if (x > 0)
{
pixel.red+=3.0*cube_info->diffusion*previous[u-v].red/16;
pixel.green+=3.0*cube_info->diffusion*previous[u-v].green/16;
pixel.blue+=3.0*cube_info->diffusion*previous[u-v].blue/16;
if (cube.associate_alpha != MagickFalse)
pixel.opacity+=3.0*cube_info->diffusion*
previous[u-v].opacity/16;
}
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube.associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(&cube,&pixel);
if (cube.cache[i] < 0)
{
NodeInfo
*node_info;
size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=cube.root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(&cube,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
cube.target=pixel;
cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+
1.0)+1.0);
ClosestColor(image,&cube,node_info->parent);
cube.cache[i]=(ssize_t) cube.color_number;
}
/*
Assign pixel to closest colormap entry.
*/
index=(size_t) cube.cache[i];
if (image->storage_class == PseudoClass)
SetPixelIndex(indexes+u,index);
if (cube.quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q+u,image->colormap+index);
if (cube.associate_alpha != MagickFalse)
SetPixelOpacity(q+u,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
/*
Store the error.
*/
AssociateAlphaPixel(&cube,image->colormap+index,&color);
current[u].red=pixel.red-color.red;
current[u].green=pixel.green-color.green;
current[u].blue=pixel.blue-color.blue;
if (cube.associate_alpha != MagickFalse)
current[u].opacity=pixel.opacity-color.opacity;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
image_view=DestroyCacheView(image_view);
pixels=DestroyPixelThreadSet(pixels);
  return(status);
}
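/*
  Illustrative note (not part of MagickCore): FloydSteinbergDither() gathers
  error from already-visited neighbors rather than pushing it forward.
  Relative to the current pixel X on a left-to-right row, the weights
  applied above form the classic Floyd-Steinberg stencil:

    previous row:  3/16  5/16  1/16
    current row:   7/16    X

  On odd rows the scan direction is reversed (serpentine scanning), and the
  u and v offsets mirror the stencil accordingly.
*/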
static MagickBooleanType
RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int);
static MagickBooleanType Riemersma(Image *image,CacheView *image_view,
CubeInfo *cube_info,const size_t level,const unsigned int direction)
{
MagickStatusType
status;
status=MagickTrue;
if (level == 1)
switch (direction)
{
case WestGravity:
{
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
break;
}
case EastGravity:
{
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
break;
}
case NorthGravity:
{
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
break;
}
case SouthGravity:
{
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
break;
}
default:
break;
}
else
switch (direction)
{
case WestGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
break;
}
case EastGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
break;
}
case NorthGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,EastGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
break;
}
case SouthGravity:
{
status=Riemersma(image,image_view,cube_info,level-1,EastGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,NorthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,WestGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,SouthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,SouthGravity);
if (status != MagickFalse)
status=Riemersma(image,image_view,cube_info,level-1,WestGravity);
break;
}
default:
break;
}
return(status != 0 ? MagickTrue : MagickFalse);
}
static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view,
CubeInfo *cube_info,const unsigned int direction)
{
#define DitherImageTag "Dither/Image"
CubeInfo
*p;
DoublePixelPacket
color,
pixel;
MagickBooleanType
proceed;
size_t
index;
p=cube_info;
if ((p->x >= 0) && (p->x < (ssize_t) image->columns) &&
(p->y >= 0) && (p->y < (ssize_t) image->rows))
{
ExceptionInfo
*exception;
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
i;
/*
Distribute error.
*/
exception=(&image->exception);
q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception);
if (q == (PixelPacket *) NULL)
return(MagickFalse);
indexes=GetCacheViewAuthenticIndexQueue(image_view);
AssociateAlphaPixel(cube_info,q,&pixel);
for (i=0; i < ErrorQueueLength; i++)
{
pixel.red+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].red;
pixel.green+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].green;
pixel.blue+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].blue;
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity+=ErrorRelativeWeight*cube_info->diffusion*p->weights[i]*
p->error[i].opacity;
}
pixel.red=(MagickRealType) ClampPixel(pixel.red);
pixel.green=(MagickRealType) ClampPixel(pixel.green);
pixel.blue=(MagickRealType) ClampPixel(pixel.blue);
if (cube_info->associate_alpha != MagickFalse)
pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity);
i=CacheOffset(cube_info,&pixel);
if (p->cache[i] < 0)
{
NodeInfo
*node_info;
size_t
id;
/*
Identify the deepest node containing the pixel's color.
*/
node_info=p->root;
for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
{
id=ColorToNodeId(cube_info,&pixel,index);
if (node_info->child[id] == (NodeInfo *) NULL)
break;
node_info=node_info->child[id];
}
/*
Find closest color among siblings and their children.
*/
p->target=pixel;
p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType)
QuantumRange+1.0)+1.0);
ClosestColor(image,p,node_info->parent);
p->cache[i]=(ssize_t) p->color_number;
}
/*
Assign pixel to closest colormap entry.
*/
      index=(size_t) p->cache[i];
if (image->storage_class == PseudoClass)
*indexes=(IndexPacket) index;
if (cube_info->quantize_info->measure_error == MagickFalse)
{
SetPixelRgb(q,image->colormap+index);
if (cube_info->associate_alpha != MagickFalse)
SetPixelOpacity(q,image->colormap[index].opacity);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
return(MagickFalse);
/*
Propagate the error as the last entry of the error queue.
*/
(void) memmove(p->error,p->error+1,(ErrorQueueLength-1)*
sizeof(p->error[0]));
AssociateAlphaPixel(cube_info,image->colormap+index,&color);
p->error[ErrorQueueLength-1].red=pixel.red-color.red;
p->error[ErrorQueueLength-1].green=pixel.green-color.green;
p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
if (cube_info->associate_alpha != MagickFalse)
p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
if (proceed == MagickFalse)
return(MagickFalse);
p->offset++;
}
switch (direction)
{
case WestGravity: p->x--; break;
case EastGravity: p->x++; break;
case NorthGravity: p->y--; break;
case SouthGravity: p->y++; break;
}
return(MagickTrue);
}
static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
CacheView
*image_view;
const char
*artifact;
MagickBooleanType
status;
size_t
extent,
level;
artifact=GetImageArtifact(image,"dither:diffusion-amount");
if (artifact != (const char *) NULL)
cube_info->diffusion=StringToDoubleInterval(artifact,1.0);
if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
return(FloydSteinbergDither(image,cube_info));
/*
Distribute quantization error along a Hilbert curve.
*/
(void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error));
cube_info->x=0;
cube_info->y=0;
extent=MagickMax(image->columns,image->rows);
level=(size_t) log2((double) extent);
if ((1UL << level) < extent)
level++;
cube_info->offset=0;
cube_info->span=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,&image->exception);
status=MagickTrue;
if (level > 0)
status=Riemersma(image,image_view,cube_info,level,NorthGravity);
if (status != MagickFalse)
status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
image_view=DestroyCacheView(image_view);
return(status);
}
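/*
  Illustrative note (not part of MagickCore): the Hilbert-curve order chosen
  in DitherImage() is the smallest level with 2^level >= max(columns,rows).
  For a 1000x800 image, extent=1000 and log2(1000) ~ 9.97, so level starts
  at 9; since (1UL << 9) = 512 < 1000 it is bumped to 10, and a 1024x1024
  curve covers every pixel.
*/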
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t C u b e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetCubeInfo() initializes the Cube data structure.
%
% The format of the GetCubeInfo method is:
%
%      CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
% const size_t depth,const size_t maximum_colors)
%
% A description of each parameter follows.
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o depth: Normally, this integer value is zero or one. A zero or
% one tells Quantize to choose an optimal tree depth of Log4(number_colors).
% A tree of this depth generally allows the best representation of the
% reference image with the least amount of memory and the fastest
% computational speed. In some cases, such as an image with low color
% dispersion (a small number of colors), a value other than
% Log4(number_colors) is required. To expand the color tree completely,
% use a value of 8.
%
% o maximum_colors: the maximum number of colors in the output image.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
const size_t depth,const size_t maximum_colors)
{
CubeInfo
*cube_info;
MagickRealType
weight;
size_t
length;
ssize_t
i;
/*
Initialize tree to describe color cube_info.
*/
cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
if (cube_info == (CubeInfo *) NULL)
return((CubeInfo *) NULL);
(void) memset(cube_info,0,sizeof(*cube_info));
cube_info->depth=depth;
if (cube_info->depth > MaxTreeDepth)
cube_info->depth=MaxTreeDepth;
if (cube_info->depth < 2)
cube_info->depth=2;
cube_info->maximum_colors=maximum_colors;
/*
Initialize root node.
*/
cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
if (cube_info->root == (NodeInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->root->parent=cube_info->root;
cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
if (cube_info->quantize_info->dither == MagickFalse)
return(cube_info);
/*
Initialize dither resources.
*/
length=(size_t) (1UL << (4*(8-CacheShift)));
cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache));
if (cube_info->memory_info == (MemoryInfo *) NULL)
return((CubeInfo *) NULL);
cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info);
/*
Initialize color cache.
*/
(void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length);
/*
Distribute weights along a curve of exponential decay.
*/
weight=1.0;
for (i=0; i < ErrorQueueLength; i++)
{
cube_info->weights[i]=PerceptibleReciprocal(weight);
weight*=exp(log(1.0/ErrorRelativeWeight)/(ErrorQueueLength-1.0));
}
cube_info->diffusion=1.0;
return(cube_info);
}
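/*
  Illustrative note (not part of MagickCore): the weight loop above has the
  closed form

    weights[i] = ErrorRelativeWeight^(i/(ErrorQueueLength-1)),

  a geometric decay from weights[0] = 1.0 down to
  weights[ErrorQueueLength-1] = ErrorRelativeWeight.
*/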
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t N o d e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNodeInfo() allocates memory for a new node in the color cube tree and
% presets all fields to zero.
%
% The format of the GetNodeInfo method is:
%
% NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
% const size_t level,NodeInfo *parent)
%
% A description of each parameter follows.
%
% o node: The GetNodeInfo method returns a pointer to the newly allocated
% node.
%
% o id: Specifies the child number of the node.
%
% o level: Specifies the level in the color cube tree where the node
% resides.
%
% o parent: the parent node in the color cube tree.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
const size_t level,NodeInfo *parent)
{
NodeInfo
*node_info;
if (cube_info->free_nodes == 0)
{
Nodes
*nodes;
/*
Allocate a new queue of nodes.
*/
nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
if (nodes == (Nodes *) NULL)
return((NodeInfo *) NULL);
nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
sizeof(*nodes->nodes));
if (nodes->nodes == (NodeInfo *) NULL)
return((NodeInfo *) NULL);
nodes->next=cube_info->node_queue;
cube_info->node_queue=nodes;
cube_info->next_node=nodes->nodes;
cube_info->free_nodes=NodesInAList;
}
cube_info->nodes++;
cube_info->free_nodes--;
node_info=cube_info->next_node++;
(void) memset(node_info,0,sizeof(*node_info));
node_info->parent=parent;
node_info->id=id;
node_info->level=level;
return(node_info);
}
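/*
  Illustrative note (not part of MagickCore): nodes are carved out of arenas
  of NodesInAList entries threaded onto cube_info->node_queue.  Individual
  nodes are never returned to the pool; pruning merely unlinks them, and
  DestroyCubeInfo() releases the arenas wholesale.  This keeps GetNodeInfo()
  to a pointer bump on the hot classification path.
*/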
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t i z e E r r o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantizeError() measures the difference between the original
% and quantized images. This difference is the total quantization error.
% The error is computed by summing over all pixels in an image the distance
% squared in RGB space between each reference pixel value and its quantized
% value. These values are computed:
%
% o mean_error_per_pixel: This value is the mean error for any single
% pixel in the image.
%
% o normalized_mean_square_error: This value is the normalized mean
% quantization error for any single pixel in the image. This distance
% measure is normalized to a range between 0 and 1. It is independent
% of the range of red, green, and blue values in the image.
%
% o normalized_maximum_square_error: This value is the normalized
% maximum quantization error for any single pixel in the image. This
% distance measure is normalized to a range between 0 and 1. It is
% independent of the range of red, green, and blue values in your image.
%
% The format of the GetImageQuantizeError method is:
%
% MagickBooleanType GetImageQuantizeError(Image *image)
%
% A description of each parameter follows.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
IndexPacket
*indexes;
MagickRealType
alpha,
area,
beta,
distance,
gamma,
maximum_error,
mean_error,
mean_error_per_pixel;
ssize_t
index,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
(void) memset(&image->error,0,sizeof(image->error));
if (image->storage_class == DirectClass)
return(MagickTrue);
alpha=1.0;
beta=1.0;
area=3.0*image->columns*image->rows;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
exception=(&image->exception);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const PixelPacket
*magick_restrict p;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
index=(ssize_t) GetPixelIndex(indexes+x);
if (image->matte != MagickFalse)
{
alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
beta=(MagickRealType) (QuantumScale*(QuantumRange-
image->colormap[index].opacity));
}
distance=fabs((double) (alpha*GetPixelRed(p)-beta*
image->colormap[index].red));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelGreen(p)-beta*
image->colormap[index].green));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
distance=fabs((double) (alpha*GetPixelBlue(p)-beta*
image->colormap[index].blue));
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
p++;
}
}
image_view=DestroyCacheView(image_view);
gamma=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=gamma*mean_error_per_pixel;
image->error.normalized_mean_error=gamma*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(MagickTrue);
}
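/*
  Illustrative note (not part of MagickCore): the normalization above
  divides by area = 3*columns*rows because each pixel contributes three
  channel distances.  mean_error_per_pixel is therefore a per-channel mean
  in quantum units, normalized_mean_error rescales the squared sum by
  QuantumScale*QuantumScale into [0,1], and normalized_maximum_error is the
  single worst channel distance rescaled to [0,1].
*/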
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t Q u a n t i z e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetQuantizeInfo() initializes the QuantizeInfo structure.
%
% The format of the GetQuantizeInfo method is:
%
% GetQuantizeInfo(QuantizeInfo *quantize_info)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(quantize_info != (QuantizeInfo *) NULL);
(void) memset(quantize_info,0,sizeof(*quantize_info));
quantize_info->number_colors=256;
quantize_info->dither=MagickTrue;
quantize_info->dither_method=RiemersmaDitherMethod;
quantize_info->colorspace=UndefinedColorspace;
quantize_info->measure_error=MagickFalse;
quantize_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o s t e r i z e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PosterizeImage() reduces the image to a limited number of colors for a
% "poster" effect.
%
% The format of the PosterizeImage method is:
%
% MagickBooleanType PosterizeImage(Image *image,const size_t levels,
% const MagickBooleanType dither)
% MagickBooleanType PosterizeImageChannel(Image *image,
% const ChannelType channel,const size_t levels,
% const MagickBooleanType dither)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to an Image structure.
%
% o levels: Number of color levels allowed in each channel. Very low values
% (2, 3, or 4) have the most visible effect.
%
% o dither: Set this integer value to something other than zero to dither
% the mapped image.
%
*/
static inline double MagickRound(double x)
{
/*
Round the fraction to nearest integer.
*/
if ((x-floor(x)) < (ceil(x)-x))
return(floor(x));
return(ceil(x));
}
MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
const MagickBooleanType dither)
{
MagickBooleanType
status;
status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
return(status);
}
MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag "Posterize/Image"
#define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \
MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickOffsetType
progress;
QuantizeInfo
*quantize_info;
ssize_t
i,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->colors,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
/*
Posterize colormap.
*/
if ((channel & RedChannel) != 0)
image->colormap[i].red=PosterizePixel(image->colormap[i].red);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=PosterizePixel(image->colormap[i].green);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
}
/*
Posterize image.
*/
status=MagickTrue;
progress=0;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
levels,MaxColormapSize+1);
quantize_info->dither=dither;
quantize_info->tree_depth=MaxTreeDepth;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
return(status);
}
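/*
  Illustrative note (not part of MagickCore): with levels=4,
  PosterizePixel() maps a quantum q to
  QuantumRange*MagickRound(3*q/QuantumRange)/3, one of the four evenly
  spaced values {0, QuantumRange/3, 2*QuantumRange/3, QuantumRange}.  A
  mid-gray q = QuantumRange/2 yields 3*q/QuantumRange = 1.5, which
  MagickRound() takes to 2, landing on 2*QuantumRange/3.
*/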
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e C h i l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneChild() deletes the given node and merges its statistics into its
% parent.
%
% The format of the PruneChild method is:
%
% PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
NodeInfo
*parent;
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneChild(cube_info,node_info->child[i]);
/*
Merge color statistics into parent.
*/
parent=node_info->parent;
parent->number_unique+=node_info->number_unique;
parent->total_color.red+=node_info->total_color.red;
parent->total_color.green+=node_info->total_color.green;
parent->total_color.blue+=node_info->total_color.blue;
parent->total_color.opacity+=node_info->total_color.opacity;
parent->child[node_info->id]=(NodeInfo *) NULL;
cube_info->nodes--;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e L e v e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneLevel() deletes all nodes at the bottom level of the color tree merging
% their color statistics into their parent node.
%
% The format of the PruneLevel method is:
%
% PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneLevel(cube_info,node_info->child[i]);
if (node_info->level == cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P r u n e T o C u b e D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PruneToCubeDepth() deletes any nodes at a depth greater than
% cube_info->depth while merging their color statistics into their parent
% node.
%
% The format of the PruneToCubeDepth method is:
%
% PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
PruneToCubeDepth(cube_info,node_info->child[i]);
if (node_info->level > cube_info->depth)
PruneChild(cube_info,node_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImage() analyzes the colors within a reference image and chooses a
% fixed number of colors to represent the image. The goal of the algorithm
% is to minimize the color difference between the input and output image while
% minimizing the processing time.
%
% The format of the QuantizeImage method is:
%
% MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
% Image *image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
Image *image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
size_t
depth,
maximum_colors;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
if (image->matte == MagickFalse)
{
if (SetImageGray(image,&image->exception) != MagickFalse)
(void) SetGrayscaleImage(image);
}
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if ((quantize_info->dither != MagickFalse) && (depth > 2))
depth--;
if ((image->matte != MagickFalse) && (depth > 5))
depth--;
if (SetImageGray(image,&image->exception) != MagickFalse)
depth=MaxTreeDepth;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status != MagickFalse)
{
/*
Reduce the number of colors in the image.
*/
if (cube_info->colors > cube_info->maximum_colors)
ReduceImageColors(image,cube_info);
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u a n t i z e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeImages() analyzes the colors within a set of reference images and
% chooses a fixed number of colors to represent the set. The goal of the
% algorithm is to minimize the color difference between the input and output
% images while minimizing the processing time.
%
% The format of the QuantizeImages method is:
%
% MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
% Image *images)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
Image *images)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
proceed,
status;
MagickProgressMonitor
progress_monitor;
size_t
depth,
maximum_colors,
number_images;
ssize_t
i;
assert(quantize_info != (const QuantizeInfo *) NULL);
assert(quantize_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
if (GetNextImageInList(images) == (Image *) NULL)
{
/*
Handle a single image with QuantizeImage.
*/
status=QuantizeImage(quantize_info,images);
return(status);
}
status=MagickFalse;
maximum_colors=quantize_info->number_colors;
if (maximum_colors == 0)
maximum_colors=MaxColormapSize;
if (maximum_colors > MaxColormapSize)
maximum_colors=MaxColormapSize;
depth=quantize_info->tree_depth;
if (depth == 0)
{
size_t
colors;
/*
Depth of color tree is: Log4(colormap size)+2.
*/
colors=maximum_colors;
for (depth=1; colors != 0; depth++)
colors>>=2;
if (quantize_info->dither != MagickFalse)
depth--;
}
/*
Initialize color cube.
*/
cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
if (cube_info == (CubeInfo *) NULL)
{
(void) ThrowMagickException(&images->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return(MagickFalse);
}
number_images=GetImageListLength(images);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
image->client_data);
status=ClassifyImageColors(cube_info,image,&image->exception);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
if (status != MagickFalse)
{
/*
Reduce the number of colors in an image sequence.
*/
ReduceImageColors(images,cube_info);
image=images;
for (i=0; image != (Image *) NULL; i++)
{
progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
NULL,image->client_data);
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
(void) SetImageProgressMonitor(image,progress_monitor,
image->client_data);
proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
number_images);
if (proceed == MagickFalse)
break;
image=GetNextImageInList(image);
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u a n t i z e E r r o r F l a t t e n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QuantizeErrorFlatten() traverses the color cube and flattens the
% quantization error into a 1D array that the caller then sorts. This
% accelerates the color reduction process.
%
% Contributed by Yoya.
%
% The format of the QuantizeErrorFlatten method is:
%
% size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
% const NodeInfo *node_info,const ssize_t offset,
% MagickRealType *quantize_error)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to the current node in the color cube tree.
%
% o offset: quantize error offset.
%
% o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
const NodeInfo *node_info,const ssize_t offset,
MagickRealType *quantize_error)
{
size_t
n,
number_children;
ssize_t
i;
if (offset >= (ssize_t) cube_info->nodes)
return(0);
quantize_error[offset]=node_info->quantize_error;
n=1;
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children ; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
quantize_error);
return(n);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Reduce() traverses the color cube tree and prunes any node whose
% quantization error falls below a particular threshold.
%
% The format of the Reduce method is:
%
% Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
% A description of each parameter follows.
%
% o cube_info: A pointer to the Cube structure.
%
% o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
size_t
number_children;
ssize_t
i;
/*
Traverse any children.
*/
number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
for (i=0; i < (ssize_t) number_children; i++)
if (node_info->child[i] != (NodeInfo *) NULL)
Reduce(cube_info,node_info->child[i]);
if (node_info->quantize_error <= cube_info->pruning_threshold)
PruneChild(cube_info,node_info);
else
{
/*
Find minimum pruning threshold.
*/
if (node_info->number_unique > 0)
cube_info->colors++;
if (node_info->quantize_error < cube_info->next_threshold)
cube_info->next_threshold=node_info->quantize_error;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e d u c e I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReduceImageColors() repeatedly prunes the tree until the number of nodes
% with n2 > 0 is less than or equal to the maximum number of colors allowed
% in the output image. On any given iteration over the tree, it selects
% those nodes whose E value is minimal for pruning and merges their
% color statistics upward. It uses a pruning threshold, Ep, to govern
% node selection as follows:
%
% Ep = 0
% while number of nodes with (n2 > 0) > required maximum number of colors
% prune all nodes such that E <= Ep
% Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent. This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors. When n2
% > 0 the node will uniquely define a color in the output image. At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree, which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors
% within the cubic volume which the node represents. This includes n1 -
% n2 pixels whose colors should be defined by nodes at a lower level in
% the tree.
%
% The format of the ReduceImageColors method is:
%
% ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cube_info: A pointer to the Cube structure.
%
*/
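/*
  A minimal standalone sketch of the Ep iteration described above, operating
  on a flat array of node errors instead of the real tree; ToyReduce() and
  its parameters are illustrative only, not part of MagickCore:
*/
static size_t ToyReduce(double *E,const size_t n,const size_t max_colors)
{
  double
    Ep = 0.0,
    next;

  size_t
    colors,
    i;

  colors=n;
  while (colors > max_colors)
  {
    next=1.0e300;
    colors=0;
    for (i=0; i < n; i++)
      if (E[i] <= Ep)
        E[i]=0.0;  /* pruned: its statistics would merge into the parent */
      else
        {
          colors++;
          if (E[i] < next)
            next=E[i];  /* minimum surviving E becomes the next Ep */
        }
    Ep=next;
  }
  return(colors);
}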
static int MagickRealTypeCompare(const void *error_p,const void *error_q)
{
MagickRealType
*p,
*q;
p=(MagickRealType *) error_p;
q=(MagickRealType *) error_q;
if (*p > *q)
return(1);
if (fabs((double) (*q-*p)) <= MagickEpsilon)
return(0);
return(-1);
}
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag "Reduce/Image"
MagickBooleanType
proceed;
MagickOffsetType
offset;
size_t
span;
cube_info->next_threshold=0.0;
if (cube_info->colors > cube_info->maximum_colors)
{
MagickRealType
*quantize_error;
/*
Enable rapid reduction of the number of unique colors.
*/
quantize_error=(MagickRealType *) AcquireQuantumMemory(cube_info->nodes,
sizeof(*quantize_error));
if (quantize_error != (MagickRealType *) NULL)
{
(void) QuantizeErrorFlatten(cube_info,cube_info->root,0,
quantize_error);
qsort(quantize_error,cube_info->nodes,sizeof(MagickRealType),
MagickRealTypeCompare);
if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100))
cube_info->next_threshold=quantize_error[cube_info->nodes-110*
(cube_info->maximum_colors+1)/100];
quantize_error=(MagickRealType *) RelinquishMagickMemory(
quantize_error);
}
}
for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
{
cube_info->pruning_threshold=cube_info->next_threshold;
cube_info->next_threshold=cube_info->root->quantize_error-1;
cube_info->colors=0;
Reduce(cube_info,cube_info->root);
offset=(MagickOffsetType) span-cube_info->colors;
proceed=SetImageProgress(image,ReduceImageTag,offset,span-
cube_info->maximum_colors+1);
if (proceed == MagickFalse)
break;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImage() replaces the colors of an image with the closest color from
% a reference image.
%
% The format of the RemapImage method is:
%
% MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
% Image *image,const Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o image: the image.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
Image *image,const Image *remap_image)
{
CubeInfo
*cube_info;
MagickBooleanType
status;
/*
Initialize color cube.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(remap_image != (Image *) NULL);
assert(remap_image->signature == MagickCoreSignature);
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Classify image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
status=AssignImageColors(image,cube_info);
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m a p I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemapImages() replaces the colors of a sequence of images with the
% closest color from a reference image.
%
% The format of the RemapImages method is:
%
% MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
% Image *images,Image *remap_image)
%
% A description of each parameter follows:
%
% o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
% o images: the image sequence.
%
% o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
Image *images,const Image *remap_image)
{
CubeInfo
*cube_info;
Image
*image;
MagickBooleanType
status;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
if (remap_image == (Image *) NULL)
{
/*
Create a global colormap for an image sequence.
*/
status=QuantizeImages(quantize_info,images);
return(status);
}
/*
Classify image colors from the reference image.
*/
cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
quantize_info->number_colors);
if (cube_info == (CubeInfo *) NULL)
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=ClassifyImageColors(cube_info,remap_image,&image->exception);
if (status != MagickFalse)
{
/*
Classify image colors from the reference image.
*/
cube_info->quantize_info->number_colors=cube_info->colors;
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
status=AssignImageColors(image,cube_info);
if (status == MagickFalse)
break;
}
}
DestroyCubeInfo(cube_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
% The format of the SetGrayscaleImage method is:
%
% MagickBooleanType SetGrayscaleImage(Image *image)
%
% A description of each parameter follows:
%
% o image: The image.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
double
intensity;
PixelPacket
*color_1,
*color_2;
color_1=(PixelPacket *) x;
color_2=(PixelPacket *) y;
intensity=PixelPacketIntensity(color_1)-PixelPacketIntensity(color_2);
if (intensity < (double) INT_MIN)
intensity=(double) INT_MIN;
if (intensity > (double) INT_MAX)
intensity=(double) INT_MAX;
return((int) intensity);
}
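/*
  Note: the clamp above is required because casting a double outside
  [INT_MIN, INT_MAX] to int is undefined behavior. An equivalent,
  clamp-free comparator (illustrative only):

    return (PixelPacketIntensity(color_1) > PixelPacketIntensity(color_2)) -
           (PixelPacketIntensity(color_1) < PixelPacketIntensity(color_2));
*/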
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static MagickBooleanType SetGrayscaleImage(Image *image)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
PixelPacket
*colormap;
size_t
extent;
ssize_t
*colormap_index,
i,
j,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
exception=(&image->exception);
if (image->type != GrayscaleType)
(void) TransformImageColorspace(image,GRAYColorspace);
extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
sizeof(*colormap_index));
if (colormap_index == (ssize_t *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
if (image->storage_class != PseudoClass)
{
(void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
if (AcquireImageColormap(image,MaxColormapSize) == MagickFalse)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
image->colors=0;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
intensity;
intensity=ScaleQuantumToMap(GetPixelRed(q));
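/*
  Double-checked locking: test once without synchronization to keep the
  hot path cheap, then re-test inside the critical section below before
  creating the colormap entry.
*/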
if (colormap_index[intensity] < 0)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
if (colormap_index[intensity] < 0)
{
colormap_index[intensity]=(ssize_t) image->colors;
image->colormap[image->colors].red=GetPixelRed(q);
image->colormap[image->colors].green=GetPixelGreen(q);
image->colormap[image->colors].blue=GetPixelBlue(q);
image->colors++;
}
}
SetPixelIndex(indexes+x,colormap_index[intensity]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
}
(void) memset(colormap_index,0,extent*sizeof(*colormap_index));
for (i=0; i < (ssize_t) image->colors; i++)
image->colormap[i].opacity=(Quantum) i;
qsort((void *) image->colormap,image->colors,sizeof(PixelPacket),
IntensityCompare);
colormap=(PixelPacket *) AcquireQuantumMemory(image->colors,
sizeof(*colormap));
if (colormap == (PixelPacket *) NULL)
{
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
j=0;
colormap[j]=image->colormap[0];
for (i=0; i < (ssize_t) image->colors; i++)
{
if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse)
{
j++;
colormap[j]=image->colormap[i];
}
colormap_index[(ssize_t) image->colormap[i].opacity]=j;
}
image->colors=(size_t) (j+1);
image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
image->colormap=colormap;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex(
indexes+x))]);
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
image->type=GrayscaleType;
if (SetImageMonochrome(image,&image->exception) != MagickFalse)
image->type=BilevelType;
return(status);
}
|
conv_dw_kernel_rv64.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
 * Parts of the following code in this file refer to
* https://github.com/Tencent/ncnn/blob/master/src/layer/arm/convolutiondepthwise_5x5.h
* Tencent is pleased to support the open source community by making ncnn
* available.
*
* Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
*
* Licensed under the BSD 3-Clause License (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the
* License at
*
* https://opensource.org/licenses/BSD-3-Clause
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "conv_dw_kernel_rv64.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
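/* activation == 0 applies a plain ReLU; activation > 0 additionally clamps
   the output to that ceiling (e.g. 6 yields ReLU6) */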
static void relu(float* data, int size, int activation)
{
for (int i = 0; i < size; i++)
{
data[i] = max(data[i], ( float )0);
if (activation > 0)
{
data[i] = min(data[i], ( float )activation);
}
}
}
static void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
float* ptr = input;
float* outptr = output;
int y = 0;
// fill top
for (; y < top; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
// fill center
for (; y < (top + in_h); y++)
{
int x = 0;
for (; x < left; x++)
{
outptr[x] = v;
}
if (in_w < 12)
{
for (; x < (left + in_w); x++)
{
outptr[x] = ptr[x - left];
}
}
else
{
memcpy(outptr + left, ptr, in_w * sizeof(float));
x += in_w;
}
for (; x < out_w; x++)
{
outptr[x] = v;
}
ptr += in_w;
outptr += out_w;
}
// fill bottom
for (; y < out_h; y++)
{
int x = 0;
for (; x < out_w; x++)
{
outptr[x] = v;
}
outptr += out_w;
}
}
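/*
 * Worked example: a 2x2 input padded with top=1, left=1 into a 4x4 output
 * with v=0 becomes
 *
 *     0 0 0 0
 *     0 a b 0
 *     0 c d 0
 *     0 0 0 0
 *
 * (out_h/out_w must equal in_h/in_w plus the total padding).
 */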
static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
float* outptr2 = outptr + outw;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* kernel0 = kernel + g * 9;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
float sum2 = bias0;
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const int tailstep = w - 2 * outw + w;
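    /* after one output row, r0 has advanced 2*outw from the row start;
       tailstep skips the unread tail of that row plus one full input row,
       so the next output row reads two input rows down (stride 2) */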
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
const float* kernel0 = kernel + g * 9;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
static void convdw5x5s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
float* outptr2 = outptr + outw;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* kernel0 = kernel + g * 25;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* r4 = img0 + w * 4;
const float* r5 = img0 + w * 5;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
int i = 0;
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
float sum2 = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
*outptr = sum;
*outptr2 = sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr = sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
static void convdw5x5s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w,
int out_h, int out_w, int num_thread)
{
int w = in_w;
int h = in_h;
int c_step_in = w * h;
int outw = out_w;
int outh = out_h;
int c_step_out = outw * outh;
const int group = channel;
const int tailstep = w - 2 * outw + w;
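    /* same row-skipping scheme as convdw3x3s2: skip the unread row tail
       plus one full input row after each output row */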
const float* kernel = _kernel;
#pragma omp parallel for num_threads(num_thread)
for (int g = 0; g < group; g++)
{
float* out = output + g * c_step_out;
float* outptr = out;
const float* kernel0 = kernel + g * 25;
const float bias0 = _bias ? _bias[g] : 0.f;
const float* img0 = input + g * c_step_in;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* r4 = img0 + w * 4;
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
int i = 0;
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = bias0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
int conv_dw_run(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
struct tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity)
{
float* input = ( float* )input_tensor->data;
float* output = ( float* )output_tensor->data;
float* kernel = ( float* )weight_tensor->data;
float* biases = NULL;
if (bias_tensor)
biases = ( float* )bias_tensor->data;
int batch_number = input_tensor->dims[0];
int inc = input_tensor->dims[1];
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int in_chw = inc * inh * inw;
int outc = output_tensor->dims[1];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
int out_hw = outh * outw;
int out_chw = out_hw * outc;
int ksize_h = param->kernel_h;
int ksize_w = param->kernel_w;
int pad_w = param->pad_w0;
int pad_h = param->pad_h0;
int stride_w = param->stride_w;
int stride_h = param->stride_h;
int dilation_w = param->dilation_w;
int dilation_h = param->dilation_h;
int group = param->group;
int activation = param->activation;
    /* padding: allocate one padded buffer per batch, reused across batches */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    float* input_tmp = NULL;
    if (!(inh_tmp == inh && inw_tmp == inw))
        input_tmp = ( float* )sys_malloc(inh_tmp * inw_tmp * group * sizeof(float));
    /* process: advance the input/output pointers for each image in the batch */
    for (int i = 0; i < batch_number; i++)
    {
        float* input_cur = input + i * in_chw;
        float* output_cur = output + i * out_chw;
        if (input_tmp)
        {
#pragma omp parallel for num_threads(num_thread)
            for (int g = 0; g < group; g++)
            {
                float* pad_in = input_cur + g * inh * inw;
                float* pad_out = input_tmp + g * inh_tmp * inw_tmp;
                pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f);
            }
            input_cur = input_tmp;
        }
        if (ksize_h == 3 && stride_h == 1)
            convdw3x3s1(output_cur, input_cur, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else if (ksize_h == 3 && stride_h == 2)
            convdw3x3s2(output_cur, input_cur, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else if (ksize_h == 5 && stride_h == 1)
            convdw5x5s1(output_cur, input_cur, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else if (ksize_h == 5 && stride_h == 2)
            convdw5x5s2(output_cur, input_cur, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread);
        else
            TLOG_ERR("convdw %d x %d, s %d not supported.\n", ksize_h, ksize_w, stride_h);
    }
/* relu */
if (activation >= 0)
relu(output, batch_number * out_chw, activation);
    if (input_tmp)
        sys_free(input_tmp);
return 0;
}
|
sort.c | /*
* sort.c
*
* @author: phdenzel
*
* Quicksort algorithms for DYAMA
*
* Parallel quicksort can cause bus error 10, because
* some systems set the non-main stack size to < 1MB,
* which is reached at N ~ 4096
*
*/
#include "sort.h"
#include "world.h"
void swapP(particle_t *p1, particle_t *p2) {
// swap particle pointers
particle_t temp;
temp = *p1;
*p1 = *p2;
*p2 = temp;
}
void swap(universe *u, int i, int j) {
// swap elements i and j in particles
particle_t temp;
temp = u->particles[i];
u->particles[i] = u->particles[j];
u->particles[j] = temp;
}
int xpartition(universe *u, int left, int right) {
// partition particles from left to right in x
double pivot = u->particles[left].r.x;
int i = left;
int j = right+1;
while (1) {
do ++i; while (i <= right && u->particles[i].r.x <= pivot);
do --j; while (u->particles[j].r.x > pivot);
if (i >= j) break;
swap(u, i, j);
}
swap(u, left, j);
return j;
}
int ypartition(universe *u, int left, int right) {
// partition particles from left to right in y
double pivot = u->particles[left].r.y;
int i = left;
int j = right+1;
while (1) {
do ++i; while (i <= right && u->particles[i].r.y <= pivot);
do --j; while (u->particles[j].r.y > pivot);
if (i >= j) break;
swap(u, i, j);
}
swap(u, left, j);
return j;
}
int zpartition(universe *u, int left, int right) {
// partition particles from left to right in z
double pivot = u->particles[left].r.z;
int i = left;
int j = right+1;
while (1) {
do ++i; while (i <= right && u->particles[i].r.z <= pivot);
do --j; while (u->particles[j].r.z > pivot);
if (i >= j) break;
swap(u, i, j);
}
swap(u, left, j);
return j;
}
void xQuickSort(universe *u, int left, int right) {
// quicksort particles by x-coordinate, recursing in parallel sections
int j;
if (left < right) {
// divide and conquer
j = xpartition(u, left, right);
#pragma omp parallel sections
{
#pragma omp section
{
xQuickSort(u, left, j-1);
}
#pragma omp section
{
xQuickSort(u, j+1, right);
}
}
}
}
void yQuickSort(universe *u, int left, int right) {
// quicksort particles by y-coordinate, recursing in parallel sections
int j;
if (left < right) {
// divide and conquer
j = ypartition(u, left, right);
#pragma omp parallel sections
{
#pragma omp section
{
yQuickSort(u, left, j-1);
}
#pragma omp section
{
yQuickSort(u, j+1, right);
}
}
}
}
void zQuickSort(universe *u, int left, int right) {
// quicksort particles by z-coordinate, recursing in parallel sections
int j;
if (left < right) {
// divide and conquer
j = zpartition(u, left, right);
#pragma omp parallel sections
{
#pragma omp section
{
zQuickSort(u, left, j-1);
}
#pragma omp section
{
zQuickSort(u, j+1, right);
}
}
}
}
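/*
 * The bus-error caveat in the header (small non-main thread stacks) can be
 * mitigated by capping the parallel recursion depth and recursing serially
 * below the cutoff. A sketch, not part of DYAMA: xQuickSortTask and its
 * depth parameter are illustrative names; call it from inside an
 * omp parallel/single region.
 */
static void xQuickSortTask(universe *u, int left, int right, int depth) {
  int j;
  if (left >= right) return;
  j = xpartition(u, left, right);
  if (depth <= 0) {
    // serial below the cutoff: bounded stack usage per thread
    xQuickSortTask(u, left, j-1, 0);
    xQuickSortTask(u, j+1, right, 0);
    return;
  }
  #pragma omp task
  xQuickSortTask(u, left, j-1, depth-1);
  xQuickSortTask(u, j+1, right, depth-1);
  #pragma omp taskwait
}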
|
soma_clustering.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
//
// This model exemplifies the use of extracellular diffusion and shows
// how to extend the default "Cell". In step 0 one can see how an extra
// data member is added and can be accessed throughout the simulation with
// its Get and Set methods. N cells are randomly positioned in space, of which
// half are of type 1 and half of type -1. Each type secretes a different
// substance. Cells move towards the gradient of their own substance, which
// results in clusters being formed of cells of the same type.
//
#ifndef DEMO_SOMA_CLUSTERING_H_
#define DEMO_SOMA_CLUSTERING_H_
#include <vector>
#include "biodynamo.h"
#include "my_cell.h"
#include "validation_criterion.h"
namespace bdm {
enum Substances { kSubstance0, kSubstance1 };
inline int Simulate(int argc, const char** argv) {
auto set_param = [](Param* param) {
// Create an artificial bound for the simulation space
param->bound_space = true;
param->min_bound = 0;
param->max_bound = 250;
param->unschedule_default_operations = {"mechanical forces"};
};
Simulation simulation(argc, argv, set_param);
// Define initial model
auto* param = simulation.GetParam();
int num_cells = 20000;
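  // seed each OpenMP thread's random generator so the random agent
  // placement below is reproducible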
#pragma omp parallel
simulation.GetRandom()->SetSeed(4357);
// Define the substances that cells may secrete
// Order: substance_name, diffusion_coefficient, decay_constant, resolution
ModelInitializer::DefineSubstance(kSubstance0, "Substance_0", 0.5, 0.1, 20);
ModelInitializer::DefineSubstance(kSubstance1, "Substance_1", 0.5, 0.1, 20);
int cell_type = 1;
std::string substance_name = "Substance_0";
auto construct = [&cell_type, &substance_name](const Double3& position) {
auto* cell = new MyCell(position, cell_type);
cell->SetDiameter(10);
cell->AddBehavior(new Secretion(substance_name));
cell->AddBehavior(new Chemotaxis(substance_name, 5));
return cell;
};
// Construct num_cells/2 cells of type 0
ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
num_cells / 2, construct);
// Construct num_cells/2 cells of type 1
cell_type = -1;
substance_name = "Substance_1";
ModelInitializer::CreateAgentsRandom(param->min_bound, param->max_bound,
num_cells / 2, construct);
// Run simulation for N timesteps
simulation.GetScheduler()->Simulate(1000);
// Check if criterion is met
double spatial_range = 5;
auto crit = GetCriterion(spatial_range, num_cells / 8);
if (crit) {
std::cout << "Simulation completed successfully!\n";
}
return !crit;
}
} // namespace bdm
#endif // DEMO_SOMA_CLUSTERING_H_
|
hecmw_malloc.c | /*****************************************************************************
* Copyright (c) 2019 FrontISTR Commons
* This software is released under the MIT License, see LICENSE.txt
*****************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include "hecmw_config.h"
#include "hecmw_util.h"
#include "hecmw_malloc.h"
struct malloc_info {
void *ptr;
size_t size;
char *file;
int line;
struct malloc_info *next;
};
static struct malloc_info *mainfo;
static int is_check_memleak;
static long mem_size;
static int auto_check = 1;
static int n_ptr;
static int add_info(void *ptr, size_t size, char *file, int line) {
static struct malloc_info *info;
int rtc;
HECMW_assert(ptr);
#pragma omp critical
{
info = malloc(sizeof(*info));
if (info == NULL) {
rtc = -1;
} else {
mem_size += size;
info->ptr = ptr;
info->size = size;
info->file = file;
info->line = line;
info->next = mainfo;
mainfo = info;
n_ptr++;
rtc = 0;
}
}
return rtc;
}
static int del_info(void *ptr) {
struct malloc_info *p, *q;
int rtc, i;
HECMW_assert(ptr);
#pragma omp critical
{
q = NULL;
for (p = mainfo, i = 0; p && p->ptr != ptr; p = (q = p)->next, i++) {
HECMW_assert(i < n_ptr);
}
if (p == NULL) {
rtc = -1; /* not found */
} else {
if (q == NULL) {
mainfo = p->next;
} else {
q->next = p->next;
}
mem_size -= p->size;
free(p);
n_ptr--;
rtc = 0;
}
}
return rtc;
}
static int change_info(void *ptrold, void *ptrnew, size_t sizenew, char *file,
int line) {
struct malloc_info *p;
long size;
int rtc, i;
HECMW_assert(ptrold);
HECMW_assert(ptrnew);
#pragma omp critical
{
for (p = mainfo, i = 0; p && p->ptr != ptrold; p = p->next, i++) {
HECMW_assert(i < n_ptr);
}
if (p == NULL) {
rtc = -1;
} else {
size = sizenew - p->size;
mem_size += size;
p->ptr = ptrnew;
p->size = sizenew;
p->file = file;
p->line = line;
rtc = 0;
}
}
return rtc;
}
int HECMW_list_meminfo(FILE *fp) {
int n;
struct malloc_info *p;
if (fp == NULL) fp = stdout;
n = 0;
for (p = mainfo; p; p = p->next) {
fprintf(fp, "HEC-MW memory info: %s:%d ptr=%p size=%d\n", p->file,
p->line, p->ptr, (int)p->size);
n++;
}
return n;
}
void HECMW_set_autocheck_memleak(int flag) { auto_check = flag ? 1 : 0; }
int HECMW_check_memleak(void) {
int n;
struct malloc_info *p;
if (mainfo == NULL) return 0; /* no memory leaks */
n = 0;
for (p = mainfo; p; p = p->next) {
fprintf(stderr,
"HEC-MW memory check: "
"A memory leak found at %s:%d ptr=%p size=%d\n",
p->file, p->line, p->ptr, (int)p->size);
n++;
}
fprintf(stderr,
"HEC-MW memory check: "
"%d memory leak%s found\n",
n, (n > 1) ? "s" : "");
return n;
}
static void check_memleak(void) { HECMW_check_memleak(); }
static int mark_check_memleak(void) {
if (!is_check_memleak) {
if (atexit(check_memleak) == -1) return -1;
is_check_memleak = 1;
}
return 0;
}
long HECMW_get_memsize(void) { return mem_size; }
void HECMW_free_(void *ptr, char *file, int line) {
if (ptr == NULL) return;
if (del_info(ptr)) {
HECMW_print_msg(HECMW_LOG_WARN, HECMW_UTIL_E9001,
"Not found allocated memory %p(%s:%d)\n", ptr, file, line);
}
free(ptr);
}
void *HECMW_malloc_(size_t size, char *file, int line) {
void *ptr = NULL;
ptr = malloc(size);
if (ptr == NULL) goto error;
if (add_info(ptr, size, file, line)) goto error;
if (auto_check) {
if (mark_check_memleak()) goto error;
}
return ptr;
error:
free(ptr);
return NULL;
}
void *HECMW_calloc_(size_t nmemb, size_t size, char *file, int line) {
void *ptr = NULL;
ptr = calloc(nmemb, size);
if (ptr == NULL) goto error;
if (add_info(ptr, nmemb * size, file, line)) goto error;
if (auto_check) {
if (mark_check_memleak()) goto error;
}
return ptr;
error:
free(ptr);
return NULL;
}
void *HECMW_realloc_(void *ptr, size_t size, char *file, int line) {
void *ptrnew;
ptrnew = realloc(ptr, size);
if (size == 0 && ptr != NULL) { /* same as free */
if (del_info(ptr)) {
HECMW_print_msg(HECMW_LOG_WARN, HECMW_UTIL_E9001,
"Not found registered memory %p(%s:%d)\n", ptr, file,
line);
}
return NULL;
}
if (ptr == NULL) { /* same as malloc(size) */
if (add_info(ptrnew, size, file, line)) return NULL;
} else {
if (ptr == ptrnew) {
if (change_info(ptr, ptrnew, size, file, line)) {
HECMW_print_msg(HECMW_LOG_WARN, HECMW_UTIL_E9001,
"Not found registered memory %p(%s:%d)\n", ptr, file,
line);
if (add_info(ptrnew, size, file, line)) return NULL;
}
} else {
if (del_info(ptr)) {
HECMW_print_msg(HECMW_LOG_WARN, HECMW_UTIL_E9001,
"Not found registered memory %p(%s:%d)\n", ptr, file,
line);
}
if (add_info(ptrnew, size, file, line)) goto error;
}
}
if (auto_check) {
if (mark_check_memleak()) goto error;
}
return ptrnew;
error:
return NULL;
}
char *HECMW_strdup_(const char *s, char *file, int line) {
char *str = NULL;
str = strdup(s);
if (str == NULL) goto error;
if (add_info(str, strlen(str) + 1, file, line)) goto error;
if (auto_check) {
if (mark_check_memleak()) goto error;
}
return str;
error:
free(str);
return NULL;
}
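/*
 * Minimal usage sketch. It assumes the companion header hecmw_malloc.h maps
 * HECMW_malloc(size) onto HECMW_malloc_(size, __FILE__, __LINE__) and
 * HECMW_free(ptr) onto HECMW_free_(ptr, __FILE__, __LINE__):
 *
 *   int *v = HECMW_malloc(100 * sizeof(int));
 *   ...
 *   HECMW_free(v);  // omit this and the atexit() leak report names the
 *                   // file:line of the allocation
 */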
|
mpinpb.h |
//---------------------------------------------------------------------
//---------------------------------------------------------------------
#ifndef __MPINPB_H
#define __MPINPB_H
#ifdef G_MAIN
int node, no_nodes, total_nodes, root;
int active;
#else
extern int node, no_nodes, total_nodes, root;
extern int active;
#endif
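/* give each OpenMP thread its own private copy of the node bookkeeping */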
#ifdef _OPENMP
#pragma omp threadprivate (node, no_nodes, total_nodes, root, active)
#endif
#endif
|
fio_old.c | #include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <byteswap.h>
#include <omp.h>
#include "fio.h"
/* NON-USER FUNCTION
*
 * Swap a 64-bit double-precision value from
 * big-endian to little-endian byte order */
inline static double
__bswap_64d(double val)
{
double val_;
size_t sz = sizeof(double);
size_t sz_ = sz - 1;
char * cval = (char *) &val;
char * cval_ = (char *) &val_;
  /* Reverse the byte order: the most significant byte becomes the
     least significant (big-endian to little-endian).
  */
uint32_t i;
for(i = 0; i < sz; i++) cval_[i] = cval[sz_- i];
for(i = 0; i < sz; i++) cval[i] = cval_[i];
return val;
}
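/*
 * An equivalent sketch using glibc's 64-bit integer swap (bswap_64d_alt is
 * an illustrative name, not part of fio): copy the double's bytes into a
 * uint64_t via memcpy (strict-aliasing safe), swap, and copy back.
 */
inline static double
bswap_64d_alt(double v)
{
    uint64_t u;
    memcpy(&u, &v, sizeof u);
    u = __bswap_64(u); /* from byteswap.h, like __bswap_32 used below */
    memcpy(&v, &u, sizeof v);
    return v;
}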
static void
walkd(char *l, const char *h, double *b)
{
// loop over the fbuf and extract the data: each time shift 8-bytes
for(; l < h; l += sizeof(double))
{
// Extract 8-bytes
double k;
memcpy(&k, l, sizeof(double));
// big-endian to little-endian
// byteswap.h does not provide a byte-swap function for the
// double-precision data type, so we implement our own.
*(b++) = __bswap_64d(k);
}
}
static void
walkfbufd(char *l, const char *h, const size_t sz, char *b)
{
if(sz <= LIMIT_D) walkd(l, h, (double *)b);
else
{
size_t sz_ = sz / 2; // Middle size address
size_t sz__ = sz - sz_; // Middle size address
char *m = l + (sz_ * sizeof(double));
char *k = b + (sz_ * sizeof(double));
#pragma omp task
walkfbufd(l, m, sz_, b);
walkfbufd(m, h, sz__, k);
#pragma omp taskwait
}
}
static void
walki(char *l, const char *h, uint32_t *b)
{
// Loop over the fbuf and extract the data: each time shift with the
// size of unsigned int (4-bytes)
for(; l < h; l += sizeof(uint32_t))
{
// Extract 4-bytes from the file buffer
uint32_t k;
memcpy(&k, l, sizeof(uint32_t));
// Swap bytes from big-endian format into little-endian
*(b++) = __bswap_32(k); // a GNU function from byteswap.h
}
}
static void
walkfbufi(char *l, const char *h, const size_t sz, char *b)
{
if(sz <= LIMIT_I) walki(l, h, (uint32_t *)b);
else // Partition the data
{
size_t sz_ = sz / 2; // The middle address size
size_t sz__ = sz - sz_; // The size last address size
char *m = l + (sz_ * sizeof(uint32_t));
char *k = b + (sz_ * sizeof(uint32_t));
#pragma omp task
walkfbufi(l, m, sz_, b);
walkfbufi(m, h, sz__, k);
#pragma omp taskwait
}
}
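/*
 * In both walkers the first half of the range is handed to a child task
 * while the calling thread recurses directly into the second half; the
 * taskwait joins the child before returning, so completion of a call
 * implies its whole subrange has been converted.
 */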
/* USER FUNCTION
*
* The main function that can be called by users to partition the buffer
* so that we can walk through the file buffer in parallel with the
* help of OpenMP task-based parallelism
*
* Input:
* Low and high address
* Data type
* Buffer size
* Data buffer (for the results)
* */
void
walkfbuf(const struct wtbl *w, void *b)
{
#pragma omp parallel
{
#pragma omp single
{
switch(w->t)
{
case UINT: walkfbufi(w->l, w->h, w->sz, b);
break;
case DOUBLE: walkfbufd(w->l, w->h, w->sz, b);
break;
}
}
}
} |
divsufsort.c | /*
* divsufsort.c for libdivsufsort-lite
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*- Compiler specifics -*/
#ifdef __clang__
#pragma clang diagnostic ignored "-Wshorten-64-to-32"
#endif
#if defined(_MSC_VER)
# pragma warning(disable : 4244)
# pragma warning(disable : 4127) /* C4127 : Condition expression is constant */
#endif
/*- Dependencies -*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "divsufsort.h"
/*- Constants -*/
#if defined(INLINE)
# undef INLINE
#endif
#if !defined(INLINE)
# define INLINE __inline
#endif
#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
# undef ALPHABET_SIZE
#endif
#if !defined(ALPHABET_SIZE)
# define ALPHABET_SIZE (256)
#endif
#define BUCKET_A_SIZE (ALPHABET_SIZE)
#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
#if defined(SS_INSERTIONSORT_THRESHOLD)
# if SS_INSERTIONSORT_THRESHOLD < 1
# undef SS_INSERTIONSORT_THRESHOLD
# define SS_INSERTIONSORT_THRESHOLD (1)
# endif
#else
# define SS_INSERTIONSORT_THRESHOLD (8)
#endif
#if defined(SS_BLOCKSIZE)
# if SS_BLOCKSIZE < 0
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (0)
# elif 32768 <= SS_BLOCKSIZE
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (32767)
# endif
#else
# define SS_BLOCKSIZE (1024)
#endif
/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
#if SS_BLOCKSIZE == 0
# define SS_MISORT_STACKSIZE (96)
#elif SS_BLOCKSIZE <= 4096
# define SS_MISORT_STACKSIZE (16)
#else
# define SS_MISORT_STACKSIZE (24)
#endif
#define SS_SMERGE_STACKSIZE (32)
#define TR_INSERTIONSORT_THRESHOLD (8)
#define TR_STACKSIZE (64)
/*- Macros -*/
#ifndef SWAP
# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
#endif /* SWAP */
#ifndef MIN
# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif /* MIN */
#ifndef MAX
# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif /* MAX */
#define STACK_PUSH(_a, _b, _c, _d)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize++].d = (_d);\
} while(0)
#define STACK_PUSH5(_a, _b, _c, _d, _e)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
} while(0)
#define STACK_POP(_a, _b, _c, _d)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d;\
} while(0)
#define STACK_POP5(_a, _b, _c, _d, _e)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
} while(0)
#define BUCKET_A(_c0) bucket_A[(_c0)]
#if ALPHABET_SIZE == 256
#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
#else
#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
#endif
/*- Private Functions -*/
static const int lg_table[256] = {
-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
static INLINE
int
ss_ilg(int n) {
#if SS_BLOCKSIZE == 0
return (n & 0xffff0000) ?
((n & 0xff000000) ?
24 + lg_table[(n >> 24) & 0xff] :
16 + lg_table[(n >> 16) & 0xff]) :
((n & 0x0000ff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff]);
#elif SS_BLOCKSIZE < 256
return lg_table[n];
#else
return (n & 0xff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff];
#endif
}
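/* ss_ilg(n) returns floor(log2(n)) via byte-wise lg_table lookups,
   e.g. ss_ilg(1) == 0, ss_ilg(1023) == 9, ss_ilg(1024) == 10. */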
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
#if SS_BLOCKSIZE != 0
static const int sqq_table[256] = {
0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61,
64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89,
90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109,
110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
};
static INLINE
int
ss_isqrt(int x) {
int y, e;
if (x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
e = (x & 0xffff0000) ?
((x & 0xff000000) ?
24 + lg_table[(x >> 24) & 0xff] :
16 + lg_table[(x >> 16) & 0xff]) :
((x & 0x0000ff00) ?
8 + lg_table[(x >> 8) & 0xff] :
0 + lg_table[(x >> 0) & 0xff]);
if (e >= 16) {
y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
if (e >= 24) { y = (y + 1 + x / y) >> 1; }
y = (y + 1 + x / y) >> 1;
} else if (e >= 8) {
y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
} else {
return sqq_table[x] >> 4;
}
return (x < (y * y)) ? y - 1 : y;
}
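/* ss_isqrt(x) returns floor(sqrt(x)), clamped to SS_BLOCKSIZE: a table
   seed from sqq_table is scaled by the magnitude e and, for large x,
   refined with one or two Newton steps; e.g. ss_isqrt(10000) == 100. */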
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Compares two suffixes. */
static INLINE
int
ss_compare(const unsigned char *T,
const int *p1, const int *p2,
int depth) {
const unsigned char *U1, *U2, *U1n, *U2n;
for (U1 = T + depth + *p1,
U2 = T + depth + *p2,
U1n = T + *(p1 + 1) + 2,
U2n = T + *(p2 + 1) + 2;
(U1 < U1n) && (U2 < U2n) && (*U1 == *U2);
++U1, ++U2) {
}
return U1 < U1n ?
(U2 < U2n ? *U1 - *U2 : 1) :
(U2 < U2n ? -1 : 0);
}
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
/* Insertionsort for small size groups */
static
void
ss_insertionsort(const unsigned char *T, const int *PA,
int *first, int *last, int depth) {
int *i, *j;
int t;
int r;
for (i = last - 2; first <= i; --i) {
for (t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
do { *(j - 1) = *j; } while ((++j < last) && (*j < 0));
if (last <= j) { break; }
}
if (r == 0) { *j = ~*j; }
*(j - 1) = t;
}
}
#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
static INLINE
void
ss_fixdown(const unsigned char *Td, const int *PA,
int *SA, int i, int size) {
int j, k;
int v;
int c, d, e;
for (v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
d = Td[PA[SA[k = j++]]];
if (d < (e = Td[PA[SA[j]]])) {
k = j;
d = e;
}
if (d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
static
void
ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
int i, m;
int t;
m = size;
if ((size % 2) == 0) {
m--;
if (Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
}
for (i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
if ((size % 2) == 0) {
SWAP(SA[0], SA[m]);
ss_fixdown(Td, PA, SA, 0, m);
}
for (i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
ss_fixdown(Td, PA, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
static INLINE
int *
ss_median3(const unsigned char *Td, const int *PA,
int *v1, int *v2, int *v3) {
int *t;
if (Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }
if (Td[PA[*v2]] > Td[PA[*v3]]) {
if (Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }
else { return v3; }
}
return v2;
}
/* Returns the median of five elements. */
static INLINE
int *
ss_median5(const unsigned char *Td, const int *PA,
int *v1, int *v2, int *v3, int *v4, int *v5) {
int *t;
if (Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
if (Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
if (Td[PA[*v2]] > Td[PA[*v4]]) {
SWAP(v2, v4);
SWAP(v3, v5);
}
if (Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
if (Td[PA[*v1]] > Td[PA[*v4]]) {
SWAP(v1, v4);
SWAP(v3, v5);
}
if (Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
return v3;
}
/* Returns the pivot element. */
static INLINE
int *
ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
int *middle;
int t;
t = last - first;
middle = first + t / 2;
if (t <= 512) {
if (t <= 32) {
return ss_median3(Td, PA, first, middle, last - 1);
} else {
t >>= 2;
return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
}
}
t >>= 3;
first = ss_median3(Td, PA, first, first + t, first + (t << 1));
middle = ss_median3(Td, PA, middle - t, middle, middle + t);
last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
return ss_median3(Td, PA, first, middle, last);
}
/*---------------------------------------------------------------------------*/
/* Binary partition for substrings. */
static INLINE
int *
ss_partition(const int *PA,
int *first, int *last, int depth) {
int *a, *b;
int t;
for (a = first - 1, b = last;;) {
for (; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
for (; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) {}
if (b <= a) { break; }
t = ~*b;
*b = *a;
*a = t;
}
if (first < a) { *first = ~*first; }
return a;
}
/* Multikey introsort for medium size groups. */
static
void
ss_mintrosort(const unsigned char *T, const int *PA,
int *first, int *last,
int depth) {
#define STACK_SIZE SS_MISORT_STACKSIZE
struct {
int *a, *b, c;
int d;
} stack[STACK_SIZE];
const unsigned char *Td;
int *a, *b, *c, *d, *e, *f;
int s, t;
int ssize;
int limit;
int v, x = 0;
for (ssize = 0, limit = ss_ilg(last - first);;) {
if ((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
#if 1 < SS_INSERTIONSORT_THRESHOLD
if (1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
#endif
STACK_POP(first, last, depth, limit);
continue;
}
Td = T + depth;
if (limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
if (limit < 0) {
for (a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
if ((x = Td[PA[*a]]) != v) {
if (1 < (a - first)) { break; }
v = x;
first = a;
}
}
if (Td[PA[*first] - 1] < v) {
first = ss_partition(PA, first, a, depth);
}
if ((a - first) <= (last - a)) {
if (1 < (a - first)) {
STACK_PUSH(a, last, depth, -1);
last = a, depth += 1, limit = ss_ilg(a - first);
} else {
first = a, limit = -1;
}
} else {
if (1 < (last - a)) {
STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
first = a, limit = -1;
} else {
last = a, depth += 1, limit = ss_ilg(a - first);
}
}
continue;
}
/* choose pivot */
a = ss_pivot(Td, PA, first, last);
v = Td[PA[*a]];
SWAP(*first, *a);
/* partition */
for (b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) {}
if (((a = b) < last) && (x < v)) {
for (; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
if (x == v) {
SWAP(*b, *a);
++a;
}
}
}
for (c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) {}
if ((b < (d = c)) && (x > v)) {
for (; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
if (x == v) {
SWAP(*c, *d);
--d;
}
}
}
for (; b < c;) {
SWAP(*b, *c);
for (; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
if (x == v) {
SWAP(*b, *a);
++a;
}
}
for (; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
if (x == v) {
SWAP(*c, *d);
--d;
}
}
}
if (a <= d) {
c = b - 1;
if ((s = a - first) > (t = b - a)) { s = t; }
for (e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if ((s = d - c) > (t = last - d - 1)) { s = t; }
for (e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
a = first + (b - a), c = last - (d - c);
b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
if ((a - first) <= (last - c)) {
if ((last - c) <= (c - b)) {
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
STACK_PUSH(c, last, depth, limit);
last = a;
} else if ((a - first) <= (c - b)) {
STACK_PUSH(c, last, depth, limit);
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
last = a;
} else {
STACK_PUSH(c, last, depth, limit);
STACK_PUSH(first, a, depth, limit);
first = b, last = c, depth += 1, limit = ss_ilg(c - b);
}
} else {
if ((a - first) <= (c - b)) {
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
STACK_PUSH(first, a, depth, limit);
first = c;
} else if ((last - c) <= (c - b)) {
STACK_PUSH(first, a, depth, limit);
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
first = c;
} else {
STACK_PUSH(first, a, depth, limit);
STACK_PUSH(c, last, depth, limit);
first = b, last = c, depth += 1, limit = ss_ilg(c - b);
}
}
} else {
limit += 1;
if (Td[PA[*first] - 1] < v) {
first = ss_partition(PA, first, last, depth);
limit = ss_ilg(last - first);
}
depth += 1;
}
}
#undef STACK_SIZE
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
/*---------------------------------------------------------------------------*/
#if SS_BLOCKSIZE != 0
static INLINE
void
ss_blockswap(int *a, int *b, int n) {
int t;
for (; 0 < n; --n, ++a, ++b) {
t = *a, *a = *b, *b = t;
}
}
static INLINE
void
ss_rotate(int *first, int *middle, int *last) {
int *a, *b, t;
int l, r;
l = middle - first, r = last - middle;
for (; (0 < l) && (0 < r);) {
if (l == r) {
ss_blockswap(first, middle, l);
break;
}
if (l < r) {
a = last - 1, b = middle - 1;
t = *a;
do {
*a-- = *b, *b-- = *a;
if (b < first) {
*a = t;
last = a;
if ((r -= l + 1) <= l) { break; }
a -= 1, b = middle - 1;
t = *a;
}
} while (1);
} else {
a = first, b = middle;
t = *a;
do {
*a++ = *b, *b++ = *a;
if (last <= b) {
*a = t;
first = a + 1;
if ((l -= r + 1) <= r) { break; }
a += 1, b = middle;
t = *a;
}
} while (1);
}
}
}
/*---------------------------------------------------------------------------*/
static
void
ss_inplacemerge(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int depth) {
const int *p;
int *a, *b;
int len, half;
int q, r;
int x;
for (;;) {
if (*(last - 1) < 0) {
x = 1;
p = PA + ~*(last - 1);
}
else {
x = 0;
p = PA + *(last - 1);
}
for (a = first, len = middle - first, half = len >> 1, r = -1;
0 < len;
len = half, half >>= 1) {
b = a + half;
q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
if (q < 0) {
a = b + 1;
half -= (len & 1) ^ 1;
} else {
r = q;
}
}
if (a < middle) {
if (r == 0) { *a = ~*a; }
ss_rotate(a, middle, last);
last -= middle - a;
middle = a;
if (first == middle) { break; }
}
--last;
if (x != 0) { while (*--last < 0) {}}
if (middle == last) { break; }
}
}
/*---------------------------------------------------------------------------*/
/* Merge-forward with internal buffer. */
static
void
ss_mergeforward(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int depth) {
int *a, *b, *c, *bufend;
int t;
int r;
bufend = buf + (middle - first) - 1;
ss_blockswap(buf, first, middle - first);
for (t = *(a = first), b = buf, c = middle;;) {
r = ss_compare(T, PA + *b, PA + *c, depth);
if (r < 0) {
do {
*a++ = *b;
if (bufend <= b) {
*bufend = t;
return;
}
*b++ = *a;
} while (*b < 0);
} else if (r > 0) {
do {
*a++ = *c, *c++ = *a;
if (last <= c) {
while (b < bufend) { *a++ = *b, *b++ = *a; }
*a = *b, *b = t;
return;
}
} while (*c < 0);
} else {
*c = ~*c;
do {
*a++ = *b;
if (bufend <= b) {
*bufend = t;
return;
}
*b++ = *a;
} while (*b < 0);
do {
*a++ = *c, *c++ = *a;
if (last <= c) {
while (b < bufend) { *a++ = *b, *b++ = *a; }
*a = *b, *b = t;
return;
}
} while (*c < 0);
}
}
}
/* Merge-backward with internal buffer. */
static
void
ss_mergebackward(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int depth) {
const int *p1, *p2;
int *a, *b, *c, *bufend;
int t;
int r;
int x;
bufend = buf + (last - middle) - 1;
ss_blockswap(buf, middle, last - middle);
x = 0;
if (*bufend < 0) {
p1 = PA + ~*bufend;
x |= 1;
}
else { p1 = PA + *bufend; }
if (*(middle - 1) < 0) {
p2 = PA + ~*(middle - 1);
x |= 2;
}
else { p2 = PA + *(middle - 1); }
for (t = *(a = last - 1), b = bufend, c = middle - 1;;) {
r = ss_compare(T, p1, p2, depth);
if (0 < r) {
if (x & 1) {
do { *a-- = *b, *b-- = *a; } while (*b < 0);
x ^= 1;
}
*a-- = *b;
if (b <= buf) {
*buf = t;
break;
}
*b-- = *a;
if (*b < 0) {
p1 = PA + ~*b;
x |= 1;
}
else { p1 = PA + *b; }
} else if (r < 0) {
if (x & 2) {
do { *a-- = *c, *c-- = *a; } while (*c < 0);
x ^= 2;
}
*a-- = *c, *c-- = *a;
if (c < first) {
while (buf < b) { *a-- = *b, *b-- = *a; }
*a = *b, *b = t;
break;
}
if (*c < 0) {
p2 = PA + ~*c;
x |= 2;
}
else { p2 = PA + *c; }
} else {
if (x & 1) {
do { *a-- = *b, *b-- = *a; } while (*b < 0);
x ^= 1;
}
*a-- = ~*b;
if (b <= buf) {
*buf = t;
break;
}
*b-- = *a;
if (x & 2) {
do { *a-- = *c, *c-- = *a; } while (*c < 0);
x ^= 2;
}
*a-- = *c, *c-- = *a;
if (c < first) {
while (buf < b) { *a-- = *b, *b-- = *a; }
*a = *b, *b = t;
break;
}
if (*b < 0) {
p1 = PA + ~*b;
x |= 1;
}
else { p1 = PA + *b; }
if (*c < 0) {
p2 = PA + ~*c;
x |= 2;
}
else { p2 = PA + *c; }
}
}
}
/* D&C based merge. */
static
void
ss_swapmerge(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int bufsize, int depth) {
#define STACK_SIZE SS_SMERGE_STACKSIZE
#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
#define MERGE_CHECK(a, b, c)\
do {\
if(((c) & 1) ||\
(((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
*(a) = ~*(a);\
}\
if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
*(b) = ~*(b);\
}\
} while(0)
struct {
int *a, *b, *c;
int d;
} stack[STACK_SIZE];
int *l, *r, *lm, *rm;
int m, len, half;
int ssize;
int check, next;
for (check = 0, ssize = 0;;) {
if ((last - middle) <= bufsize) {
if ((first < middle) && (middle < last)) {
ss_mergebackward(T, PA, first, middle, last, buf, depth);
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
continue;
}
if ((middle - first) <= bufsize) {
if (first < middle) {
ss_mergeforward(T, PA, first, middle, last, buf, depth);
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
continue;
}
for (m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
0 < len;
len = half, half >>= 1) {
if (ss_compare(T, PA + GETIDX(*(middle + m + half)),
PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
m += half + 1;
half -= (len & 1) ^ 1;
}
}
if (0 < m) {
lm = middle - m, rm = middle + m;
ss_blockswap(lm, middle, m);
l = r = middle, next = 0;
if (rm < last) {
if (*rm < 0) {
*rm = ~*rm;
if (first < lm) {
for (; *--l < 0;) {}
next |= 4;
}
next |= 1;
} else if (first < lm) {
for (; *r < 0; ++r) {}
next |= 2;
}
}
if ((l - first) <= (last - r)) {
STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
middle = lm, last = l, check = (check & 3) | (next & 4);
} else {
if ((next & 2) && (r == middle)) { next ^= 6; }
STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
first = r, middle = rm, check = (next & 3) | (check & 4);
}
} else {
if (ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
*middle = ~*middle;
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
}
}
#undef STACK_SIZE
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Substring sort */
static
void
sssort(const unsigned char *T, const int *PA,
int *first, int *last,
int *buf, int bufsize,
int depth, int n, int lastsuffix) {
int *a;
#if SS_BLOCKSIZE != 0
int *b, *middle, *curbuf;
int j, k, curbufsize, limit;
#endif
int i;
if (lastsuffix != 0) { ++first; }
#if SS_BLOCKSIZE == 0
ss_mintrosort(T, PA, first, last, depth);
#else
if ((bufsize < SS_BLOCKSIZE) &&
(bufsize < (last - first)) &&
(bufsize < (limit = ss_isqrt(last - first)))) {
if (SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
buf = middle = last - limit, bufsize = limit;
} else {
middle = last, limit = 0;
}
for (a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
#endif
curbufsize = last - (a + SS_BLOCKSIZE);
curbuf = a + SS_BLOCKSIZE;
if (curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
for (b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
}
}
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, a, middle, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, a, middle, depth);
#endif
for (k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
if (i & 1) {
ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
a -= k;
}
}
if (limit != 0) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, middle, last, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, middle, last, depth);
#endif
ss_inplacemerge(T, PA, first, middle, last, depth);
}
#endif
if (lastsuffix != 0) {
/* Insert last type B* suffix. */
int PAi[2];
PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
for (a = first, i = *(first - 1);
(a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
++a) {
*(a - 1) = *a;
}
*(a - 1) = i;
}
}
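/* Illustrative note: sssort sorts SS_BLOCKSIZE-sized blocks independently and
merges them in a binary-counter pattern: block i is merged into its
accumulated left neighbor once for every trailing 1-bit of i (the inner
ss_swapmerge loop above). */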
/*---------------------------------------------------------------------------*/
static INLINE
int
tr_ilg(int n) {
return (n & 0xffff0000) ?
((n & 0xff000000) ?
24 + lg_table[(n >> 24) & 0xff] :
16 + lg_table[(n >> 16) & 0xff]) :
((n & 0x0000ff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff]);
}
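/* Worked example (illustrative): tr_ilg(n) is floor(log2(n)) computed with the
8-bit lg_table, e.g. tr_ilg(0x12345) == 16 + lg_table[1] == 16, since
2^16 <= 0x12345 < 2^17. */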
/*---------------------------------------------------------------------------*/
/* Simple insertionsort for small size groups. */
static
void
tr_insertionsort(const int *ISAd, int *first, int *last) {
int *a, *b;
int t, r;
for (a = first + 1; a < last; ++a) {
for (t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
do { *(b + 1) = *b; } while ((first <= --b) && (*b < 0));
if (b < first) { break; }
}
if (r == 0) { *b = ~*b; }
*(b + 1) = t;
}
}
/*---------------------------------------------------------------------------*/
static INLINE
void
tr_fixdown(const int *ISAd, int *SA, int i, int size) {
int j, k;
int v;
int c, d, e;
for (v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
d = ISAd[SA[k = j++]];
if (d < (e = ISAd[SA[j]])) {
k = j;
d = e;
}
if (d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
static
void
tr_heapsort(const int *ISAd, int *SA, int size) {
int i, m;
int t;
m = size;
if ((size % 2) == 0) {
m--;
if (ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
}
for (i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
if ((size % 2) == 0) {
SWAP(SA[0], SA[m]);
tr_fixdown(ISAd, SA, 0, m);
}
for (i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
tr_fixdown(ISAd, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
static INLINE
int *
tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
int *t;
if (ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
if (ISAd[*v2] > ISAd[*v3]) {
if (ISAd[*v1] > ISAd[*v3]) { return v1; }
else { return v3; }
}
return v2;
}
/* Returns the median of five elements. */
static INLINE
int *
tr_median5(const int *ISAd,
int *v1, int *v2, int *v3, int *v4, int *v5) {
int *t;
if (ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
if (ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
if (ISAd[*v2] > ISAd[*v4]) {
SWAP(v2, v4);
SWAP(v3, v5);
}
if (ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
if (ISAd[*v1] > ISAd[*v4]) {
SWAP(v1, v4);
SWAP(v3, v5);
}
if (ISAd[*v3] > ISAd[*v4]) { return v4; }
return v3;
}
/* Returns the pivot element. */
static INLINE
int *
tr_pivot(const int *ISAd, int *first, int *last) {
int *middle;
int t;
t = last - first;
middle = first + t / 2;
if (t <= 512) {
if (t <= 32) {
return tr_median3(ISAd, first, middle, last - 1);
} else {
t >>= 2;
return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
}
}
t >>= 3;
first = tr_median3(ISAd, first, first + t, first + (t << 1));
middle = tr_median3(ISAd, middle - t, middle, middle + t);
last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
return tr_median3(ISAd, first, middle, last);
}
/*---------------------------------------------------------------------------*/
typedef struct _trbudget_t trbudget_t;
struct _trbudget_t {
int chance;
int remain;
int incval;
int count;
};
static INLINE
void
trbudget_init(trbudget_t *budget, int chance, int incval) {
budget->chance = chance;
budget->remain = budget->incval = incval;
}
static INLINE
int
trbudget_check(trbudget_t *budget, int size) {
if (size <= budget->remain) {
budget->remain -= size;
return 1;
}
if (budget->chance == 0) {
budget->count += size;
return 0;
}
budget->remain += budget->incval - size;
budget->chance -= 1;
return 1;
}
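/* Illustrative note: work of a given size is granted while `remain` lasts;
when it runs out, `remain` is refilled from `incval` at most `chance` times.
Once the chances are spent, further requests are refused and their sizes
accumulate in `count`, which trsort reads to detect groups left unsorted. */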
/*---------------------------------------------------------------------------*/
static INLINE
void
tr_partition(const int *ISAd,
int *first, int *middle, int *last,
int **pa, int **pb, int v) {
int *a, *b, *c, *d, *e, *f;
int t, s;
int x = 0;
for (b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) {}
if (((a = b) < last) && (x < v)) {
for (; (++b < last) && ((x = ISAd[*b]) <= v);) {
if (x == v) {
SWAP(*b, *a);
++a;
}
}
}
for (c = last; (b < --c) && ((x = ISAd[*c]) == v);) {}
if ((b < (d = c)) && (x > v)) {
for (; (b < --c) && ((x = ISAd[*c]) >= v);) {
if (x == v) {
SWAP(*c, *d);
--d;
}
}
}
for (; b < c;) {
SWAP(*b, *c);
for (; (++b < c) && ((x = ISAd[*b]) <= v);) {
if (x == v) {
SWAP(*b, *a);
++a;
}
}
for (; (b < --c) && ((x = ISAd[*c]) >= v);) {
if (x == v) {
SWAP(*c, *d);
--d;
}
}
}
if (a <= d) {
c = b - 1;
if ((s = a - first) > (t = b - a)) { s = t; }
for (e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if ((s = d - c) > (t = last - d - 1)) { s = t; }
for (e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
first += (b - a), last -= (d - c);
}
*pa = first, *pb = last;
}
static
void
tr_copy(int *ISA, const int *SA,
int *first, int *a, int *b, int *last,
int depth) {
/* sort suffixes of middle partition
by using sorted order of suffixes of left and right partition. */
int *c, *d, *e;
int s, v;
v = b - SA - 1;
for (c = first, d = a - 1; c <= d; ++c) {
if ((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
ISA[s] = d - SA;
}
}
for (c = last - 1, e = d + 1, d = b; e < d; --c) {
if ((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
ISA[s] = d - SA;
}
}
}
static
void
tr_partialcopy(int *ISA, const int *SA,
int *first, int *a, int *b, int *last,
int depth) {
int *c, *d, *e;
int s, v;
int rank, lastrank, newrank = -1;
v = b - SA - 1;
lastrank = -1;
for (c = first, d = a - 1; c <= d; ++c) {
if ((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
rank = ISA[s + depth];
if (lastrank != rank) {
lastrank = rank;
newrank = d - SA;
}
ISA[s] = newrank;
}
}
lastrank = -1;
for (e = d; first <= e; --e) {
rank = ISA[*e];
if (lastrank != rank) {
lastrank = rank;
newrank = e - SA;
}
if (newrank != rank) { ISA[*e] = newrank; }
}
lastrank = -1;
for (c = last - 1, e = d + 1, d = b; e < d; --c) {
if ((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
rank = ISA[s + depth];
if (lastrank != rank) {
lastrank = rank;
newrank = d - SA;
}
ISA[s] = newrank;
}
}
}
static
void
tr_introsort(int *ISA, const int *ISAd,
int *SA, int *first, int *last,
trbudget_t *budget) {
#define STACK_SIZE TR_STACKSIZE
struct {
const int *a;
int *b, *c;
int d, e;
} stack[STACK_SIZE];
int *a, *b, *c;
int t;
int v, x = 0;
int incr = ISAd - ISA;
int limit, next;
int ssize, trlink = -1;
for (ssize = 0, limit = tr_ilg(last - first);;) {
if (limit < 0) {
if (limit == -1) {
/* tandem repeat partition */
tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
/* update ranks */
if (a < last) {
for (c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
}
if (b < last) {
for (c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
}
/* push */
if (1 < (b - a)) {
STACK_PUSH5(NULL, a, b, 0, 0);
STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
trlink = ssize - 2;
}
if ((a - first) <= (last - b)) {
if (1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
last = a, limit = tr_ilg(a - first);
} else if (1 < (last - b)) {
first = b, limit = tr_ilg(last - b);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if (1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
first = b, limit = tr_ilg(last - b);
} else if (1 < (a - first)) {
last = a, limit = tr_ilg(a - first);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else if (limit == -2) {
/* tandem repeat copy */
a = stack[--ssize].b, b = stack[ssize].c;
if (stack[ssize].d == 0) {
tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
} else {
if (0 <= trlink) { stack[trlink].d = -1; }
tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
}
STACK_POP5(ISAd, first, last, limit, trlink);
} else {
/* sorted partition */
if (0 <= *first) {
a = first;
do { ISA[*a] = a - SA; } while ((++a < last) && (0 <= *a));
first = a;
}
if (first < last) {
a = first;
do { *a = ~*a; } while (*++a < 0);
next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
if (++a < last) { for (b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; }}
/* push */
if (trbudget_check(budget, a - first)) {
if ((a - first) <= (last - a)) {
STACK_PUSH5(ISAd, a, last, -3, trlink);
ISAd += incr, last = a, limit = next;
} else {
if (1 < (last - a)) {
STACK_PUSH5(ISAd + incr, first, a, next, trlink);
first = a, limit = -3;
} else {
ISAd += incr, last = a, limit = next;
}
}
} else {
if (0 <= trlink) { stack[trlink].d = -1; }
if (1 < (last - a)) {
first = a, limit = -3;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
continue;
}
if ((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
tr_insertionsort(ISAd, first, last);
limit = -3;
continue;
}
if (limit-- == 0) {
tr_heapsort(ISAd, first, last - first);
for (a = last - 1; first < a; a = b) {
for (x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
}
limit = -3;
continue;
}
/* choose pivot */
a = tr_pivot(ISAd, first, last);
SWAP(*first, *a);
v = ISAd[*first];
/* partition */
tr_partition(ISAd, first, first + 1, last, &a, &b, v);
if ((last - first) != (b - a)) {
next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
/* update ranks */
for (c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
if (b < last) { for (c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }}
/* push */
if ((1 < (b - a)) && (trbudget_check(budget, b - a))) {
if ((a - first) <= (last - b)) {
if ((last - b) <= (b - a)) {
if (1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if (1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if ((a - first) <= (b - a)) {
if (1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
if ((a - first) <= (b - a)) {
if (1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if (1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if ((last - b) <= (b - a)) {
if (1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
}
} else {
if ((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
if ((a - first) <= (last - b)) {
if (1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if (1 < (last - b)) {
first = b;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if (1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if (1 < (a - first)) {
last = a;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
} else {
if (trbudget_check(budget, last - first)) {
limit = tr_ilg(last - first), ISAd += incr;
} else {
if (0 <= trlink) { stack[trlink].d = -1; }
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
#undef STACK_SIZE
}
/*---------------------------------------------------------------------------*/
/* Tandem repeat sort */
static
void
trsort(int *ISA, int *SA, int n, int depth) {
int *ISAd;
int *first, *last;
trbudget_t budget;
int t, skip, unsorted;
trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
for (ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
first = SA;
skip = 0;
unsorted = 0;
do {
if ((t = *first) < 0) {
first -= t;
skip += t;
}
else {
if (skip != 0) {
*(first + skip) = skip;
skip = 0;
}
last = SA + ISA[t] + 1;
if (1 < (last - first)) {
budget.count = 0;
tr_introsort(ISA, ISAd, SA, first, last, &budget);
if (budget.count != 0) { unsorted += budget.count; }
else { skip = first - last; }
} else if ((last - first) == 1) {
skip = -1;
}
first = last;
}
} while (first < (SA + n));
if (skip != 0) { *(first + skip) = skip; }
if (unsorted == 0) { break; }
}
}
/*---------------------------------------------------------------------------*/
/* Sorts suffixes of type B*. */
static
int
sort_typeBstar(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int openMP) {
int *PAb, *ISAb, *buf;
#ifdef LIBBSC_OPENMP
int *curbuf;
int l;
#endif
int i, j, k, t, m, bufsize;
int c0, c1;
#ifdef LIBBSC_OPENMP
int d0, d1;
#endif
(void) openMP;
/* Initialize bucket arrays. */
for (i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
for (i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
/* Count the number of occurrences of the first one or two characters of each
type A, B and B* suffix. Moreover, store the beginning position of all
type B* suffixes into the array SA. */
for (i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
/* type A suffix. */
do { ++BUCKET_A(c1 = c0); } while ((0 <= --i) && ((c0 = T[i]) >= c1));
if (0 <= i) {
/* type B* suffix. */
++BUCKET_BSTAR(c0, c1);
SA[--m] = i;
/* type B suffix. */
for (--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
++BUCKET_B(c0, c1);
}
}
}
m = n - m;
/*
note:
A type B* suffix is lexicographically smaller than a type B suffix that
begins with the same first two characters.
*/
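/*
worked example (illustrative):
suppose two suffixes both start with "ab". A type B* suffix "ab..." is
followed by a type A suffix, so its third character is strictly smaller
than 'b'; a plain type B suffix "ab..." is followed by another type B
suffix, so its third character is at least 'b'. Hence "aba..." (B*) sorts
before "abb..." (B), as the note above states.
*/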
/* Calculate the index of start/end point of each bucket. */
for (c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
t = i + BUCKET_A(c0);
BUCKET_A(c0) = i + j; /* start point */
i = t + BUCKET_B(c0, c0);
for (c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
j += BUCKET_BSTAR(c0, c1);
BUCKET_BSTAR(c0, c1) = j; /* end point */
i += BUCKET_B(c0, c1);
}
}
if (0 < m) {
/* Sort the type B* suffixes by their first two characters. */
PAb = SA + n - m;
ISAb = SA + m;
for (i = m - 2; 0 <= i; --i) {
t = PAb[i], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = i;
}
t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
/* Sort the type B* substrings using sssort. */
#ifdef LIBBSC_OPENMP
if (openMP)
{
buf = SA + m;
c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
{
bufsize = (n - (2 * m)) / omp_get_num_threads();
curbuf = buf + omp_get_thread_num() * bufsize;
k = 0;
for(;;) {
#pragma omp critical(sssort_lock)
{
if(0 < (l = j)) {
d0 = c0, d1 = c1;
do {
k = BUCKET_BSTAR(d0, d1);
if(--d1 <= d0) {
d1 = ALPHABET_SIZE - 1;
if(--d0 < 0) { break; }
}
} while(((l - k) <= 1) && (0 < (l = k)));
c0 = d0, c1 = d1, j = k;
}
}
if(l == 0) { break; }
sssort(T, PAb, SA + k, SA + l,
curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
}
}
}
else
{
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
}
#else
buf = SA + m, bufsize = n - (2 * m);
for (c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for (c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if (1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
#endif
/* Compute ranks of type B* substrings. */
for (i = m - 1; 0 <= i; --i) {
if (0 <= SA[i]) {
j = i;
do { ISAb[SA[i]] = i; } while ((0 <= --i) && (0 <= SA[i]));
SA[i + 1] = i - j;
if (i <= 0) { break; }
}
j = i;
do { ISAb[SA[i] = ~SA[i]] = j; } while (SA[--i] < 0);
ISAb[SA[i]] = j;
}
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort(ISAb, SA, m, 1);
/* Set the sorted order of type B* suffixes. */
for (i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for (--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) {}
if (0 <= i) {
t = i;
for (--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {}
SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
}
}
/* Calculate the index of start/end point of each bucket. */
BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
for (c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
i = BUCKET_A(c0 + 1) - 1;
for (c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
t = i - BUCKET_B(c0, c1);
BUCKET_B(c0, c1) = i; /* end point */
/* Move all type B* suffixes to the correct position. */
for (i = t, j = BUCKET_BSTAR(c0, c1);
j <= k;
--i, --k) { SA[i] = SA[k]; }
}
BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
BUCKET_B(c0, c0) = i; /* end point */
}
}
return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
static
void
construct_SA(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m) {
int *i, *j, *k;
int s;
int c0, c1, c2;
if (0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for (c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for (i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if (0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
*j = ~s;
c0 = T[--s];
if ((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if (c0 != c2) {
if (0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else {
assert(((s == 0) && (T[s] == c1)) || (s < 0));
*j = ~s;
}
}
}
}
/* Construct the suffix array by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
/* Scan the suffix array from left to right. */
for (i = SA, j = SA + n; i < j; ++i) {
if (0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
if ((s == 0) || (T[s - 1] < c0)) { s = ~s; }
if (c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else {
assert(s < 0);
*i = ~s;
}
}
}
/* Constructs the Burrows-Wheeler transformed string directly
by using the sorted order of type B* suffixes. */
static
int
construct_BWT(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m) {
int *i, *j, *k, *orig;
int s;
int c0, c1, c2;
if (0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for (c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for (i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if (0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
c0 = T[--s];
*j = ~((int) c0);
if ((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if (c0 != c2) {
if (0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if (s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWT-transformed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~((int) T[n - 2]) : (n - 1);
/* Scan the suffix array from left to right. */
for (i = SA, j = SA + n, orig = SA; i < j; ++i) {
if (0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
*i = c0;
if ((0 < s) && (T[s - 1] < c0)) { s = ~((int) T[s - 1]); }
if (c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else if (s != 0) {
*i = ~s;
} else {
orig = i;
}
}
return orig - SA;
}
/* Constructs the Burrows-Wheeler transformed string directly
by using the sorted order of type B* suffixes. */
static
int
construct_BWT_indexes(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m,
unsigned char *num_indexes, int *indexes) {
int *i, *j, *k, *orig;
int s;
int c0, c1, c2;
int mod = n / 8;
{
mod |= mod >> 1;
mod |= mod >> 2;
mod |= mod >> 4;
mod |= mod >> 8;
mod |= mod >> 16;
mod >>= 1;
*num_indexes = (unsigned char) ((n - 1) / (mod + 1));
}
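/* At this point mod equals 2^k - 1, where 2^k is the largest power of two not
exceeding n/8, so the (s & mod) == 0 tests below record a BWT index roughly
every (mod + 1) text positions. */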
if (0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for (c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for (i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if (0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
c0 = T[--s];
*j = ~((int) c0);
if ((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if (c0 != c2) {
if (0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if (s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWT-transformed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
if (T[n - 2] < c2) {
if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
*k++ = ~((int) T[n - 2]);
} else {
*k++ = n - 1;
}
/* Scan the suffix array from left to right. */
for (i = SA, j = SA + n, orig = SA; i < j; ++i) {
if (0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
c0 = T[--s];
*i = c0;
if (c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
if ((0 < s) && (T[s - 1] < c0)) {
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
*k++ = ~((int) T[s - 1]);
} else
*k++ = s;
} else if (s != 0) {
*i = ~s;
} else {
orig = i;
}
}
return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
int
divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
int *bucket_A, *bucket_B;
int m;
int err = 0;
/* Check arguments. */
if ((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
else if (n == 0) { return 0; }
else if (n == 1) {
SA[0] = 0;
return 0;
}
else if (n == 2) {
m = (T[0] < T[1]);
SA[m ^ 1] = 0, SA[m] = 1;
return 0;
}
bucket_A = (int *) malloc(BUCKET_A_SIZE * sizeof(int));
bucket_B = (int *) malloc(BUCKET_B_SIZE * sizeof(int));
/* Suffixsort. */
if ((bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
construct_SA(T, SA, bucket_A, bucket_B, n, m);
} else {
err = -2;
}
free(bucket_B);
free(bucket_A);
return err;
}
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char *num_indexes, int *indexes, int openMP) {
int *B;
int *bucket_A, *bucket_B;
int m, pidx, i;
/* Check arguments. */
if ((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
else if (n <= 1) {
if (n == 1) { U[0] = T[0]; }
return n;
}
if ((B = A) == NULL) { B = (int *) malloc((size_t) (n + 1) * sizeof(int)); }
bucket_A = (int *) malloc(BUCKET_A_SIZE * sizeof(int));
bucket_B = (int *) malloc(BUCKET_B_SIZE * sizeof(int));
/* Burrows-Wheeler Transform. */
if ((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
if (num_indexes == NULL || indexes == NULL) {
pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
} else {
pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
}
/* Copy to output string. */
U[0] = T[n - 1];
for (i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char) B[i]; }
for (i += 1; i < n; ++i) { U[i] = (unsigned char) B[i]; }
pidx += 1;
} else {
pidx = -2;
}
free(bucket_B);
free(bucket_A);
if (A == NULL) { free(B); }
return pidx;
}
|
polybench.c | /**
* polybench.c: This file is part of the PolyBench 3.0 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default, 32+MB. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
static
double rtclock()
{
#ifdef POLYBENCH_TIME
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, NULL);
if (stat != 0)
printf ("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
return 0;
#endif
}
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
#endif
return ret;
}
void polybench_flush_cache()
{
int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
double* flush = (double*) calloc (cs, sizeof(double));
int i;
double tmp = 0.0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
void polybench_timer_start()
{
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
void polybench_timer_stop()
{
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
if (polybench_program_total_flops == 0)
{
printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
}
else
printf ("%0.2lf\n",
(polybench_program_total_flops /
(double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
printf ("%Ld\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
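/* Hypothetical usage sketch (not part of PolyBench itself): a benchmark driver
brackets its kernel with the timer calls below; `kernel_main' is a
placeholder name for the code under measurement. */
#if 0
static void example_driver ()
{
polybench_timer_start (); /* flushes the cache, then samples the timer */
kernel_main (); /* placeholder: the benchmarked computation */
polybench_timer_stop ();
polybench_timer_print (); /* prints seconds, cycles, or GFLOPS */
}
#endif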
static
void *
xmalloc (size_t num)
{
void* new = NULL;
int ret = posix_memalign (&new, 32, num);
if (! new || ret)
{
fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
exit (1);
}
return new;
}
void* polybench_alloc_data(int n, int elt_size)
{
void* ret = xmalloc (n * elt_size);
return ret;
}
|
sections_construct.c | #include <omp.h>
#include <stdio.h>
void sum_num(int a,int b)
{
printf("Adding %d and %d gives %d on thread %d\n",a,b,a+b, omp_get_thread_num());
}
void sum_n_num(int n)
{
int i;
int sum=0;
for(i=0;i<=n;i++)
{
sum+=i;
}
printf("\nSum of first %d numbers is %d from thread %d\n",n,sum,omp_get_thread_num());
}
int main(int argc, char* argv[])
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
sum_num(2,3);
#pragma omp section
sum_n_num(5);
}
return 0;
}
|
OpenMPWrapper.h | #ifndef PICMDK_OPENMPWRAPPER_H
#define PICMDK_OPENMPWRAPPER_H
/* This file should be included everywhere instead of <omp.h>.
If the code is built with OpenMP it just includes <omp.h>, otherwise it
provides wrappers for OpenMP functions and #pragma omp.
The code including this file can use OpenMP functions and
pragmas independently of whether it will be built with OpenMP or not. */
#ifdef _OPENMP
#include <omp.h>
namespace picmdk {
namespace utility {
inline bool useOpenMP() { return true; }
inline int getNumThreads() {
int numThreads = 0;
#pragma omp parallel
{
#pragma omp master
numThreads = omp_get_num_threads();
}
return numThreads;
}
} // namespace picmdk::utility
} // namespace picmdk
#else
typedef void* omp_lock_t;
inline int omp_get_max_threads() { return 1; }
inline int omp_get_thread_num() { return 0; }
inline int omp_get_num_threads() { return 1; }
inline void omp_init_lock(omp_lock_t *) {}
inline void omp_set_lock(omp_lock_t *) {}
inline void omp_unset_lock(omp_lock_t *) {}
inline void omp_destroy_lock(omp_lock_t *) {}
namespace picmdk {
namespace utility {
inline bool useOpenMP() { return false; }
inline int getNumThreads() { return 1; }
} // namespace picmdk::utility
} // namespace picmdk
#endif
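/* Hypothetical usage sketch: code written against this wrapper compiles and
runs with or without OpenMP; without it, the stubs above make the program
behave as single-threaded. */
#if 0
#include "OpenMPWrapper.h"
void report()
{
int n = picmdk::utility::getNumThreads(); // 1 when built without OpenMP
int id = omp_get_thread_num(); // always 0 without OpenMP
}
#endif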
#ifdef _MSC_VER
#define collapse(N)
#endif
#endif
|
LSBasics.h | #include "graph.h"
#pragma once
struct DFSData {
int *id2dfs; //maps vertex ids to post-order numbers
int *dfs2id; //maps post-order numbers to vertex ids
int *id2parc; //maps vertex ids to parent arcs in the dfs tree
DFSData(int n) {
id2dfs = new int [n+1];
dfs2id = new int [n+1];
id2parc = new int [n+1];
}
~DFSData() {
delete [] id2parc;
delete [] dfs2id;
delete [] id2dfs;
//fprintf (stderr, "Deleted DFS data.\n");
//fflush(stderr);
}
};
class GlobalInfo {
EdgeCost bestfound;
int solved;
public:
int bbpruned; //true iff bb pruned at a node that was not yet solved
EdgeCost fixed; //hack because bb cannot handle fixed costs
//
EdgeCost UpdateBestFound(EdgeCost bf) {
double answer = bestfound;
if (bf != answer) {
#pragma omp critical
{
if (bf < bestfound) {
bestfound = bf;
fprintf (stderr, "[[[ %.2f ]]] ", bestfound);
}
answer = bestfound;
}
}
return answer;
}
inline bool IsSolved() {
return (solved!=0);
}
void MakeSolved() {
solved = 1;
}
GlobalInfo() {
bestfound = INFINITE_COST;
fixed = 0;
solved = 0;
bbpruned = 0;
}
};
class CutRecorder {
public:
vector<int> cutlist;
void Reset() {
cutlist.clear();
}
void Reset(int size) {
cutlist.reserve(size);
cutlist.clear();
}
inline void AddArc(int alabel) {
cutlist.push_back(alabel);
}
inline void CloseCut() {
cutlist.push_back(-1);
}
};
class GraphMapper {
private:
void Init() {
oldn = oldm = 0;
v2new = e2new = NULL;
}
public:
int *v2new;
int *e2new;
int oldn, oldm;
GraphMapper() {
Init();
}
void Reset(int _oldn, int _oldm) {
oldn = _oldn;
oldm = _oldm;
v2new = new int [oldn+1];
e2new = new int [oldm+1];
for (int e=1; e<=oldm; e++) e2new[e] = -1;
for (int v=1; v<=oldn; v++) v2new[v] = -1;
}
~GraphMapper() {
if (v2new) delete [] v2new;
if (e2new) delete [] e2new;
}
void Destroy() {
if (v2new) delete [] v2new;
if (e2new) delete [] e2new;
Init();
}
};
class Basics {
public:
static void ReportResults (FILE *file, const string &prefix, double seconds, EdgeCost solvalue, EdgeCost bestknown) {
fprintf (file, "%ssolution %.20f\n", prefix.c_str(), (double)solvalue);
fprintf(file, "%stimeus %.3f\n", prefix.c_str(), 1000000.0 * seconds);
fprintf(file, "%stimems %.6f\n", prefix.c_str(), 1000.0 * seconds);
fprintf(file, "%stimes %.9f\n", prefix.c_str(), seconds);
double ratio = (double)solvalue / (double)bestknown;
double error = ratio - 1;
fprintf(file, "%sratio %.20f\n", prefix.c_str(), ratio);
fprintf(file, "%serror %.20f\n", prefix.c_str(), error);
fprintf(file, "%spcterror %.20f\n", prefix.c_str(), 100.0 * error);
}
static void fatal (const string &msg) {
fprintf (stderr, "ERROR: %s.\n", msg.c_str());
fflush(stderr);
exit(-1);
}
// this is old; the terminal is not random
static int WrongPickRandomTerminal(Graph &g) {
//fprintf (stderr, "r");
int n = g.VertexCount();
for (int v=1; v<=n; v++) if (g.IsTerminal(v)) return v;
fatal ("could not find terminal");
return 0;
}
static int PickRandomTerminal(Graph &g) {
//fprintf (stderr, "r");
int n = g.VertexCount();
int count = 0;
int target = RFWRandom::getInteger(1,g.TerminalCount());
for (int v=1; v<=n; v++) {
if (g.IsTerminal(v)) {
if (++count == target) return v;
}
}
fatal ("could not find terminal");
return 0;
}
static int PickRandomTerminal(Graph &g, RFWLocalRandom &random) {
static bool first = true;
if (first) {
fprintf (stderr, "PICKRANDOMTERMINAL IS NOT PROPERLY SET.\n");
first = false;
}
int n = g.VertexCount();
int count = 0;
int target = random.GetInteger(1,g.TerminalCount());
for (int v=1; v<=n; v++) {
if (g.IsTerminal(v)) {
if (++count == target) return v;
}
}
fatal ("could not find terminal");
return 0;
}
/// <summary>
/// Perform DFS on the solution, numbering vertices in reverse post-order.
/// Returns the number of vertices visited.
/// </summary>
/// <param name="r">Root of DFS.</param>
/// <param name="solution">Current solution.</param>
/// <param name="dfs2id">Output: map from dfs number to id (-1 if not visited)</param>
/// <param name="id2dfs">Output: map from id to dfs number (-1 if not visited)</param>
/// <param name="id2parc">Output: map from it to parent arc (0 if not visited)</param>
static int DFS (Graph &g, int r, SteinerSolution &solution, DFSData &dfsdata, RFWStack<int> &stack) {
// this is a funny implementation of dfs: when we first scan a vertex, we simply add to the stack
// every nonscanned neighbor---even those that are already in the stack. This requires a stack of size m.
// WARNING! IF WE ARE ONLY SCANNING EDGES OF THE SOLUTION, THE SIZE IS N
int *id2dfs = dfsdata.id2dfs;
int *dfs2id = dfsdata.dfs2id;
int *id2parc = dfsdata.id2parc;
int n = g.VertexCount();
int m = g.EdgeCount();
stack.reset();
//id2dfs: -1:unreached 0:scanned >0:processed
for (int v=0; v<=n; v++) {
id2dfs[v] = -1; //everybody unreached, initially
dfs2id[v] = -1;
id2parc[v] = 0;
}
stack.push(r);
int nextdfs = 1;
while (!stack.isEmpty()) {
int v = stack.pop();
int vdfs = id2dfs[v];
if (vdfs > 0) {continue;} //vertex already processed: nothing else to do
//vertex already scanned, but with no label; we assign it a label now
if (vdfs == 0) {
id2dfs[v] = nextdfs;
dfs2id[nextdfs] = v;
nextdfs++;
continue;
}
//vertex not yet scanned: scan it, put it back on the stack (a label will be assigned later)
stack.push(v);
id2dfs[v] = 0;
//foreach (WeightedGraph.Arc arc in g.ArcEnumerator(v)) {
SPGArc *a, *end;
//for (int pa=g.GetStart(v); pa<g.GetEnd(v); pa++) {
for (g.GetBounds(v,a,end); a<end; a++) {
int alabel = a->label; //g.GetArcLabel(pa);
if (!solution.Contains(alabel)) continue;
int w = a->head; //g.GetArcHead(pa);//arc.head;
if (id2dfs[w] >= 0) continue; //w already scanned: no need to go there again
id2parc[w] = alabel;
stack.push(w);
}
}
return nextdfs - 1;
}
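// Hypothetical usage sketch: tree children receive smaller dfs numbers than
// their parents, so a bottom-up pass over the solution tree can scan the
// numbering upward (names mirror this file; graph setup is assumed elsewhere):
// DFSData data(g.VertexCount());
// RFWStack<int> stack(g.EdgeCount()); // sized m, as the warning above notes
// int visited = Basics::DFS(g, root, solution, data, stack);
// for (int d=1; d<=visited; d++) { int v = data.dfs2id[d]; /* process v */ }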
// add all vertices in the current solution to solnodes
// (vertices with incident edges)
static void MarkSolutionNodes(Graph &g, SteinerSolution &solution, UniverseSet &solnodes) {
int n = g.VertexCount();
for (int v=1; v<=n; v++) {
if (solution.GetDegree(v)>0) solnodes.Insert(v);
}
}
//mark the components containing the elements of the stack
static void MarkComponent(Graph &g, RFWStack<int> &stack, int *id2parc, UniverseSet &marked) {
//Console.Error.Write("+");
//Invariant: a vertex becomes marked when it is inserted into the stack
//A marked vertex is or was on the stack.
bool verbose = false;
if (verbose) fprintf (stderr, "Marking component from %d vertices; marked has %d.", stack.getNElements(), marked.Count());
//make invariants true for original vertices
for (int i = stack.getNElements(); i >= 1; i--) {
int v = stack.peek(i);
if (marked.Contains(v)) fprintf (stderr, "BAD");
marked.Insert(v);
}
int mcount = marked.Count();
//add all relevant tree children to the stack
while (!stack.isEmpty()) {
int v = stack.pop();
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
//for (int pa=g.GetStart(v); pa<g.GetEnd(v); pa++) {
int w = a->head; //g.GetArcHead(pa);
if (id2parc[w]==a->label) { //g.GetArcLabel(pa)) {
if (marked.Insert(w)) {
//mcount ++;
stack.push(w);
}
}
}
/*
foreach (WeightedGraph.Arc arc in g.ArcEnumerator(v)) {
int w = arc.head;
if (id2parc[w]==arc.label) {
if (marked.Insert(w)) stack.Push(w);
}
}*/
}
mcount = marked.Count() - mcount;
if (verbose) fprintf(stderr, "%d elements marked", mcount);
/* if (verbose) {Console.Error.WriteLine(" {0} elements marked.", marked.Count());
foreach (int e in marked.ElementEnumerator()) {Console.Error.Write("+{0} ", e);}}*/
}
static void CheckSolution(Graph &g, SteinerSolution &solution) {
if (!InnerCheck(g, solution, false)) {
fatal ("Invalid solution");
}
}
static bool InnerCheck(Graph &g, SteinerSolution &solution, bool verbose) {
int n = g.VertexCount();
UniverseSet svertices(n);
UnionFind uf(n);
Basics::MarkSolutionNodes(g, solution, svertices);
int ncomp = svertices.Count();
UniverseSet terminals(n);
for (int t=1; t<=g.VertexCount(); t++) {
if (g.IsTerminal(t)) terminals.Insert(t);
}
//Console.Error.WriteLine("Term
//foreach (int e in solution.ElementEnumerator())
int m = g.EdgeCount();
int ecount = 0;
for (int e=1; e<=m; e++) {
if (!solution.Contains(e)) continue;
ecount ++;
int v, w;
g.GetEndpoints(e, v, w);
if (!svertices.Contains(v) || !svertices.Contains(w)) {
fprintf (stderr, "Edge %d=(%d,%d), membership %d %d.\n",
e, v, w, svertices.Contains(v), svertices.Contains(w));
fatal ("Inconsistent vertex membership in solution");
}
terminals.Remove(v);
terminals.Remove(w);
if (uf.Find(v) == uf.Find(w)) {
fprintf (stderr, "Vertices %d, %d already in the same component.", v, w);
fatal ("Solution has a cycle.");
} else {
uf.Union(v, w);
ncomp --;
}
}
if (terminals.Count() > 0) {
fprintf (stderr, "Terminals not in solution: %d.", terminals.Count());
fatal ("Missing terminals in solution.");
}
fprintf (stderr, "[CHECKING:n=%d:m=%d:%d components]", svertices.Count(), ecount, ncomp);
if (verbose) {
for (int v=1; v<=n; v++) {
if (svertices.Contains(v)) {
fprintf (stderr, "%d:%d ", v, uf.Find(v));
}
}
fprintf (stderr, "\n");
}
if (ecount != svertices.Count() - 1) return false;
else return true;
}
/// <summary>
/// Compute the Voronoi diagram of the current graph, given a set of bases
/// and maybe the perturbed cost of the edges.
/// </summary>
/// <param name="voronoi">Output: description of the Voronoi diagram</param>
/// <param name="baselist">List of bases.</param>
/// <param name="heap">Preallocated heap to be used in the computation (will be reset)</param>
/// <param name="pertcost">Edge costs (use original costs if null).</param>
static void ComputeVoronoi(Graph &g, VoronoiData &voronoi, UniverseSet &baselist, BinaryHeap<EdgeCost> &heap, EdgeCost *pertcost) {
const bool GLOBAL_USE_VORONOI_TIE_BREAKER = false; // NOT CLEAR WHERE THIS THING IS SUPPOSED TO BE DEFINED
const bool verbose = false;
voronoi.Reset();
int nbases = 0;
heap.Reset();
// initialize with all bases
int p, pend;
for (baselist.GetBounds(p,pend); p<pend; p++) {
int b = baselist.PickPos(p);
nbases ++;
voronoi.MakeBase(b);
heap.Insert(b, 0);
}
if (verbose) fprintf (stderr, "%d vertices marked as bases.\n", nbases);
int count = 0;
//WARNING: RANDOMIZING THE CHOICE SEEMS TO BE A GOOD IDEA
bool randomize = false;
bool PREFER_TERMINALS = true;
bool USE_TIEBREAKERS = GLOBAL_USE_VORONOI_TIE_BREAKER && (randomize || PREFER_TERMINALS);
//perform multisource Dijkstra
while (!heap.IsEmpty()) {
unsigned int v;
EdgeCost dist;
heap.RemoveFirst(v, dist);
count++;
if (verbose) fprintf (stderr, "%d ", dist);
//foreach (WeightedGraph.Arc arc in g.ArcEnumerator(v)) {
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head; //g.GetArcHead(pa);
EdgeCost newdist = dist;
if (pertcost == NULL) newdist += a->cost; //g.GetArcCost(pa);
else newdist += pertcost[a->label];
bool improve = false;
if (voronoi.GetBase(w) == 0) improve = true;
else if (newdist <= voronoi.GetDistance(w)) improve = true; //using leq here to prefer shorter edges...
else if (USE_TIEBREAKERS && newdist == voronoi.GetDistance(w)) {
if (randomize) {
fatal ("randomization not implemented!\n"); //(stderr, "NOT IMPLEMENTED!\n
//improve = (random.GetInteger(0, 1) == 0); //(arc.cost < g.GetCost(voronoi.GetParentArc(w)));
} else if (PREFER_TERMINALS) {
improve = g.IsTerminal(voronoi.GetBase(v));
}
//improve = (arc.cost < g.GetCost(voronoi.GetParentArc(w)));
}
if (improve) { //make w a tentative child of v
heap.Insert(w, newdist);
voronoi.Update(w, voronoi.GetBase(v), a->label, newdist);
}
}
}
}
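// Illustrative note: the loop above is multisource Dijkstra seeded with every
// base at distance 0; on termination each reached vertex stores its nearest
// base, the distance to it, and the parent arc on that shortest path.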
/// Iteratively removes all degree-one vertices in the solution.
/// Takes O(1) if there are no such vertices. If there are, takes
/// O(n) + degree of all vertices removed.
/// <param name="solution">Original solution (will be modified).</param>
static void Prune(Graph &g, SteinerSolution &solution) {
if (solution.LeafCount()==0) return; //WARNING: THIS SHOULD BE THERE!
int n = g.VertexCount();
for (int v=1; v<=n; v++) {
int t = v;
while (solution.GetDegree(t)==1 && !g.IsTerminal(t)) {
//find the unique incident solution edge
SPGArc *a, *end;
for (g.GetBounds(t,a,end); a<end; a++) {
int alabel = a->label;
if (!solution.Contains(alabel)) continue;
solution.Remove(alabel);
t = g.GetOther(alabel,t); //process the other endpoint next
break;
}
}
}
}
};
|
pzgbbrd_static.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include "bulge.h"
#include "plasma_core_blas.h"
#include <omp.h>
#include <sched.h>
#include <string.h>
#undef REAL
#define COMPLEX
#define shift 3
#define ss_cond_set(m, n, val) \
{ \
plasma->ss_progress[(m)+plasma->ss_ld*(n)] = (val); \
}
#define ss_cond_wait(m, n, val) \
{ \
while (plasma->ss_progress[(m)+plasma->ss_ld*(n)] != (val)) \
sched_yield(); \
}
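/* Illustrative note: these macros implement point-to-point synchronization
over the shared ss_progress table: a task spins (yielding the CPU) until its
predecessor has published the expected sweep number, then publishes its own
progress for dependent tasks. */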
#define AL(m_, n_) (A + nb + lda * (n_) + ((m_)-(n_)))
#define AU(m_, n_) (A + nb + lda * (n_) + ((m_)-(n_)+nb))
// Parallel bulge chasing column-wise - static scheduling
void plasma_pzgbbrd_static (plasma_enum_t uplo, int minmn, int nb, int Vblksiz,
plasma_complex64_t *A, int lda,
plasma_complex64_t *VQ, plasma_complex64_t *TAUQ,
plasma_complex64_t *VP, plasma_complex64_t *TAUP,
double *D, double *E, int wantz,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return;
}
// Check sequence status.
if (sequence->status != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorSequence);
return;
}
// Quick return
if (minmn == 0){
return;
}
int nbtiles = plasma_ceildiv(minmn,nb);
int colblktile = 1;
int grsiz = 1;
int maxrequiredcores = imax( nbtiles/colblktile, 1 );
int colpercore = colblktile*nb;
int thgrsiz = minmn;
// Initialize static scheduler progress table
int cores_num;
#pragma omp parallel
{
cores_num = omp_get_num_threads();
}
int size = 2*nbtiles+shift+cores_num+10;
plasma->ss_progress = (volatile int *)malloc(size*sizeof(int));
for(int index = 0; index < size; index++) plasma->ss_progress[index] = 0;
plasma->ss_ld = (size);
// main bulge chasing code
int allcoresnb = cores_num;
int ii = shift/grsiz;
int stepercol = ii*grsiz == shift ? ii:ii+1;
ii = (minmn-1)/thgrsiz;
int thgrnb = ii*thgrsiz == (minmn-1) ? ii:ii+1;
allcoresnb = imin( allcoresnb, maxrequiredcores );
#pragma omp parallel
{
int coreid, sweepid, myid, stt, st, ed, stind, edind;
int blklastind, colpt, thgrid, thed;
int i,j,m,k;
int my_core_id = omp_get_thread_num();
plasma_complex64_t *WORK = work.spaces[my_core_id];
for (thgrid = 1; thgrid<=thgrnb; thgrid++){
stt = (thgrid-1)*thgrsiz+1;
thed = imin( (stt + thgrsiz -1), (minmn-1));
for (i = stt; i <= minmn-1; i++){
ed = imin(i,thed);
if(stt>ed) break;
for (m = 1; m <=stepercol; m++){
st=stt;
for (sweepid = st; sweepid <=ed; sweepid++){
for (k = 1; k <=grsiz; k++){
myid = (i-sweepid)*(stepercol*grsiz) +(m-1)*grsiz + k;
if(myid%2 ==0){
colpt = (myid/2)*nb+1+sweepid-1;
stind = colpt-nb+1;
edind = imin(colpt,minmn);
blklastind = colpt;
} else {
colpt = ((myid+1)/2)*nb + 1 +sweepid -1 ;
stind = colpt-nb+1;
edind = imin(colpt,minmn);
if( (stind>=edind-1) && (edind==minmn) )
blklastind=minmn;
else
blklastind=0;
}
coreid = (stind/colpercore)%allcoresnb;
if(my_core_id==coreid) {
if(myid==1) {
ss_cond_wait(myid+shift-1, 0, sweepid-1);
plasma_core_zgbtype1cb(uplo, minmn, nb, A, lda, VQ, TAUQ, VP, TAUP, stind-1, edind-1, sweepid-1, Vblksiz, wantz, WORK);
ss_cond_set(myid, 0, sweepid);
if(blklastind >= (minmn-1)) {
for (j = 1; j <= shift; j++)
ss_cond_set(myid+j, 0, sweepid);
}
} else {
ss_cond_wait(myid-1, 0, sweepid);
ss_cond_wait(myid+shift-1, 0, sweepid-1);
if(myid%2 == 0){
plasma_core_zgbtype2cb(uplo, minmn, nb, A, lda, VQ, TAUQ, VP, TAUP, stind-1, edind-1, sweepid-1, Vblksiz, wantz, WORK);
}else{
plasma_core_zgbtype3cb(uplo, minmn, nb, A, lda, VQ, TAUQ, VP, TAUP, stind-1, edind-1, sweepid-1, Vblksiz, wantz, WORK);
}
ss_cond_set(myid, 0, sweepid);
if(blklastind >= (minmn-1)) {
for (j = 1; j <= shift+allcoresnb; j++)
ss_cond_set(myid+j, 0, sweepid);
}
} // if myid==1
} //if my_core_id==coreid
if(blklastind >= (minmn-1)) {
stt++;
break;
}
} // for k=1:grsiz
} // for sweepid=st:ed
} // for m=1:stepercol
} // for i=1:minmn-1
} // for thgrid=1:thgrnb
}
free((void*)plasma->ss_progress);
//===========================================================
// store resulting diag and lower diag D and E
// note that D and E are always real after the bulge chasing
//===========================================================
// sequential code here so only core 0 will work
if( uplo == PlasmaLower ){
for (int i=0; i < minmn-1; i++) {
D[i] = creal(*AL(i,i));
E[i] = creal(*AL(i+1,i));
}
D[minmn-1] = creal(*AL(minmn-1,minmn-1));
}
else {
for (int i=0; i < minmn-1; i++) {
D[i] = creal(*AU(i,i));
E[i] = creal(*AU(i,i+1));
}
D[minmn-1] = creal(*AU(minmn-1,minmn-1));
}
return;
}
#undef AL
#undef AU
|
simd_metadata.c | // RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -fopenmp -triple powerpc64-unknown-unknown -target-abi elfv1-qpx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC-QPX
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp-simd -triple x86_64-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX
// RUN: %clang_cc1 -fopenmp-simd -triple i386-unknown-unknown -target-feature +avx512f -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=X86-AVX512
// RUN: %clang_cc1 -fopenmp-simd -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC
// RUN: %clang_cc1 -fopenmp-simd -triple powerpc64-unknown-unknown -target-abi elfv1-qpx -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK -check-prefix=PPC-QPX
void h1(float *c, float *a, double b[], int size)
{
// CHECK-LABEL: define void @h1
int t = 0;
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
}
// do not emit parallel_loop_access metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
}
// do not emit parallel_loop_access metadata due to usage of safelen clause.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
#pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)
// CHECK: [[C_PTRINT:%.+]] = ptrtoint
// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31
// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])
// CHECK: [[A_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31
// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63
// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15
// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])
// CHECK: [[B_PTRINT:%.+]] = ptrtoint
// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63
// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15
// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31
// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0
// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
}
}
void h2(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h2
int t = 0;
#pragma omp simd linear(t)
for (int i = 0; i < size; ++i) {
c[i] = a[i] * a[i] + b[i] * b[t];
++t;
// CHECK: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access [[LOOP_H2_HEADER:![0-9]+]]
}
}
void h3(float *c, float *a, float *b, int size)
{
// CHECK-LABEL: define void @h3
#pragma omp simd
for (int i = 0; i < size; ++i) {
for (int j = 0; j < size; ++j) {
c[j*i] = a[i] * b[j];
}
}
// do not emit parallel_loop_access for nested loop.
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.mem.parallel_loop_access {{![0-9]+}}
}
// Metadata for h1:
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_16:![0-9]+]], [[LOOP_VEC_ENABLE:![0-9]+]]}
// CHECK: [[LOOP_WIDTH_16]] = !{!"llvm.loop.vectorize.width", i32 16}
// CHECK: [[LOOP_VEC_ENABLE]] = !{!"llvm.loop.vectorize.enable", i1 true}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8:![0-9]+]], [[LOOP_VEC_ENABLE]]}
// CHECK: [[LOOP_WIDTH_8]] = !{!"llvm.loop.vectorize.width", i32 8}
// CHECK: [[LOOP_H1_HEADER:![0-9]+]] = distinct !{[[LOOP_H1_HEADER]], [[LOOP_WIDTH_8]], [[LOOP_VEC_ENABLE]]}
//
// Metadata for h2:
// CHECK: [[LOOP_H2_HEADER]] = distinct !{[[LOOP_H2_HEADER]], [[LOOP_VEC_ENABLE]]}
//
// Metadata for h3:
// CHECK: [[LOOP_H3_HEADER:![0-9]+]] = distinct !{[[LOOP_H3_HEADER]], [[LOOP_VEC_ENABLE]]}
//
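// Summary of the checks above: safelen(N) alone pins llvm.loop.vectorize.width
// to N and suppresses llvm.mem.parallel_loop_access; adding simdlen(M) lowers
// the width to M; simdlen(M) without safelen sets width M while keeping the
// parallel_loop_access metadata, since no maximum dependence distance is imposed.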
|
GB_binop__iseq_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_int32
// A.*B function (eWiseMult): GB_AemultB__iseq_int32
// A*D function (colscale): GB_AxD__iseq_int32
// D*A function (rowscale): GB_DxB__iseq_int32
// C+=B function (dense accum): GB_Cdense_accumB__iseq_int32
// C+=b function (dense accum): GB_Cdense_accumb__iseq_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int32
// C=scalar+B GB_bind1st__iseq_int32
// C=scalar+B' GB_bind1st_tran__iseq_int32
// C=A+scalar GB_bind2nd__iseq_int32
// C=A'+scalar GB_bind2nd_tran__iseq_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32)
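// Scalar sketch (illustrative only, not part of the generated API): ISEQ is
// the "is equal" operator whose result stays in the operand type, so for this
// file cij is an int32_t 0 or 1 (unlike EQ, whose C type would be bool).
static inline int32_t GB_iseq_int32_sketch (int32_t aij, int32_t bij)
{
return (aij == bij) ;
}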
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__iseq_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__iseq_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__iseq_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__iseq_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__iseq_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__iseq_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__iseq_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_1x1_pack1to4_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack1to4_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_pack1to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = w - 2 * outw + w;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
outptr[0] = r0[0];
r0 += 2;
outptr += 1;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack1to4_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
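// A minimal scalar sketch (plain float, single channel, illustrative names) of
// the stride-2 "shrink" above, assuming the same tailstep arithmetic: after
// reading 2*outw elements of row 2*i, the pointer skips the row remainder plus
// one full row to land on the start of row 2*(i+1).
static void shrink_s2_sketch(const float* src, float* dst, int w, int outw, int outh)
{
const int tailstep = w - 2 * outw + w;
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
*dst++ = *src; // keep every second element horizontally
src += 2;
}
src += tailstep; // skip to the next even input row
}
}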
|
pmm-OpenMP.c | #include <stdlib.h> // library providing atoi(), malloc() and free()
#include <stdio.h>  // library providing printf()
#include <string.h> // strcmp()
#include <time.h>   // time_t, time()
#ifdef _OPENMP
#include <omp.h>
#else
// No-op fallbacks so the file still compiles without OpenMP support.
#define omp_set_dynamic(x)
#define omp_set_num_threads(x)
#endif
int main(int argc, char ** argv){
int **A, **B, **C;
int i, k, j, N;
double cgt1, cgt2, ncgt; // for execution timing
time_t t;
// Seed for rand()
srand((unsigned) time(&t));
// Read the number of rows x columns of the square matrix
if(argc < 5){
fprintf(stderr,"Error: %s <N_rows> <N_threads/max> <Chunk default/(0...i)> <Sched (static, dynamic, guided)>\n", argv[0]);
exit(-1);
}
N = atoi(argv[1]);
// == OpenMP directives
// ====================================================>
int chunk = 0;
omp_sched_t kind;
if(strcmp(argv[3], "default") == 0)
omp_get_schedule(&kind, &chunk);
else
chunk = atoi(argv[3]);
// Override OMP_SCHEDULE (use the named omp_sched_t constants instead of raw 1/2/3)
if(strcmp(argv[4], "static") == 0) omp_set_schedule(omp_sched_static, chunk);
else if(strcmp(argv[4], "dynamic") == 0) omp_set_schedule(omp_sched_dynamic, chunk);
else if(strcmp(argv[4], "guided") == 0) omp_set_schedule(omp_sched_guided, chunk);
else {
printf("Error in the thread work-sharing schedule argument (static, dynamic, guided)\n");
exit(-1);
}
int nhebras;
if(strcmp(argv[2], "max") == 0) omp_set_num_threads(omp_get_num_procs());
else {
nhebras = atoi(argv[2]);
omp_set_num_threads(nhebras);
}
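// Note: schedule(runtime) on the parallel loops below picks up the kind/chunk
// pair installed above via omp_set_schedule(), the programmatic counterpart of
// setting the OMP_SCHEDULE environment variable.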
// == Memory allocation
// ====================================================>
A = (int**) malloc (N*sizeof(int*));
B = (int**) malloc (N*sizeof(int*));
C = (int**) malloc (N*sizeof(int*));
#pragma omp parallel for shared(A,B,C,N) private(i) default(none) schedule(runtime)
for(i = 0; i<N; i++){
A[i] = (int*) malloc (N*sizeof(int));
B[i] = (int*) malloc (N*sizeof(int));
C[i] = (int*) malloc (N*sizeof(int));
if( A[i] == NULL || B[i] == NULL || C[i] == NULL){
printf("Error en la reserva de espacio para las matrices\n");
exit(-2);
}
}
// == Initialization
// ====================================================>
// (rand() is not guaranteed to be thread-safe, so the values produced inside
// the parallel loop below are implementation-dependent)
#pragma omp parallel for shared(A,B,C,N) private(i,k) default(none) schedule(runtime)
for(i = 0; i<N; i++){
for(k = 0; k<N; k++){
A[i][k] = 0;
B[i][k] = rand() % 8;
C[i][k] = rand() % 8;
}
}
// == Computation
// ====================================================>
cgt1 = omp_get_wtime();
#pragma omp parallel for shared(A,B,C,N) private(i,j,k) default(none) schedule(runtime)
for(i = 0; i<N; i++){
for(j = 0; j<N; j++)
for(k = 0; k<N; k++)
A[i][j] += B[i][k] * C[k][j];
}
cgt2 = omp_get_wtime();
ncgt = (double)(cgt2 - cgt1);
// == Print results
// ====================================================>
printf("Time(sec.):%11.9f\n", ncgt);
printf("Total memory reserved by the matrices: %zu bytes\n", 3*N*N*sizeof(int));
printf("Matrix size: %dx%d -> %zu bytes\n", N, N, N*N*sizeof(int));
// Printing the first and last elements of the result keeps the compiler's
// optimizer from eliminating the multiplication loop as dead code.
printf("A[0][0] = %d ... A[N-1][N-1] = %d \n", A[0][0], A[N-1][N-1]);
if(N < 4){
printf("\n----------- Matrix B ----------- \n");
for(i = 0; i<N; i++){
for(k = 0; k<N; k++)
printf("%d\t", B[i][k]);
printf("\n");
}
printf("\n----------- Matrix C ----------- \n");
for(i = 0; i<N; i++){
for(k = 0; k<N; k++)
printf("%d\t", C[i][k]);
printf("\n");
}
printf("\n----------- Matrix A (Result) ----------- \n");
for(i = 0; i<N; i++){
for(k = 0; k<N; k++)
printf("%d\t", A[i][k]);
printf("\n");
}
}
// == Free memory
// ====================================================>
#pragma omp parallel for private(i) shared(A,B,C,N) default(none) schedule(runtime)
for(i = 0; i<N; i++){
free(A[i]);
free(B[i]);
free(C[i]);
}
free(A);
free(B);
free(C);
return 0;
} |
GB_unop__cos_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__cos_fp32_fp32
// op(A') function: GB_unop_tran__cos_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = cosf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cosf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = cosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FP32)
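// Note: when GB_DISABLE evaluates true (GxB_NO_COS or GxB_NO_FP32 set at
// compile time), the two kernels below return GrB_NO_VALUE and the caller
// falls back to the generic, function-pointer-based implementation.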
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__cos_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = cosf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__cos_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_uint16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint16_fp32
// op(A') function: GB_tran__minv_uint16_fp32
// C type: uint16_t
// A type: float
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 16)
#define GB_ATYPE \
float
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 16) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT16 || GxB_NO_FP32)
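// Expansion sketch of GB_CAST_OP above for this file's type pair: the float
// aij is first cast to uint16_t via GB_CAST_UNSIGNED (which handles NaN and
// out-of-range values; the exact semantics live in GB.h), then replaced by its
// 16-bit integer reciprocal as defined by GB_IMINV_UNSIGNED.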
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint16_fp32
(
uint16_t *Cx, // Cx and Ax may be aliased
float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint16_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lor_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lor_fp64
// A.*B function (eWiseMult): GB_AemultB__lor_fp64
// A*D function (colscale): GB_AxD__lor_fp64
// D*A function (rowscale): GB_DxB__lor_fp64
// C+=B function (dense accum): GB_Cdense_accumB__lor_fp64
// C+=b function (dense accum): GB_Cdense_accumb__lor_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lor_fp64
// C=scalar+B GB_bind1st__lor_fp64
// C=scalar+B' GB_bind1st_tran__lor_fp64
// C=A+scalar GB_bind2nd__lor_fp64
// C=A'+scalar GB_bind2nd_tran__lor_fp64
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = ((aij != 0) || (bij != 0))
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ((x != 0) || (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOR || GxB_NO_FP64 || GxB_NO_LOR_FP64)
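// Scalar sketch (illustrative only, not part of the generated API): LOR on
// fp64 treats any nonzero value as true, and the bool result converts back to
// double, so Cx entries are exactly 0.0 or 1.0.
static inline double GB_lor_fp64_sketch (double x, double y)
{
return (double) ((x != 0) || (y != 0)) ;
}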
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__lor_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *GB_RESTRICT Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__lor_fp64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double bij = Bx [p] ;
Cx [p] = ((x != 0) || (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__lor_fp64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
Cx [p] = ((aij != 0) || (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((x != 0) || (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__lor_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) || (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__lor_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
squareddifference_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
int ref_squareddifference_fp32(struct tensor* input_tensor_0, struct tensor* input_tensor_1,
struct tensor* output_tensor, int num_thread)
{
// dims size = 1, 2 or 3
if (input_tensor_0->dim_num < 4)
{
float* input0 = input_tensor_0->data;
float* input1 = input_tensor_1->data;
float* output = output_tensor->data;
int total_size = output_tensor->elem_num;
for (int i = 0; i < total_size; i++)
{
output[i] = powf((input0[i] - input1[i]), 2);
}
return 0;
}
// dims size = 4
else if (output_tensor->dim_num == 4)
{
int w = output_tensor->dims[3];
int h = output_tensor->dims[2];
int channels = output_tensor->dims[1];
int size = h * w;
int c_step = h * w;
float* input0 = input_tensor_0->data;
float* input1 = input_tensor_1->data;
float* output = output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src0 = input0 + c_step * q;
float* src1 = input1 + c_step * q;
float* dst = output + c_step * q;
for (int i = 0; i < size; i++)
{
dst[i] = powf((src0[i] - src1[i]), 2);
}
}
return 0;
}
return -1;
}
int ref_squareddifference_uint8(struct tensor* input_tensor_0, struct tensor* input_tensor_1,
struct tensor* output_tensor, int num_thread)
{
/* dequant */
uint8_t* input0_uint8 = input_tensor_0->data;
uint8_t* input1_uint8 = input_tensor_1->data;
uint8_t* output_uint8 = output_tensor->data;
float input0_scale = input_tensor_0->scale;
float input1_scale = input_tensor_1->scale;
float output_scale = output_tensor->scale;
int32_t input0_zero = input_tensor_0->zero_point;
int32_t input1_zero = input_tensor_1->zero_point;
int32_t output_zero = output_tensor->zero_point;
int input0_size = input_tensor_0->elem_num;
int input1_size = input_tensor_1->elem_num;
int output_size = output_tensor->elem_num;
float* input0 = ( float* )sys_malloc(input0_size * sizeof(float));
float* input1 = ( float* )sys_malloc(input1_size * sizeof(float));
float* output = ( float* )sys_malloc(output_size * sizeof(float));
for (int i = 0; i < input0_size; i++)
{
input0[i] = (( float )input0_uint8[i] - ( float )input0_zero) * input0_scale;
}
for (int i = 0; i < input1_size; i++)
{
input1[i] = (( float )input1_uint8[i] - ( float )input1_zero) * input1_scale;
}
// dims size = 1, 2 or 3
int ret = -1;
if (input_tensor_0->dim_num < 4)
{
int total_size = output_tensor->elem_num;
for (int i = 0; i < total_size; i++)
{
output[i] = powf((input0[i] - input1[i]), 2);
}
ret = 0;
}
// dims size = 4
else if (output_tensor->dim_num == 4)
{
int w = output_tensor->dims[3];
int h = output_tensor->dims[2];
int channels = output_tensor->dims[1];
int size = h * w;
int c_step = h * w;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src0 = input0 + c_step * q;
float* src1 = input1 + c_step * q;
float* dst = output + c_step * q;
for (int i = 0; i < size; i++)
{
dst[i] = powf((src0[i] - src1[i]), 2);
}
}
ret = 0;
}
/* quant: requantize the fp32 result back to uint8, then release the temporaries */
if (ret == 0)
{
for (int i = 0; i < output_size; i++)
{
int udata = round(output[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
}
sys_free(input0);
sys_free(input1);
sys_free(output);
return ret;
}
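/* Quantization scheme assumed above (standard uint8 affine mapping):
   dequantize: real = (q - zero_point) * scale
   requantize: q = clamp(round(real / scale) + zero_point, 0, 255) */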
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor_0;
struct tensor* input_tensor_1;
struct tensor* output_tensor;
int layout = ir_graph->graph_layout;
input_tensor_0 = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
input_tensor_1 = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
int ret = -1;
if (input_tensor_0->data_type == TENGINE_DT_FP32)
ret = ref_squareddifference_fp32(input_tensor_0, input_tensor_1, output_tensor, exec_graph->num_thread);
else if(input_tensor_0->data_type == TENGINE_DT_UINT8)
ret = ref_squareddifference_uint8(input_tensor_0, input_tensor_1, output_tensor, exec_graph->num_thread);
return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_squareddifference_ref_op()
{
return register_builtin_node_ops(OP_SQUAREDDIFFERENCE, &hcl_node_ops);
}
int unregister_squareddifference_ref_op()
{
return unregister_builtin_node_ops(OP_SQUAREDDIFFERENCE, &hcl_node_ops);
}
|
ParallelJobsOpenMP.h | /*
* Copyright (C) 2011 University of Szeged
* Copyright (C) 2011 Gabor Loki <loki@webkit.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef ParallelJobsOpenMP_h
#define ParallelJobsOpenMP_h
#if ENABLE(THREADING_OPENMP)
#include <omp.h>
namespace WTF {
class ParallelEnvironment {
WTF_MAKE_NONCOPYABLE(ParallelEnvironment);
public:
typedef void (*ThreadFunction)(void*);
ParallelEnvironment(ThreadFunction threadFunction, size_t sizeOfParameter, int requestedJobNumber) :
m_threadFunction(threadFunction),
m_sizeOfParameter(sizeOfParameter)
{
int maxNumberOfThreads = omp_get_max_threads();
if (!requestedJobNumber || requestedJobNumber > maxNumberOfThreads)
requestedJobNumber = maxNumberOfThreads;
ASSERT(requestedJobNumber > 0);
m_numberOfJobs = requestedJobNumber;
}
int numberOfJobs()
{
return m_numberOfJobs;
}
void execute(unsigned char* parameters)
{
omp_set_num_threads(m_numberOfJobs);
#pragma omp parallel for
for (int i = 0; i < m_numberOfJobs; ++i)
(*m_threadFunction)(parameters + i * m_sizeOfParameter);
}
private:
ThreadFunction m_threadFunction;
size_t m_sizeOfParameter;
int m_numberOfJobs;
};
} // namespace WTF
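// Usage sketch (hypothetical caller; myThreadFunction, MyParam and kJobs are
// illustrative names, not part of this header): pack one parameter record per
// job into a contiguous buffer and hand it to execute(); OpenMP then invokes
// the thread function once per job, each call receiving its own slice.
//
//   WTF::ParallelEnvironment env(&myThreadFunction, sizeof(MyParam), kJobs);
//   MyParam params[kJobs];
//   env.execute(reinterpret_cast<unsigned char*>(params));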
#endif // ENABLE(THREADING_OPENMP)
#endif // ParallelJobsOpenMP_h
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
template<typename DType>
inline DType sigmoid(DType x) {
return 1.0f / (1.0f + exp(-x));
}
template<typename DType>
inline DType relu(DType x) {
return x > 0.0f ? static_cast<float>(x) : 0.0f;
}
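// Gate recurrences implemented by the LSTM kernels below, per time step t
// (i: input, f: forget, g: cell candidate, o: output):
//   i_t = sigmoid(Wx_i x_t + Wh_i h_{t-1} + bx_i + bh_i)
//   f_t = sigmoid(Wx_f x_t + Wh_f h_{t-1} + bx_f + bh_f)
//   g_t = tanh   (Wx_g x_t + Wh_g h_{t-1} + bx_g + bh_g)
//   o_t = sigmoid(Wx_o x_t + Wh_o h_{t-1} + bx_o + bh_o)
//   c_t = f_t * c_{t-1} + i_t * g_t
//   h_t = o_t * tanh(c_t)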
template<typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
DType* rs,
bool state_outputs,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const int cell_size = N * H;
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < T; ++i) {
int t = bid ? T - 1 - i : i;
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
h[j][k] = ht;
// reserve
y[t][j][k + offset] = ht;
c[i][j][k] = ct;
ifgo[i][j][k][0] = it;
ifgo[i][j][k][1] = ft;
ifgo[i][j][k][2] = gt;
ifgo[i][j][k][3] = ot;
if (i == T - 1 && state_outputs) {
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
}
}
}
}
template <typename DType>
void LstmForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
const float dropout) {
DType* dropout_random = rs;
DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int r_size = D * T * N * H * 6;
const int y_offset = T * N * H * 5;
const int cell_size = N * H;
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
int idx = 0; // state & cell state's idx;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < L; ++i) {
const int input_size = i ? H * D : I;
const int w_size = (input_size + H) * H * 4;
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x,
hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x,
hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
}
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
if (dropout > 0.0f) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < T * N * H * D; j++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[i * T * N * H * D + j] = 0;
y.dptr_[j] = 0;
} else {
dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
}
}
}
x_ptr = y.dptr_;
rs2 += r_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = (rs2 + y_offset)[i];
}
}
template<typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
bool state_outputs,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const int cell_size = N * H;
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int i = 0; i < T; ++i) {
int t = bid ? T - 1 - i : i;
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
y[t][j][k + offset] = ht;
if (i == T - 1 && state_outputs) {
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
} else {
h[j][k] = ht;
c[j][k] = ct;
}
}
}
}
template <typename DType>
void LstmForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr) {
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int cell_size = N * H;
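  // LstmForwardInferenceSingleLayer lays out its workspace as
  // [yx: T*N*4H][yh: N*4H][h: N*H][c: N*H] = (T + 1)*N*4H + 2*N*H,
  // so the ping-pong output buffer starts immediately after it.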
DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
DType* y_cur_ptr = y_ptr;
  int idx = 0;  // index into the stacked hx/cx states (one slot per layer per direction)
  bool flag = (L % 2 == 0);  // ping-pong parity so the last layer writes directly to y_ptr
for (int i = 0; i < L; ++i) {
const int input_size = i ? H * D : I;
const int w_size = (input_size + H) * H * 4;
// If bidirectional, need space to save current layer output y.
if (D == 2) {
y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
flag = !flag;
}
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, H * D));
LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H,
x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
// If bidirectional, then calculate the reverse direction's forward result.
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H,
x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr);
}
    // No pointer advance is needed after the last layer.
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
x_ptr = y_cur_ptr;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
}
}
}
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
DType* rs,
DType* tmp_buf,
bool bid,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
const Tensor<cpu, 2, DType> &cx,
const Tensor<cpu, 3, DType> &y,
const Tensor<cpu, 3, DType> &dy,
const Tensor<cpu, 2, DType> &dx,
const Tensor<cpu, 2, DType> &dhx,
const Tensor<cpu, 2, DType> &dcx,
DType* dhy_ptr,
DType* dcy_ptr,
DType* w_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
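  // Per-layer reserved space: the forward direction stores c (T*N*H) followed
  // by the activated gates ifgo (T*N*H*4); the layer output y (T*N*D*H) comes
  // next, so for bidirectional layers the reverse direction's c begins at
  // offset T*N*H*7.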
DType *c_ptr = bid ? rs + T * N * H * 7 : rs;
const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * 4 * H; ++i) {
dwh.dptr_[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 4 * H; ++i) {
dbx.dptr_[i] = 0;
dbh.dptr_[i] = 0;
}
}
Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta0 = 0.0;
const DType beta1 = 1.0;
const DType beta2 = 2.0;
const int cell_size = N * H;
if (dhy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = dhy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dh.dptr_[i] = 0;
}
}
if (dcy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = dcy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < cell_size; ++i) {
dc.dptr_[i] = 0;
}
}
for (int i = T - 1; i >= 0; --i) {
int t = bid ? T - 1 - i : i;
int tnext = bid ? t + 1 : t - 1;
const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
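    // Gradient equations computed below (i, f, g, o were stored already
    // activated in the forward pass; tc = tanh(c_t)):
    //   dh_t += dy_t
    //   dc_t += dh_t * o_t * (1 - tc^2)
    //   di = dc_t * g_t * i_t * (1 - i_t)
    //   df = dc_t * c_{t-1} * f_t * (1 - f_t)
    //   dg = dc_t * i_t * (1 - g_t^2)
    //   do = dh_t * tc * o_t * (1 - o_t)
    //   dc_{t-1} = dc_t * f_t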
#pragma omp parallel for num_threads(omp_threads)
for (int jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType tc = tanh(c[i][j][k]);
DType it = ifgo[i][j][k][0];
DType ft = ifgo[i][j][k][1];
DType gt = ifgo[i][j][k][2];
DType ot = ifgo[i][j][k][3];
dh[j][k] += dy[t][j][k + offset];
dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
if (req_statecell != kNullOp || i > 0) {
dcnext[j][k] = dc[j][k] * ft;
}
if (i) {
htmp[j][k] = y[tnext][j][k + offset];
}
}
Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
if (req_state != kNullOp || i > 0) {
linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
}
if (req_params != kNullOp) {
if (req_params != kAddTo) {
linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
} else {
linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
        // for kAddTo, accumulate dwx into the running gradient at every time step
Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
}
}
}
Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
if (req_data != kNullOp) {
linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
}
if (req_params != kNullOp && req_params != kAddTo) {
linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
}
const int row = T * N;
const int col = H * 4;
if (req_params != kNullOp) {
if (req_params != kAddTo) {
for (int i = 0; i < row; ++i) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += dyx[i][j];
dbh[j] = dbx[j];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < col * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
for (int i = 0; i < N; ++i) {
tmp_dbx[j][t] += dyx[t * N + i][j];
tmp_dbh[j][t] = tmp_dbx[j][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < col; ++j) {
dbx[j] += tmp_dbx[j][t] + dbx[j];
dbh[j] += tmp_dbh[j][t] + dbh[j];
}
}
}
}
}
template <typename DType>
void LstmBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
const int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dcy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dcx_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell,
const float dropout) {
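  // Reserved-space layout written by LstmForwardTraining: the first
  // (L - 1) * D * T * N * H entries hold the dropout masks and the per-layer
  // states follow (rs2). dropout_random starts at the end of the mask region
  // and is stepped back one layer at a time below.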
DType* dropout_random = rs + (L - 1) * D * T * N * H;
DType* rs2 = rs + (L - 1) * D * T * N * H;
DType* tmp_buf = ws;
DType* ws2 = tmp_buf + 8 * T * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
const int b_size = 2 * H * 4;
const int r_size = D * T * N * H * 6;
const int y_offset = T * N * H * 5;
const int w_size1 = (I + H) * H * 4; // first layer
const int w_size2 = (D * H + H) * H * 4; // other layers
const int cell_size = N * H;
DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
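  // Walk the layers from top to bottom: each iteration consumes dy for layer i
  // and produces dx, which becomes dy for layer i - 1 (dy_ptr is redirected to
  // dx.dptr_ at the end of the loop body).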
for (int i = L - 1; i >= 0; --i) {
const int input_size = i ? H * D : I;
const int w_size = i ? w_size2 : w_size1;
int idx = i * D;
DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
DType* db_cur_ptr = db_ptr + i * b_size * D;
DType* rs_cur_ptr = rs2 + i * r_size;
DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr;
DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr;
Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
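    // Layer i's input x is layer i - 1's output y, stored one r_size block
    // earlier in the reserved space; layer 0 reads the original x_ptr.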
Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H,
x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
req_data, req_params, req_state, req_statecell);
if (D == 2) {
w_cur_ptr += w_size;
dw_cur_ptr += w_size;
db_cur_ptr += b_size;
++idx;
dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr;
dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr;
LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H,
x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx],
dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr,
req_data, req_params, req_state, req_statecell);
}
if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < T * N * D * H; j++) {
if (dropout_random[j] == 0) {
dx.dptr_[j] = 0;
} else {
dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
}
}
}
dy_ptr = dx.dptr_;
}
}
template<typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gemmC2 + N * 3 * H;
DType* zt = rt + N * H;
DType* nt = zt + N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2: nullptr;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
    // forward direction: x * wx was precomputed for all steps above; now
    // compute ht-1 * wh for this step (ht-1: [N, H], wh: [3 * H, H])
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * 3 * H;
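    // GRU cell computed below (r/z/n are the reset, update, and candidate
    // gates; * is elementwise):
    //   r_t = sigmoid(x_t Wx_r + h_{t-1} Wh_r + bx_r + bh_r)
    //   z_t = sigmoid(x_t Wx_z + h_{t-1} Wh_z + bx_z + bh_z)
    //   n_t = tanh(x_t Wx_n + bx_n + r_t * (h_{t-1} Wh_n + bh_n))
    //   h_t = (1 - z_t) * n_t + z_t * h_{t-1}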
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
    // reverse direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last time step's state to hy, reshaping from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void GruForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
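  // Biases are packed after all layer weights: skip layer 0's wx/wh for both
  // directions plus the (D*H + H)-input weights of the remaining L - 1 layers.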
DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
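    // Ping-pong between the output and the workspace buffer so the final
    // layer (l == L - 1 makes L + l odd) writes directly into y_ptr.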
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
}
template<typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gateR;
DType* zt = gateZ;
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2 : nullptr;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_Mnh = Mnh + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
    // forward direction: x * wx was precomputed for all steps above; now
    // compute ht-1 * wh for this step (ht-1: [N, H], wh: [3 * H, H])
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * 3 * H;
DType* Mnht = Mnh + t * N * H;
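    // Mnh caches h_{t-1} Wh_n + bh_n so the backward pass can form the
    // reset-gate gradient without redoing this GEMM.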
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
+ bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
+ bx[1][j] + bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] +
zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
    // reverse direction
if (D == 2) {
rt = back_gateR + (T - 1 - t) * N * H;
zt = back_gateZ + (T - 1 - t) * N * H;
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int rtb = i * 3 * H;
int ztb = i * 3 * H + H;
int ntb = i * 3 * H + 2 * H;
back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] +
gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] +
gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
+ rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
+ zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last time step's state to hy, reshaping from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void GruForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
const float dropout) {
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
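  // Reserved-space layout: gate activations r/z/n per layer, then the layer
  // outputs y, the cached Mnh terms, and finally the dropout masks.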
DType* gateR_l = rs;
DType* gateZ_l = gateR_l + L * T * D * N * H;
DType* gateN_l = gateZ_l + L * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* Mnh_l = y_l + L * T * N * H * D;
DType* dropout_random = Mnh_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* ws2 = tmp_buf + D * N * H;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
DType* y_tmp = x_ptr;
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
for (int l = 0; l < L; l++) {
if (l != 0) {
y_tmp = y_l;
y_l = y_l + T * N * H * D;
}
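    // Inverted dropout on the previous layer's output, mirroring the LSTM
    // training path; the realized mask is saved in dropout_random for backward.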
if (dropout > 0.0f && l > 0) {
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[(l - 1) * T * N * I + i] = 0;
y_tmp[i] = 0;
} else {
dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
y_tmp[i] = y_tmp[i] / (1.0f - dropout);
}
}
}
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
gateR_l, gateZ_l, gateN_l, Mnh_l, y_l, hy_l);
gateR_l = gateR_l + T * D * N * H;
gateZ_l = gateZ_l + T * D * N * H;
gateN_l = gateN_l + T * D * N * H;
Mnh_l = Mnh_l + T * D * N * H;
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = y_l[i];
}
}
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* rt;
DType* zt;
DType* nt;
DType* dat;
DType* dart;
DType* dar = ws; // [T, N, 3 * H]
DType* da = dar + T * N * 3 * H; // [T, N, 3 * H]
DType* dht1 = da + T * N * 3 * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* Mnht = Mnh;
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_Mnht = Mnh + T * N * H;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
DType* back_dbx = dbx + 3 * H * 2;
DType* back_dbh = dbh + 3 * H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * 3 * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * 3 * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (int t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
    // accumulate dy[t] ([T, N, D, H]) into the running hidden-state gradient dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
Mnht = Mnh + t * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
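    // Gate gradients computed below (Mnht = h_{t-1} Wh_n + bh_n was cached in
    // the forward pass):
    //   dn = dh_t * (1 - z_t) * (1 - n_t^2)
    //   dz = dh_t * (h_{t-1} - n_t) * z_t * (1 - z_t)
    //   dr = dn * Mnht * r_t * (1 - r_t)
    //   dar's n slot holds dn * r_t (the pre-reset candidate gradient)
    //   dh_{t-1} = dh_t * z_t, plus dart * wh added by the GEMM below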
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) *
zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
dht1[id] = dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
      // dht1 += dart * wh : [N, H] += [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
    // dbx = ones(1, T * N) * da and dbh = ones(1, T * N) * dar: reduce over batch and time
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N * T; ++j) {
dbx[i] += da[j * 3 * H + i];
dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] += tmp_dbh[i][t] + dbh[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (int t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
      // accumulate the reverse-direction slice of dy[t] into back_dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
rt = back_gateR + t * N * H;
zt = back_gateZ + t * N * H;
nt = back_gateN + t * N * H;
back_Mnht = Mnh + (T + t) * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
nt[id]) * zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] *
(1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
back_dht1[id] = back_dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
        // back_dht1 += dart * back_wh : [N, H] += [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // back_dwh += dart.T * back_ht1 : [3 * H, H] += [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
      // back_dbx = ones(1, T * N) * da and back_dbh = ones(1, T * N) * dar: reduce over batch and time
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N * T; ++j) {
back_dbx[i] += da[j * 3 * H + i];
back_dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
    // dx += da * back_wx : [T * N, I] += [T * N, 3 * H] * [3 * H, I] (reverse direction)
Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
template <typename DType>
void GruBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dw_ptr,
int req_data,
int req_params,
int req_state,
const float dropout) {
DType* wx = w_ptr;
DType* dwx = dw_ptr;
DType* dwh = dwx + I * H * 3;
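  // Bias gradients are packed after all weight gradients, mirroring the
  // weight/bias layout used by the GRU forward functions.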
DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
+ (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* gateR_l = rs + (L - 1) * T * D * N * H;
DType* gateZ_l = gateR_l + L * T * D * N * H;
DType* gateN_l = gateZ_l + L * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* Mnh_l = y_l + L * T * N * H * D;
DType* dropout_random = Mnh_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
DType* ws2 = dx_l + T * N * D * H;
DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
+ D * I * 3 * H + D * H * 3 * H;
DType* wh_l = wx_l;
if (L == 1) {
wh_l = wh_l + I * H * 3;
} else {
wh_l = wh_l + (D * H) * H * 3;
}
DType* dhy_l = nullptr;
if (dhy_ptr)
dhy_l = dhy_ptr + (L - 1) * D * N * H;
DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
+ D * I * 3 * H + D * H * 3 * H;
DType* dwh_l = nullptr;
if (L == 1) {
dwh_l = dwx_l + I * H * 3;
} else {
dwh_l = dwx_l + (D * H) * H * 3;
}
DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
DType* dbh_l = dbx_l + 3 * H;
DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
DType* dy_l = dy_ptr;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
int inputsize = I;
DType* y_tmp = y_l - T * N * H * D;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int l = L - 1; l >= 0; --l) {
if (l == 0) {
I = inputsize;
y_tmp = x_ptr;
dx_l = dx_ptr;
} else {
I = D * H;
}
Tensor<cpu, 2, DType> hx_l = hx[l];
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l,
dhy_l, gateR_l, gateZ_l, gateN_l, Mnh_l, dx_l, dhx_l,
dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state);
if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
if (dropout_random[i] == 0) {
dx_l[i] = 0;
} else {
dx_l[i] = dx_l[i] / (1.0f - dropout);
}
}
}
if (l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
dy_l[i] = dx_l[i];
}
gateR_l = gateR_l - T * D * N * H;
gateZ_l = gateZ_l - T * D * N * H;
gateN_l = gateN_l - T * D * N * H;
Mnh_l = Mnh_l - T * D * N * H;
dhx_l = dhx_l - D * N * H;
if (dhy_l)
dhy_l = dhy_l - D * N * H;
y_l = y_l - T * N * H * D;
y_tmp = y_l;
if (l == 1) {
wx_l = wx_l - (inputsize + H) * H * 3 * D;
wh_l = wx_l + inputsize * 3 * H;
dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
dwh_l = dwx_l + inputsize * 3 * H;
} else {
wx_l = wx_l - (I + H) * H * 3 * D;
wh_l = wx_l + I * 3 * H;
dwx_l = dwx_l - (I + H) * H * 3 * D;
dwh_l = dwx_l + I * 3 * H;
}
dbx_l = dbx_l - D * 3 * H * 2;
dbh_l = dbx_l + 3 * H;
}
}
}
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, H]
DType* gemmC2 = gemmC1 + D * T * N * H; // N * H
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2: nullptr;
DType* back_gemmC1 = gemmC1 + T * N * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
// x * wx.T : [T * N, I] * [I, H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
    // forward direction: x * wx was precomputed for all steps above; now
    // compute ht-1 * wh for this step (ht-1: [N, H], wh: [H, H])
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * H;
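    // Elman cell computed below: h_t = act(x_t Wx + bx + h_{t-1} Wh + bh),
    // where act is tanh for mode == 1 and ReLU otherwise.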
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
gemmC2[tb + j] + bh[0][j]);
} else {
ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] +
gemmC2[tb + j] + bh[0][j]);
}
}
}
ht_1 = ht;
ht = ht + D * H * N;
    // reverse direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
+ gemmC2[tb + j] + back_bh[0][j]);
} else {
back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
+ gemmC2[tb + j] + back_bh[0][j]);
}
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last time step's state to hy, reshaping from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
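    // Same ping-pong as GruForwardInference: the final layer lands in y_ptr.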
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l,
hy_l, mode);
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
}
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateN,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, H]
DType* gemmC2 = gemmC1 + D * T * N * H; // N * H
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + H * 2 : nullptr;
DType* back_gateN = gateN + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
// x * wx.T : [T * N, I] * [I, H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (int t = 0; t < T; t++) {
    // forward direction: x * wx was precomputed for all steps above; now
    // compute ht-1 * wh for this step (ht-1: [N, H], wh: [H, H])
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * H;
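    // Same cell as the inference path, but nt records what backward needs:
    // the activation itself for tanh, the pre-activation for ReLU.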
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] +
gemmC2[tb + j] + bh[0][j]);
} else {
nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
ht[i * D * H + j] = relu(nt[tb + j]);
}
}
}
ht_1 = ht;
ht = ht + D * H * N;
    // reverse direction
if (D == 2) {
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int tb = i * H;
if (mode == 1) {
nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
+ gemmC2[tb + j] + back_bh[0][j]);
} else {
nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
back_ht[i * D * H + j] = relu(nt[tb + j]);
}
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last time step's state to hy, reshaping from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
const float dropout,
int mode) {
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
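  // Reserved-space layout: the gate records gateN per layer, then the layer
  // outputs y, then the dropout masks.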
DType* gateN_l = rs;
DType* y_l = gateN_l + L * T * D * N * H;
DType* dropout_random = y_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* ws2 = tmp_buf + D * N * H;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
DType* y_tmp = x_ptr;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn)
for (int l = 0; l < L; l++) {
if (l != 0) {
y_tmp = y_l;
y_l = y_l + T * N * H * D;
}
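    // Inverted dropout on the previous layer's output, as in the LSTM/GRU
    // training paths; the realized mask is saved for backward.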
if (dropout > 0.0f && l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
int rand_data = rand_r(&seed_);
if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
dropout_random[(l - 1) * T * N * I + i] = 0;
y_tmp[i] = 0;
} else {
dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
y_tmp[i] = y_tmp[i] / (1.0f - dropout);
}
}
}
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
gateN_l, y_l, hy_l, mode);
gateN_l = gateN_l + T * D * N * H;
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = y_l[i];
}
}
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const int T,
const int N,
const int I,
const int H,
const Tensor<cpu, 2, DType> &x,
const Tensor<cpu, 2, DType> &hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateN,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state,
int mode) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* dart;
DType* nt;
DType* dar = ws; // [T, N, H]
DType* dht1 = dar + T * N * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_dwx = dwx + I * H + H * H;
DType* back_dwh = dwh + I * H + H * H;
DType* back_dbx = dbx + H * 2;
DType* back_dbh = dbh + H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (int t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
    // accumulate dy[t] ([T, N, D, H]) into the running hidden-state gradient dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
nt = gateN + t * N * H;
dart = dar + t * N * H;
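    // Activation gradient computed below: for tanh (mode == 1) nt holds h_t,
    // so da = dh_t * (1 - h_t^2); for ReLU nt holds the pre-activation, so
    // da = dh_t where nt > 0 and 0 elsewhere.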
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int id = i * H + j;
if (mode == 1) {
dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
} else {
          dart[id] = nt[id] > 0.0f ? dht1[id] : static_cast<DType>(0.0f);
}
dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
      // dht1 += dart * wh : [N, H] += [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
    // dbx = ones(1, T * N) * dar, reduced over batch and time; dbh mirrors dbx
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N * T; ++j) {
dbx[i] += dar[j * H + i];
dbh[i] = dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] = dbx[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (int t = 0; t < T; ++t) {
if (t == T-1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
      // accumulate the reverse-direction slice of dy[t] into back_dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
nt = back_gateN + t * N * H;
dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int id = i * H + j;
if (mode == 1) {
dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
} else {
            dart[id] = nt[id] > 0.0f ? back_dht1[id] : static_cast<DType>(0.0f);
}
back_dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
        // back_dht1 += dart * back_wh : [N, H] += [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // back_dwh += dart.T * back_ht1 : [H, H] += [H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
      // back_dbx = ones(1, T * N) * dar, reduced over batch and time; back_dbh mirrors back_dbx
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N * T; ++j) {
back_dbx[i] += dar[j * H + i];
back_dbh[i] = back_dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (int t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (int j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] = back_dbx[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
template <typename DType>
void VanillaRNNBackward(DType* ws,
DType* rs,
const int L,
const int D,
const int T,
const int N,
int I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dw_ptr,
int req_data,
int req_params,
int req_state,
const float dropout,
int mode) {
DType* wx = w_ptr;
DType* dwx = dw_ptr;
DType* dwh = dwx + I * H;
DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H)
+ (L - 1) * ((D + 1) * H) * H * D;
DType* gateN_l = rs + (L - 1) * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* dropout_random = y_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
DType* ws2 = dx_l + T * N * D * H;
DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H
+ D * I * H + D * H * H;
DType* wh_l = wx_l;
if (L == 1) {
wh_l = wh_l + I * H;
} else {
wh_l = wh_l + (D * H) * H;
}
DType* dhy_l = nullptr;
if (dhy_ptr)
dhy_l = dhy_ptr + (L - 1) * D * N * H;
DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * H
+ D * I * H + D * H * H;
DType* dwh_l = nullptr;
if (L == 1) {
dwh_l = dwx_l + I * H;
} else {
dwh_l = dwx_l + (D * H) * H;
}
DType* dbx_l = dbx + (L - 1) * D * H * 2;
DType* dbh_l = dbx_l + H;
DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
DType* dy_l = dy_ptr;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
int inputsize = I;
DType* y_tmp = y_l - T * N * H * D;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
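  // Added note: walk the layers from the top (l = L - 1) down to 0; after each
  // layer is processed, its input gradient dx_l is copied into dy_l below so
  // that it becomes the output gradient of the next (lower) layer.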
for (int l = L - 1; l >= 0; --l) {
if (l == 0) {
I = inputsize;
y_tmp = x_ptr;
dx_l = dx_ptr;
} else {
I = D * H;
}
Tensor<cpu, 2, DType> hx_l = hx[l];
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l,
y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l,
dbx_l, dbh_l, req_data, req_params, req_state, mode);
if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * I; i++) {
if (dropout_random[i] == 0) {
dx_l[i] = 0;
} else {
dx_l[i] = dx_l[i] / (1.0f - dropout);
}
}
}
if (l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < T * N * H * D; ++i) {
dy_l[i] = dx_l[i];
}
gateN_l = gateN_l - T * D * N * H;
dhx_l = dhx_l - D * N * H;
if (dhy_l)
dhy_l = dhy_l - D * N * H;
y_l = y_l - T * N * H * D;
y_tmp = y_l;
if (l == 1) {
wx_l = wx_l - (inputsize + H) * H * D;
wh_l = wx_l + inputsize * H;
dwx_l = dwx_l - (inputsize + H) * H * D;
dwh_l = dwx_l + inputsize * H;
} else {
wx_l = wx_l - (I + H) * H * D;
wh_l = wx_l + I * H;
dwx_l = dwx_l - (I + H) * H * D;
dwh_l = dwx_l + I * H;
}
dbx_l = dbx_l - D * H * 2;
dbh_l = dbx_l + H;
}
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
dynmat.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <math.h>
#include <stdlib.h>
#include <dynmat.h>
#define PI 3.14159265358979323846
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j);
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k);
static double get_dielectric_part(const double q_cart[3],
PHPYCONST double dielectric[3][3]);
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance);
static void make_Hermitian(double *mat, const int num_band);
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3]);
int dym_get_dynamical_matrix_at_q(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int with_openmp)
{
int i, j, ij;
if (with_openmp) {
#pragma omp parallel for
for (ij = 0; ij < num_patom * num_patom ; ij++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
ij / num_patom, /* i */
ij % num_patom); /* j */
}
} else {
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
get_dynmat_ij(dynamical_matrix,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
mass,
s2p_map,
p2s_map,
charge_sum,
i,
j);
}
}
}
make_Hermitian(dynamical_matrix, num_patom * 3);
return 0;
}
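/* Added note (sketch of what get_dynmat_ij below computes): for one pair of
   primitive atoms (i, j),
     D_ij^{ab}(q) = (1 / sqrt(m_i m_j)) * sum_l' Phi^{ab}(i0, jl')
                    * <exp(2 pi i q . r)>_images ,
   where the sum over lattice points l' is the loop over supercell atoms k
   with s2p_map[k] == p2s_map[j], and the phase factor is averaged over the
   multiplicity of equivalent shortest vectors. */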
void dym_get_dipole_dipole(double *dd, /* [natom, 3, natom, 3, (real,imag)] */
const double *dd_q0, /* [natom, 3, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart, /* must be pointer */
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double factor, /* 4pi/V*unit-conv */
const double lambda,
const double tolerance)
{
int i, k, l, adrs, adrs_sum;
double *dd_tmp;
dd_tmp = NULL;
dd_tmp = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] = 0;
dd_tmp[i] = 0;
}
get_KK(dd_tmp,
G_list,
num_G,
num_patom,
q_cart,
q_direction_cart,
dielectric,
pos,
lambda,
tolerance);
multiply_borns(dd, dd_tmp, num_patom, born);
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + i * 3 + l;
adrs_sum = i * 9 + k * 3 + l;
dd[adrs * 2] -= dd_q0[adrs_sum * 2];
dd[adrs * 2 + 1] -= dd_q0[adrs_sum * 2 + 1];
}
}
}
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd[i] *= factor;
}
/* This may not be necessary. */
/* make_Hermitian(dd, num_patom * 3); */
free(dd_tmp);
dd_tmp = NULL;
}
void dym_get_dipole_dipole_q0(double *dd_q0, /* [natom, 3, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
PHPYCONST double (*born)[3][3],
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
int i, j, k, l, adrs_tmp, adrs, adrsT;
double zero_vec[3];
double *dd_tmp1, *dd_tmp2;
dd_tmp1 = NULL;
dd_tmp1 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
dd_tmp2 = NULL;
dd_tmp2 = (double*) malloc(sizeof(double) * num_patom * num_patom * 18);
for (i = 0; i < num_patom * num_patom * 18; i++) {
dd_tmp1[i] = 0;
dd_tmp2[i] = 0;
}
zero_vec[0] = 0;
zero_vec[1] = 0;
zero_vec[2] = 0;
get_KK(dd_tmp1,
G_list,
num_G,
num_patom,
zero_vec,
NULL,
dielectric,
pos,
lambda,
tolerance);
multiply_borns(dd_tmp2, dd_tmp1, num_patom, born);
for (i = 0; i < num_patom * 18; i++) {
dd_q0[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * 9 + k * 3 + l;
for (j = 0; j < num_patom; j++) {
adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ;
dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2];
dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1];
}
}
}
}
/* Summation over another atomic index */
/* for (j = 0; j < num_patom; j++) { */
/* for (k = 0; k < 3; k++) { /\* alpha *\/ */
/* for (l = 0; l < 3; l++) { /\* beta *\/ */
/* adrs = j * 9 + k * 3 + l; */
/* for (i = 0; i < num_patom; i++) { */
/* adrs_tmp = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l ; */
/* dd_q0[adrs * 2] += dd_tmp2[adrs_tmp * 2]; */
/* dd_q0[adrs * 2 + 1] += dd_tmp2[adrs_tmp * 2 + 1]; */
/* } */
/* } */
/* } */
/* } */
for (i = 0; i < num_patom; i++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * 9 + k * 3 + l;
adrsT = i * 9 + l * 3 + k;
dd_q0[adrs * 2] += dd_q0[adrsT * 2];
dd_q0[adrs * 2] /= 2;
dd_q0[adrsT * 2] = dd_q0[adrs * 2];
dd_q0[adrs * 2 + 1] -= dd_q0[adrsT * 2 + 1];
dd_q0[adrs * 2 + 1] /= 2;
dd_q0[adrsT * 2 + 1] = -dd_q0[adrs * 2 + 1];
}
}
}
free(dd_tmp1);
dd_tmp1 = NULL;
free(dd_tmp2);
dd_tmp2 = NULL;
}
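/* Added note: dym_get_charge_sum below fills, for every atom pair (i, j),
   charge_sum[i * num_patom + j][a][b] = factor * (q . Z_i)_a * (q . Z_j)_b,
   the Born-effective-charge numerator of the non-analytical correction. */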
void dym_get_charge_sum(double (*charge_sum)[3][3],
const int num_patom,
const double factor, /* 4pi/V*unit-conv and denominator */
const double q_cart[3],
PHPYCONST double (*born)[3][3])
{
int i, j, k, a, b;
double (*q_born)[3];
q_born = (double (*)[3]) malloc(sizeof(double[3]) * num_patom);
for (i = 0; i < num_patom; i++) {
for (j = 0; j < 3; j++) {
q_born[i][j] = 0;
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
q_born[i][j] += q_cart[k] * born[i][k][j];
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (a = 0; a < 3; a++) {
for (b = 0; b < 3; b++) {
charge_sum[i * num_patom + j][a][b] =
q_born[i][a] * q_born[j][b] * factor;
}
}
}
}
free(q_born);
q_born = NULL;
}
/* fc[num_patom, num_satom, 3, 3] */
/* dm[num_comm_points, num_patom * 3, num_patom *3] */
/* comm_points[num_satom / num_patom, 3] */
/* shortest_vectors[num_satom, num_patom, 27, 3] */
/* multiplicities[num_satom, num_patom] */
void dym_transform_dynmat_to_fc(double *fc,
const double *dm,
PHPYCONST double (*comm_points)[3],
PHPYCONST double (*shortest_vectors)[27][3],
const int *multiplicities,
const double *masses,
const int *s2pp_map,
const int *fc_index_map,
const int num_patom,
const int num_satom)
{
int i, j, k, l, m, N, adrs, multi;
double coef, phase, cos_phase, sin_phase;
N = num_satom / num_patom;
for (i = 0; i < num_patom * num_satom * 9; i++) {
fc[i] = 0;
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_satom; j++) {
coef = sqrt(masses[i] * masses[s2pp_map[j]]) / N;
for (k = 0; k < N; k++) {
cos_phase = 0;
sin_phase = 0;
multi = multiplicities[j * num_patom + i];
for (l = 0; l < multi; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase -= comm_points[k][m] *
shortest_vectors[j * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI);
sin_phase += sin(phase * 2 * PI);
}
cos_phase /= multi;
sin_phase /= multi;
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
adrs = k * num_patom * num_patom * 18 + i * num_patom * 18 +
l * num_patom * 6 + s2pp_map[j] * 6 + m * 2;
fc[fc_index_map[i] * num_satom * 9 + j * 9 + l * 3 + m] +=
(dm[adrs] * cos_phase - dm[adrs + 1] * sin_phase) * coef;
}
}
}
}
}
}
static void get_dynmat_ij(double *dynamical_matrix,
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const double *mass,
const int *s2p_map,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j)
{
int k, l, adrs;
double mass_sqrt;
double dm_real[3][3], dm_imag[3][3];
mass_sqrt = sqrt(mass[i] * mass[j]);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
dm_real[k][l] = 0;
dm_imag[k][l] = 0;
}
}
for (k = 0; k < num_satom; k++) { /* Lattice points of right index of fc */
if (s2p_map[k] != p2s_map[j]) {
continue;
}
get_dm(dm_real,
dm_imag,
num_patom,
num_satom,
fc,
q,
svecs,
multi,
p2s_map,
charge_sum,
i,
j,
k);
}
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = (i * 3 + k) * num_patom * 3 + j * 3 + l;
dynamical_matrix[adrs * 2] = dm_real[k][l] / mass_sqrt;
dynamical_matrix[adrs * 2 + 1] = dm_imag[k][l] / mass_sqrt;
}
}
}
static void get_dm(double dm_real[3][3],
double dm_imag[3][3],
const int num_patom,
const int num_satom,
const double *fc,
const double q[3],
PHPYCONST double (*svecs)[27][3],
const int *multi,
const int *p2s_map,
PHPYCONST double (*charge_sum)[3][3],
const int i,
const int j,
const int k)
{
int l, m;
double phase, cos_phase, sin_phase, fc_elem;
cos_phase = 0;
sin_phase = 0;
for (l = 0; l < multi[k * num_patom + i]; l++) {
phase = 0;
for (m = 0; m < 3; m++) {
phase += q[m] * svecs[k * num_patom + i][l][m];
}
cos_phase += cos(phase * 2 * PI) / multi[k * num_patom + i];
sin_phase += sin(phase * 2 * PI) / multi[k * num_patom + i];
}
for (l = 0; l < 3; l++) {
for (m = 0; m < 3; m++) {
if (charge_sum) {
fc_elem = (fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m] +
charge_sum[i * num_patom + j][l][m]);
} else {
fc_elem = fc[p2s_map[i] * num_satom * 9 + k * 9 + l * 3 + m];
}
dm_real[l][m] += fc_elem * cos_phase;
dm_imag[l][m] += fc_elem * sin_phase;
}
}
}
static double get_dielectric_part(const double q_cart[3],
PHPYCONST double dielectric[3][3])
{
int i, j;
double x[3];
double sum;
for (i = 0; i < 3; i++) {
x[i] = 0;
for (j = 0; j < 3; j++) {
x[i] += dielectric[i][j] * q_cart[j];
}
}
sum = 0;
for (i = 0; i < 3; i++) {
sum += q_cart[i] * x[i];
}
return sum;
}
static void get_KK(double *dd_part, /* [natom, 3, natom, 3, (real,imag)] */
PHPYCONST double (*G_list)[3], /* [num_G, 3] */
const int num_G,
const int num_patom,
const double q_cart[3],
const double *q_direction_cart,
PHPYCONST double dielectric[3][3],
PHPYCONST double (*pos)[3], /* [num_patom, 3] */
const double lambda,
const double tolerance)
{
int i, j, k, l, g, adrs;
double q_K[3];
double norm, cos_phase, sin_phase, phase, dielectric_part, exp_damp, L2;
double KK[3][3];
L2 = 4 * lambda * lambda;
/* sum over K = G + q and over G (i.e. q=0) */
/* q_direction has values for summation over K at Gamma point. */
/* q_direction is NULL for summation over G */
for (g = 0; g < num_G; g++) {
norm = 0;
for (i = 0; i < 3; i++) {
q_K[i] = G_list[g][i] + q_cart[i];
norm += q_K[i] * q_K[i];
}
if (sqrt(norm) < tolerance) {
if (!q_direction_cart) {
continue;
} else {
dielectric_part = get_dielectric_part(q_direction_cart, dielectric);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] =
q_direction_cart[i] * q_direction_cart[j] / dielectric_part;
}
}
}
} else {
dielectric_part = get_dielectric_part(q_K, dielectric);
exp_damp = exp(-dielectric_part / L2);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
KK[i][j] = q_K[i] * q_K[j] / dielectric_part * exp_damp;
}
}
}
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
phase = 0;
for (k = 0; k < 3; k++) {
/* For D-type dynamical matrix */
/* phase += (pos[i][k] - pos[j][k]) * q_K[k]; */
/* For C-type dynamical matrix */
phase += (pos[i][k] - pos[j][k]) * G_list[g][k];
}
phase *= 2 * PI;
cos_phase = cos(phase);
sin_phase = sin(phase);
for (k = 0; k < 3; k++) {
for (l = 0; l < 3; l++) {
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
dd_part[adrs * 2] += KK[k][l] * cos_phase;
dd_part[adrs * 2 + 1] += KK[k][l] * sin_phase;
}
}
}
}
}
}
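/* Added note: make_Hermitian below replaces mat by (mat + mat^H) / 2; after
   it, the (i, j) and (j, i) entries are exact complex conjugates and the
   imaginary parts on the diagonal vanish. */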
static void make_Hermitian(double *mat, const int num_band)
{
int i, j, adrs, adrsT;
for (i = 0; i < num_band; i++) {
for (j = i; j < num_band; j++) {
adrs = i * num_band + j * 1;
adrs *= 2;
adrsT = j * num_band + i * 1;
adrsT *= 2;
/* real part */
mat[adrs] += mat[adrsT];
mat[adrs] /= 2;
/* imaginary part */
mat[adrs + 1] -= mat[adrsT+ 1];
mat[adrs + 1] /= 2;
/* store */
mat[adrsT] = mat[adrs];
mat[adrsT + 1] = -mat[adrs + 1];
}
}
}
static void multiply_borns(double *dd,
const double *dd_in,
const int num_patom,
PHPYCONST double (*born)[3][3])
{
int i, j, k, l, m, n, adrs, adrs_in;
double zz;
for (i = 0; i < num_patom; i++) {
for (j = 0; j < num_patom; j++) {
for (k = 0; k < 3; k++) { /* alpha */
for (l = 0; l < 3; l++) { /* beta */
adrs = i * num_patom * 9 + k * num_patom * 3 + j * 3 + l;
for (m = 0; m < 3; m++) { /* alpha' */
for (n = 0; n < 3; n++) { /* beta' */
adrs_in = i * num_patom * 9 + m * num_patom * 3 + j * 3 + n ;
zz = born[i][m][k] * born[j][n][l];
dd[adrs * 2] += dd_in[adrs_in * 2] * zz;
dd[adrs * 2 + 1] += dd_in[adrs_in * 2 + 1] * zz;
}
}
}
}
}
}
}
|
Example_scan.1.c | /*
* @@name: scan.1.c
* @@type: C
* @@compilable: yes
* @@linkable: yes
* @@expect: success
* @@version: omp_5.0
*/
#include <stdio.h>
#define N 100
int main(void)
{
int a[N], b[N];
int x = 0;
// initialization
for (int k = 0; k < N; k++)
a[k] = k + 1;
// a[k] is included in the computation of producing results in b[k]
#pragma omp parallel for simd reduction(inscan,+: x)
for (int k = 0; k < N; k++) {
x += a[k];
#pragma omp scan inclusive(x)
b[k] = x;
}
printf("x = %d, b[0:3] = %d %d %d\n", x, b[0], b[1], b[2]);
// 5050, 1 3 6
return 0;
}
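/* Added note (illustration): the exclusive-scan variant of the same pattern
   reads the scan variable before updating it:
     for (int k = 0; k < N; k++) { b[k] = x;
       #pragma omp scan exclusive(x)
       x += a[k]; }
   so b[k] holds the sum of a[0..k-1], giving b[0:3] = 0 1 3. */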
|
functions.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "functions.h"
//compute a*b mod p safely
unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) {
unsigned int za = a;
unsigned int ab = 0;
while (b > 0) {
if (b%2 == 1) ab = (ab + za) % p;
za = (2 * za) % p;
b /= 2;
}
return ab;
}
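// Added worked trace (illustration): modprod(7, 9, 11): b=9 odd -> ab=7; then
// (za,b) = (3,4), (6,2), (1,1); b=1 odd -> ab=(7+1)%11=8, matching 63 mod 11 = 8.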
//compute a^b mod p safely
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) {
unsigned int z = a;
unsigned int aExpb = 1;
while (b > 0) {
if (b%2 == 1) aExpb = modprod(aExpb, z, p);
z = modprod(z, z, p);
b /= 2;
}
return aExpb;
}
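// Added worked example (illustration): modExp(5, 117, 19) = 1, since by Fermat
// 5^18 = 1 (mod 19) and 117 = 6*18 + 9, so 5^117 = 5^9 = (5^3)^3 = 11^3 = 1331 = 1 (mod 19).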
//returns either 0 or 1 randomly
unsigned int randomBit() {
return rand()%2;
}
//returns a random integer which is between 2^{n-1} and 2^{n}
unsigned int randXbitInt(unsigned int n) {
unsigned int r = 1;
for (unsigned int i=0; i<n-1; i++) {
r = r*2 + randomBit();
}
return r;
}
//tests for primality and returns 1 if N is probably prime, 0 if N is composite
unsigned int isProbablyPrime(unsigned int N) {
if (N%2==0) return 0; //not interested in even numbers (including 2)
unsigned int NsmallPrimes = 168;
unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
79, 83, 89, 97, 101, 103, 107, 109, 113,
127, 131, 137, 139, 149, 151, 157, 163,
167, 173, 179, 181, 191, 193, 197, 199,
211, 223, 227, 229, 233, 239, 241, 251,
257, 263, 269, 271, 277, 281, 283, 293,
307, 311, 313, 317, 331, 337, 347, 349,
353, 359, 367, 373, 379, 383, 389, 397,
401, 409, 419, 421, 431, 433, 439, 443,
449, 457, 461, 463, 467, 479, 487, 491,
499, 503, 509, 521, 523, 541, 547, 557,
563, 569, 571, 577, 587, 593, 599, 601,
607, 613, 617, 619, 631, 641, 643, 647,
653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757,
761, 769, 773, 787, 797, 809, 811, 821,
823, 827, 829, 839, 853, 857, 859, 863,
877, 881, 883, 887, 907, 911, 919, 929,
937, 941, 947, 953, 967, 971, 977, 983,
991, 997};
//before using a probabilistic primality check, check directly using the small primes list
for (unsigned int n=1;n<NsmallPrimes;n++) {
if (N==smallPrimeList[n]) return 1; //true
if (N%smallPrimeList[n]==0) return 0; //false
}
//if we're testing a large number, switch to the Miller-Rabin primality test
unsigned int r = 0;
unsigned int d = N-1;
while (d%2 == 0) {
d /= 2;
r += 1;
}
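  // at this point N - 1 = 2^r * d with d odd; if N is prime, then for any base k
  // either k^d = 1 (mod N) or k^(2^s * d) = N - 1 (mod N) for some 0 <= s < r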
for (unsigned int n=0;n<NsmallPrimes;n++) {
unsigned int k = smallPrimeList[n];
unsigned int x = modExp(k,d,N);
if ((x==1) || (x==N-1)) continue;
    for (unsigned int i=1;i<r;i++) { //check all r-1 squarings of x
x = modprod(x,x,N);
if (x == 1) return 0; //false
if (x == N-1) break;
}
    // see whether we left the loop because x==N-1
if (x == N-1) continue;
return 0; //false
}
return 1; //true
}
//Finds a generator of Z_p using the assumption that p=2*q+1
unsigned int findGenerator(unsigned int p) {
unsigned int g;
unsigned int q = (p-1)/2;
do {
//make a random number 1<= g < p
g = randXbitInt(32)%p; //could also have passed n to findGenerator
} while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1));
return g;
}
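// Added note: for a safe prime p = 2q + 1 every element order divides p - 1 = 2q,
// so it is 1, 2, q or 2q; rejecting g = 0, g^2 = 1 and g^q = 1 leaves exactly the
// elements of order 2q, i.e. the generators.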
void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
unsigned int *h, unsigned int *x) {
/* Use isProbablyPrime and randXbitInt to find a new random n-bit prime number
which satisfies p=2*q+1 where q is also prime */
unsigned int q;
do {
*p = randXbitInt(n);
q = (*p-1)/2;
} while (!isProbablyPrime(*p) || !isProbablyPrime(q));
/* Use the fact that p=2*q+1 to quickly find a generator */
*g = findGenerator(*p);
//pick a secret key, x
*x = randXbitInt(n)%(*p);
//compute h
*h = modExp(*g,*x,*p);
printf("ElGamal Setup successful.\n");
printf("p = %u. \n", *p);
printf("g = %u is a generator of Z_%u \n", *g, *p);
printf("Secret key: x = %u \n", *x);
printf("h = g^x = %u\n", *h);
printf("\n");
}
void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
unsigned int p, unsigned int g, unsigned int h) {
/* Q2.1 Parallelize this function with OpenMP */
#pragma omp parallel for
for (unsigned int i=0; i<Nints;i++) {
//pick y in Z_p randomly
unsigned int y;
do {
y = randXbitInt(32)%p;
} while (y==0); //don't allow y=0
//compute a = g^y
a[i] = modExp(g,y,p);
//compute s = h^y
unsigned int s = modExp(h,y,p);
//encrypt m by multiplying with s
m[i] = modprod(m[i],s,p);
}
}
void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
unsigned int p, unsigned int x) {
/* Q2.1 Parallelize this function with OpenMP */
#pragma omp parallel for
for (unsigned int i=0; i<Nints;i++) {
//compute s = a^x
unsigned int s = modExp(a[i],x,p);
//compute s^{-1} = s^{p-2}
unsigned int invS = modExp(s,p-2,p);
//decrypt message by multiplying by invS
m[i] = modprod(m[i],invS,p);
}
}
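/* Added illustration (not part of the original assignment; a minimal sketch):
   round-trip check for the pair above. Decryption works because
   s * s^(p-2) = s^(p-1) = 1 (mod p) by Fermat's little theorem. */
static void exampleElGamalRoundTrip(void) {
  unsigned int p, g, h, x;
  setupElGamal(16, &p, &g, &h, &x); //small 16-bit prime, illustration only
  unsigned int m[2], a[2];
  m[0] = 123 % p; m[1] = 456 % p;
  unsigned int m0 = m[0], m1 = m[1];
  ElGamalEncrypt(m, a, 2, p, g, h); //m now holds ciphertexts, a the g^y values
  ElGamalDecrypt(m, a, 2, p, x); //recovers the original residues
  printf("round trip ok: %d\n", (m[0] == m0) && (m[1] == m1));
}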
//Pad the end of string with spaces so its length is divisible by charsPerInt
// Assume there is enough allocated storage for the padded string
void padString(unsigned char* string, unsigned int charsPerInt) {
  /* Q1.2 Complete this function */
  unsigned int len = strlen((char*) string);
  while (len % charsPerInt != 0) {
    string[len] = ' ';
    string[len+1] = '\0';
    len = len + 1;
  }
}
void convertStringToZ(unsigned char *string, unsigned int Nchars,
unsigned int *Z, unsigned int Nints) {
/* Q1.3 Complete this function */
/* Q2.2 Parallelize this function with OpenMP */
  /* One reasonable completion (assumed scheme): pack charsPerInt = Nchars/Nints
     characters into each unsigned int, big-endian within the int */
  unsigned int charsPerInt = Nchars/Nints;
  padString(string, charsPerInt);
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    unsigned int z = 0;
    for (unsigned int j = 0; j < charsPerInt; j++) {
      z = (z << 8) | string[i*charsPerInt + j];
    }
    Z[i] = z;
  }
}
void convertZToString(unsigned int *Z, unsigned int Nints,
unsigned char *string, unsigned int Nchars) {
/* Q1.4 Complete this function */
/* Q2.2 Parallelize this function with OpenMP */
  /* Inverse of the packing above: unpack each unsigned int back into
     charsPerInt = Nchars/Nints characters (assumes room for a terminator) */
  unsigned int charsPerInt = Nchars/Nints;
  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    for (unsigned int j = 0; j < charsPerInt; j++) {
      string[i*charsPerInt + j] = (Z[i] >> (8*(charsPerInt - 1 - j))) & 0xFF;
    }
  }
  string[Nchars] = '\0';
}
|
GB_binop__band_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__band_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__band_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__band_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_int64)
// C=scalar+B GB (_bind1st__band_int64)
// C=scalar+B' GB (_bind1st_tran__band_int64)
// C=A+scalar GB (_bind2nd__band_int64)
// C=A'+scalar GB (_bind2nd_tran__band_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij) & (bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_INT64 || GxB_NO_BAND_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__band_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__band_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__band_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__band_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__band_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__band_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__band_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__band_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__band_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__band_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x) & (aij) ; \
}
GrB_Info GB (_bind1st_tran__band_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij) & (y) ; \
}
GrB_Info GB (_bind2nd_tran__band_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lxor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint8)
// A*D function (colscale): GB (_AxD__lxor_uint8)
// D*A function (rowscale): GB (_DxB__lxor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint8)
// C=scalar+B GB (_bind1st__lxor_uint8)
// C=scalar+B' GB (_bind1st_tran__lxor_uint8)
// C=A+scalar GB (_bind2nd__lxor_uint8)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
    0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
    0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT8 || GxB_NO_LXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
apply_constant_scalarvalue_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
//
#if !defined(KRATOS_APPLY_CONSTANT_VALUE_PROCESS_H_INCLUDED )
#define KRATOS_APPLY_CONSTANT_VALUE_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/kratos_flags.h"
#include "includes/kratos_parameters.h"
#include "processes/process.h"
namespace Kratos
{
///@name Kratos Classes
///@{
/// Process that applies a constant scalar value (and fixity) to all of the nodes in a given mesh.
class ApplyConstantScalarValueProcess : public Process
{
public:
///@name Type Definitions
///@{
KRATOS_DEFINE_LOCAL_FLAG(VARIABLE_IS_FIXED);
/// Pointer definition of ApplyConstantScalarValueProcess
KRATOS_CLASS_POINTER_DEFINITION(ApplyConstantScalarValueProcess);
///@}
///@name Life Cycle
///@{
ApplyConstantScalarValueProcess(ModelPart& model_part,
Parameters rParameters
) : Process(Flags()) , mr_model_part(model_part)
{
KRATOS_TRY
//only include validation with c++11 since raw_literals do not exist in c++03
Parameters default_parameters( R"(
{
"model_part_name":"PLEASE_CHOOSE_MODEL_PART_NAME",
"mesh_id": 0,
"variable_name": "PLEASE_PRESCRIBE_VARIABLE_NAME",
"is_fixed": false,
"value" : 1.0
} )" );
// Some values need to be mandatorily prescribed since no meaningful default value exists. For this reason we access them here,
// so that an error is thrown if they don't exist
rParameters["value"];
rParameters["variable_name"];
rParameters["model_part_name"];
// Now validate against defaults -- this also ensures no type mismatch
rParameters.ValidateAndAssignDefaults(default_parameters);
mmesh_id = rParameters["mesh_id"].GetInt();
mvariable_name = rParameters["variable_name"].GetString();
this->Set( VARIABLE_IS_FIXED, rParameters["is_fixed"].GetBool());
if( KratosComponents< Variable<double> >::Has( mvariable_name ) ) //case of double variable
{
mdouble_value = rParameters["value"].GetDouble();
if( model_part.GetNodalSolutionStepVariablesList().Has( KratosComponents< Variable<double> >::Get( mvariable_name ) ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name);
}
}
else if( KratosComponents< VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > >::Has(mvariable_name) ) //case of component variable
{
typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > component_type;
component_type var_component = KratosComponents< component_type >::Get(mvariable_name);
if( model_part.GetNodalSolutionStepVariablesList().Has( var_component.GetSourceVariable() ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name);
}
mdouble_value = rParameters["value"].GetDouble();
}
else if( KratosComponents< Variable<int> >::Has( mvariable_name ) ) //case of int variable
{
mint_value = rParameters["value"].GetInt();
if( model_part.GetNodalSolutionStepVariablesList().Has( KratosComponents< Variable<int> >::Get( mvariable_name ) ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name);
}
if(this->Is(VARIABLE_IS_FIXED))
{
KRATOS_THROW_ERROR(std::runtime_error,"sorry it is not possible to fix variables of type Variable<int>. Only double variables or vector components can be fixed","");
}
}
else if( KratosComponents< Variable<bool> >::Has( mvariable_name ) ) //case of bool variable
{
mbool_value = rParameters["value"].GetBool();
if( model_part.GetNodalSolutionStepVariablesList().Has( KratosComponents< Variable<bool> >::Get( mvariable_name ) ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name);
}
if(this->Is(VARIABLE_IS_FIXED))
{
KRATOS_THROW_ERROR(std::runtime_error,"sorry it is not possible to fix variables of type Variable<bool>. Only double variables or vector components can be fixed","");
}
}
KRATOS_CATCH("");
}
ApplyConstantScalarValueProcess(ModelPart& model_part,
const Variable<double>& rVariable,
const double double_value,
std::size_t mesh_id,
Flags options
) : Process(options) , mr_model_part(model_part),mdouble_value(double_value), mint_value(0), mbool_value(false),mmesh_id(mesh_id)
{
KRATOS_TRY;
if(this->IsDefined(VARIABLE_IS_FIXED) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)","");
}
if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",rVariable);
}
mvariable_name = rVariable.Name();
KRATOS_CATCH("");
}
ApplyConstantScalarValueProcess(ModelPart& model_part,
const VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > >& rVariable,
const double double_value,
std::size_t mesh_id,
Flags options
) : Process(options) , mr_model_part(model_part),mdouble_value(double_value), mint_value(0), mbool_value(false),mmesh_id(mesh_id)
{
KRATOS_TRY;
if(this->IsDefined(VARIABLE_IS_FIXED) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)","")
}
mvariable_name = rVariable.Name();
if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable.GetSourceVariable() ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",rVariable);
}
KRATOS_CATCH("");
}
ApplyConstantScalarValueProcess(ModelPart& model_part,
const Variable< int >& rVariable,
const int int_value,
std::size_t mesh_id,
Flags options
) : Process(options) , mr_model_part(model_part),mdouble_value(0.0), mint_value(int_value), mbool_value(false),mmesh_id(mesh_id)
{
KRATOS_TRY;
if(this->IsDefined(VARIABLE_IS_FIXED) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"Please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)","");
}
if(this->Is(VARIABLE_IS_FIXED))
{
KRATOS_THROW_ERROR(std::runtime_error,"Sorry it is not possible to fix variables of type Variable<int>. Only double variables or vector components can be fixed","");
}
if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"Trying to fix a variable that is not in the model_part - variable name is ",rVariable);
}
mvariable_name = rVariable.Name();
KRATOS_CATCH("");
}
ApplyConstantScalarValueProcess(ModelPart& model_part,
const Variable< bool >& rVariable,
const bool bool_value,
std::size_t mesh_id,
Flags options
) : Process(options) , mr_model_part(model_part),mdouble_value(0.0), mint_value(0), mbool_value(bool_value),mmesh_id(mesh_id)
{
KRATOS_TRY;
if(this->IsDefined(VARIABLE_IS_FIXED) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"Please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)","");
}
if(this->Is(VARIABLE_IS_FIXED))
{
KRATOS_THROW_ERROR(std::runtime_error,"Sorry it is not possible to fix variables of type Variable<int>. Only double variables or vector components can be fixed","");
}
if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable ) == false )
{
KRATOS_THROW_ERROR(std::runtime_error,"Trying to fix a variable that is not in the model_part - variable name is ",rVariable);
}
mvariable_name = rVariable.Name();
KRATOS_CATCH("");
}
/// Destructor.
~ApplyConstantScalarValueProcess() override {}
///@}
///@name Operators
///@{
/// This operator is provided to call the process as a function and simply calls the Execute method.
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
/// Execute method is used to execute the ApplyConstantScalarValueProcess algorithms.
void Execute() override {}
/// this function is designed to be called at the beginning of the computations,
/// right after reading the model and the groups
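/// Dispatches on the runtime type of the variable named mvariable_name:
/// double and component variables may additionally be fixed, while int and
/// bool variables are only assigned.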
void ExecuteInitialize() override
{
KRATOS_TRY;
const bool is_fixed = this->Is(VARIABLE_IS_FIXED);
if( KratosComponents< Variable<double> >::Has( mvariable_name ) ) //case of double variable
{
InternalApplyValue<>(KratosComponents< Variable<double> >::Get(mvariable_name) , is_fixed, mdouble_value);
}
else if( KratosComponents< VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > >::Has(mvariable_name) ) //case of component variable
{
typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > component_type;
component_type var_component = KratosComponents< component_type >::Get(mvariable_name);
InternalApplyValue< component_type, double>(var_component , is_fixed, mdouble_value);
}
else if( KratosComponents< Variable<int> >::Has( mvariable_name ) ) //case of int variable
{
InternalApplyValueWithoutFixing<>(KratosComponents< Variable<int> >::Get(mvariable_name) , mint_value);
}
else if( KratosComponents< Variable<bool> >::Has( mvariable_name ) ) //case of bool variable
{
InternalApplyValueWithoutFixing<>(KratosComponents< Variable<bool> >::Get(mvariable_name), mbool_value);
}
else
{
KRATOS_THROW_ERROR(std::logic_error, "Not able to fix the variable. Attempting to fix variable:",mvariable_name);
}
KRATOS_CATCH("");
}
/// this function is designed to be executed once before the solution loop, but
/// after all of the solvers are built
void ExecuteBeforeSolutionLoop() override
{
}
/// this function will be executed at every time step BEFORE performing the solve phase
void ExecuteInitializeSolutionStep() override
{
}
/// this function will be executed at every time step AFTER performing the solve phase
void ExecuteFinalizeSolutionStep() override
{
}
/// this function will be executed at every time step BEFORE writing the output
void ExecuteBeforeOutputStep() override
{
}
/// this function will be executed at every time step AFTER writing the output
void ExecuteAfterOutputStep() override
{
}
/// this function is designed to be called at the end of the computations
void ExecuteFinalize() override
{
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ApplyConstantScalarValueProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "ApplyConstantScalarValueProcess";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
ModelPart& mr_model_part;
std::string mvariable_name;
double mdouble_value;
int mint_value;
bool mbool_value;
std::size_t mmesh_id;
private:
///@name Static Member Variables
///@{
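// Assigns `value` to the nodal solution-step variable `rVar` on every node of
// mesh `mmesh_id`, fixing the corresponding DOF first when `to_be_fixed` is
// true. The node loop is parallelized with OpenMP.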
template< class TVarType, class TDataType >
void InternalApplyValue(TVarType& rVar, const bool to_be_fixed, const TDataType value)
{
const int nnodes = mr_model_part.GetMesh(mmesh_id).Nodes().size();
if(nnodes != 0)
{
ModelPart::NodesContainerType::iterator it_begin = mr_model_part.GetMesh(mmesh_id).NodesBegin();
// ModelPart::NodesContainerType::iterator it_end = mr_model_part.GetMesh(mmesh_id).NodesEnd();
#pragma omp parallel for
for(int i = 0; i<nnodes; i++)
{
ModelPart::NodesContainerType::iterator it = it_begin + i;
if(to_be_fixed)
{
it->Fix(rVar);
}
it->FastGetSolutionStepValue(rVar) = value;
}
}
}
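// Same as InternalApplyValue, but never fixes the DOF; used for the int and
// bool variants, which cannot be fixed.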
template< class TVarType, class TDataType >
void InternalApplyValueWithoutFixing(TVarType& rVar, const TDataType value)
{
const int nnodes = mr_model_part.GetMesh(mmesh_id).Nodes().size();
if(nnodes != 0)
{
ModelPart::NodesContainerType::iterator it_begin = mr_model_part.GetMesh(mmesh_id).NodesBegin();
// ModelPart::NodesContainerType::iterator it_end = mr_model_part.GetMesh(mmesh_id).NodesEnd();
#pragma omp parallel for
for(int i = 0; i<nnodes; i++)
{
ModelPart::NodesContainerType::iterator it = it_begin + i;
it->FastGetSolutionStepValue(rVar) = value;
}
}
}
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
ApplyConstantScalarValueProcess& operator=(ApplyConstantScalarValueProcess const& rOther);
/// Copy constructor.
//ApplyConstantScalarValueProcess(ApplyConstantScalarValueProcess const& rOther);
///@}
}; // Class ApplyConstantScalarValueProcess
KRATOS_CREATE_LOCAL_FLAG(ApplyConstantScalarValueProcess,VARIABLE_IS_FIXED, 0);
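// A minimal usage sketch (illustrative only; the model part r_model_part and
// the use of TEMPERATURE are assumptions, not part of this header): fix
// TEMPERATURE and assign 25.0 on every node of mesh 0 during initialization.
//
//   ApplyConstantScalarValueProcess process(
//       r_model_part, TEMPERATURE, 25.0, 0,
//       ApplyConstantScalarValueProcess::VARIABLE_IS_FIXED);
//   process.ExecuteInitialize();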
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
ApplyConstantScalarValueProcess& rThis);
/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
const ApplyConstantScalarValueProcess& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_APPLY_CONSTANT_VALUE_PROCESS_H_INCLUDED defined
|
ASTMatchers.h | //===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements matchers to be used together with the MatchFinder to
// match AST nodes.
//
// Matchers are created by generator functions, which can be combined in
// a functional in-language DSL to express queries over the C++ AST.
//
// For example, to match a class with a certain name, one would call:
// cxxRecordDecl(hasName("MyClass"))
// which returns a matcher that can be used to find all AST nodes that declare
// a class named 'MyClass'.
//
// For more complicated match expressions we're often interested in accessing
// multiple parts of the matched AST nodes once a match is found. In that case,
// call `.bind("name")` on match expressions that match the nodes you want to
// access.
//
// For example, when we're interested in child classes of a certain class, we
// would write:
// cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child")))
// When the match is found via the MatchFinder, a user provided callback will
// be called with a BoundNodes instance that contains a mapping from the
// strings that we provided for the `.bind()` calls to the nodes that were
// matched.
// In the given example, each time our matcher finds a match we get a callback
// where "child" is bound to the RecordDecl node of the matching child
// class declaration.
//
// See ASTMatchersInternal.h for a more in-depth explanation of the
// implementation details of the matcher framework.
//
// See ASTMatchFinder.h for how to use the generated matchers to run over
// an AST.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTTypeTraits.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/LambdaCapture.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchersInternal.h"
#include "clang/ASTMatchers/ASTMatchersMacros.h"
#include "clang/Basic/AttrKinds.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>
namespace clang {
namespace ast_matchers {
/// Maps string IDs to AST nodes matched by parts of a matcher.
///
/// The bound nodes are generated by calling \c bind("id") on the node matchers
/// of the nodes we want to access later.
///
/// The instances of BoundNodes are created by \c MatchFinder when the user's
/// callbacks are executed every time a match is found.
class BoundNodes {
public:
/// Returns the AST node bound to \c ID.
///
/// Returns NULL if there was no node bound to \c ID or if there is a node but
/// it cannot be converted to the specified type.
template <typename T>
const T *getNodeAs(StringRef ID) const {
return MyBoundNodes.getNodeAs<T>(ID);
}
/// Type of mapping from binding identifiers to bound nodes. This type
/// is an associative container with a key type of \c std::string and a value
/// type of \c clang::DynTypedNode
using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap;
/// Retrieve mapping from binding identifiers to bound nodes.
const IDToNodeMap &getMap() const {
return MyBoundNodes.getMap();
}
private:
friend class internal::BoundNodesTreeBuilder;
/// Create BoundNodes from a pre-filled map of bindings.
BoundNodes(internal::BoundNodesMap &MyBoundNodes)
: MyBoundNodes(MyBoundNodes) {}
internal::BoundNodesMap MyBoundNodes;
};
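// A minimal sketch (not part of this header) of how BoundNodes is typically
// consumed from a MatchFinder callback; `OnMatch` is a hypothetical name:
//
//   class OnMatch : public MatchFinder::MatchCallback {
//     void run(const MatchFinder::MatchResult &Result) override {
//       // "child" must have been bound via .bind("child") in the matcher.
//       if (const auto *RD = Result.Nodes.getNodeAs<CXXRecordDecl>("child"))
//         llvm::errs() << RD->getName() << "\n";
//     }
//   };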
/// Types of matchers for the top-level classes in the AST class
/// hierarchy.
/// @{
using DeclarationMatcher = internal::Matcher<Decl>;
using StatementMatcher = internal::Matcher<Stmt>;
using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
/// @}
/// Matches any node.
///
/// Useful when another matcher requires a child matcher, but there's no
/// additional constraint. This will often be used with an explicit conversion
/// to an \c internal::Matcher<> type such as \c TypeMatcher.
///
/// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g.,
/// \code
/// "int* p" and "void f()" in
/// int* p;
/// void f();
/// \endcode
///
/// Usable as: Any Matcher
inline internal::TrueMatcher anything() { return internal::TrueMatcher(); }
/// Matches the top declaration context.
///
/// Given
/// \code
/// int X;
/// namespace NS {
/// int Y;
/// } // namespace NS
/// \endcode
/// decl(hasDeclContext(translationUnitDecl()))
/// matches "int X", but not "int Y".
extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl>
translationUnitDecl;
/// Matches typedef declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefDecl()
/// matches "typedef int X", but not "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl>
typedefDecl;
/// Matches typedef name declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typedefNameDecl()
/// matches "typedef int X" and "using Y = int"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl>
typedefNameDecl;
/// Matches type alias declarations.
///
/// Given
/// \code
/// typedef int X;
/// using Y = int;
/// \endcode
/// typeAliasDecl()
/// matches "using Y = int", but not "typedef int X"
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl>
typeAliasDecl;
/// Matches type alias template declarations.
///
/// typeAliasTemplateDecl() matches
/// \code
/// template <typename T>
/// using Y = X<T>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
/// Matches AST nodes that were expanded within the main-file.
///
/// Example matches X but not Y
/// (matcher = cxxRecordDecl(isExpansionInMainFile()))
/// \code
/// #include <Y.h>
/// class X {};
/// \endcode
/// Y.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInMainFile,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
return SourceManager.isInMainFile(
SourceManager.getExpansionLoc(Node.getBeginLoc()));
}
/// Matches AST nodes that were expanded within system-header-files.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInSystemHeader()))
/// \code
/// #include <SystemHeader.h>
/// class X {};
/// \endcode
/// SystemHeader.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
if (ExpansionLoc.isInvalid()) {
return false;
}
return SourceManager.isInSystemHeader(ExpansionLoc);
}
/// Matches AST nodes that were expanded within files whose name is
/// partially matching a given regex.
///
/// Example matches Y but not X
/// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")))
/// \code
/// #include "ASTMatcher.h"
/// class X {};
/// \endcode
/// ASTMatcher.h:
/// \code
/// class Y {};
/// \endcode
///
/// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc>
AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt,
TypeLoc),
RegExp) {
auto &SourceManager = Finder->getASTContext().getSourceManager();
auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc());
if (ExpansionLoc.isInvalid()) {
return false;
}
auto FileEntry =
SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc));
if (!FileEntry) {
return false;
}
auto Filename = FileEntry->getName();
return RegExp->match(Filename);
}
/// Matches statements that are (transitively) expanded from the named macro.
/// Does not match if only part of the statement is expanded from that macro or
/// if different parts of the statement are expanded from different
/// appearances of the macro.
///
/// FIXME: Change to be a polymorphic matcher that works on any syntactic
/// node. There's nothing `Stmt`-specific about it.
AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) {
// Verifies that the statement's beginning and ending are both expanded from
// the same instance of the given macro.
auto& Context = Finder->getASTContext();
llvm::Optional<SourceLocation> B =
internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context);
if (!B) return false;
llvm::Optional<SourceLocation> E =
internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context);
if (!E) return false;
return *B == *E;
}
/// Matches declarations.
///
/// Examples matches \c X, \c C, and the friend declaration inside \c C;
/// \code
/// void X();
/// class C {
/// friend X;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<Decl> decl;
/// Matches a declaration of a linkage specification.
///
/// Given
/// \code
/// extern "C" {}
/// \endcode
/// linkageSpecDecl()
/// matches "extern "C" {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
/// Matches a declaration of anything that could have a name.
///
/// Example matches \c X, \c S, the anonymous union type, \c i, and \c U;
/// \code
/// typedef int X;
/// struct S {
/// union {
/// int i;
/// } U;
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
/// Matches a declaration of a label.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelDecl()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl;
/// Matches a declaration of a namespace.
///
/// Given
/// \code
/// namespace {}
/// namespace test {}
/// \endcode
/// namespaceDecl()
/// matches "namespace {}" and "namespace test {}"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl>
namespaceDecl;
/// Matches a declaration of a namespace alias.
///
/// Given
/// \code
/// namespace test {}
/// namespace alias = ::test;
/// \endcode
/// namespaceAliasDecl()
/// matches "namespace alias" but not "namespace test"
extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl>
namespaceAliasDecl;
/// Matches class, struct, and union declarations.
///
/// Example matches \c X, \c Z, \c U, and \c S
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl;
/// Matches C++ class declarations.
///
/// Example matches \c X, \c Z
/// \code
/// class X;
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl>
cxxRecordDecl;
/// Matches C++ class template declarations.
///
/// Example matches \c Z
/// \code
/// template<class T> class Z {};
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl>
classTemplateDecl;
/// Matches C++ class template specializations.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
/// \endcode
/// classTemplateSpecializationDecl()
/// matches the specializations \c A<int> and \c A<double>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplateSpecializationDecl>
classTemplateSpecializationDecl;
/// Matches C++ class template partial specializations.
///
/// Given
/// \code
/// template<class T1, class T2, int I>
/// class A {};
///
/// template<class T, int I>
/// class A<T, T*, I> {};
///
/// template<>
/// class A<int, int, 1> {};
/// \endcode
/// classTemplatePartialSpecializationDecl()
/// matches the specialization \c A<T,T*,I> but not \c A<int,int,1>
extern const internal::VariadicDynCastAllOfMatcher<
Decl, ClassTemplatePartialSpecializationDecl>
classTemplatePartialSpecializationDecl;
/// Matches declarator declarations (field, variable, function
/// and non-type template parameter declarations).
///
/// Given
/// \code
/// class X { int y; };
/// \endcode
/// declaratorDecl()
/// matches \c int y.
extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
declaratorDecl;
/// Matches parameter variable declarations.
///
/// Given
/// \code
/// void f(int x);
/// \endcode
/// parmVarDecl()
/// matches \c int x.
extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
parmVarDecl;
/// Matches C++ access specifier declarations.
///
/// Given
/// \code
/// class C {
/// public:
/// int a;
/// };
/// \endcode
/// accessSpecDecl()
/// matches 'public:'
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
/// \code
/// class C {
/// C() : i(42) {}
/// int i;
/// };
/// \endcode
extern const internal::VariadicAllOfMatcher<CXXCtorInitializer>
cxxCtorInitializer;
/// Matches template arguments.
///
/// Given
/// \code
/// template <typename T> struct C {};
/// C<int> c;
/// \endcode
/// templateArgument()
/// matches 'int' in C<int>.
extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
/// Matches template name.
///
/// Given
/// \code
/// template <typename T> class X { };
/// X<int> xi;
/// \endcode
/// templateName()
/// matches 'X' in X<int>.
extern const internal::VariadicAllOfMatcher<TemplateName> templateName;
/// Matches non-type template parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// nonTypeTemplateParmDecl()
/// matches 'N', but not 'T'.
extern const internal::VariadicDynCastAllOfMatcher<Decl,
NonTypeTemplateParmDecl>
nonTypeTemplateParmDecl;
/// Matches template type parameter declarations.
///
/// Given
/// \code
/// template <typename T, int N> struct C {};
/// \endcode
/// templateTypeParmDecl()
/// matches 'T', but not 'N'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl>
templateTypeParmDecl;
/// Matches public C++ declarations and C++ base specifiers that specify public
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a; // fieldDecl(isPublic()) matches 'a'
/// protected: int b;
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived1 : public Base {}; // matches 'Base'
/// struct Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPublic,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
CXXBaseSpecifier)) {
return getAccessSpecifier(Node) == AS_public;
}
/// Matches protected C++ declarations and C++ base specifiers that specify
/// protected inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b; // fieldDecl(isProtected()) matches 'b'
/// private: int c;
/// };
/// \endcode
///
/// \code
/// class Base {};
/// class Derived : protected Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isProtected,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
CXXBaseSpecifier)) {
return getAccessSpecifier(Node) == AS_protected;
}
/// Matches private C++ declarations and C++ base specifiers that specify private
/// inheritance.
///
/// Examples:
/// \code
/// class C {
/// public: int a;
/// protected: int b;
/// private: int c; // fieldDecl(isPrivate()) matches 'c'
/// };
/// \endcode
///
/// \code
/// struct Base {};
/// struct Derived1 : private Base {}; // matches 'Base'
/// class Derived2 : Base {}; // matches 'Base'
/// \endcode
AST_POLYMORPHIC_MATCHER(isPrivate,
AST_POLYMORPHIC_SUPPORTED_TYPES(Decl,
CXXBaseSpecifier)) {
return getAccessSpecifier(Node) == AS_private;
}
/// Matches non-static data members that are bit-fields.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b;
/// };
/// \endcode
/// fieldDecl(isBitField())
/// matches 'int a;' but not 'int b;'.
AST_MATCHER(FieldDecl, isBitField) {
return Node.isBitField();
}
/// Matches non-static data members that are bit-fields of the specified
/// bit width.
///
/// Given
/// \code
/// class C {
/// int a : 2;
/// int b : 4;
/// int c : 2;
/// };
/// \endcode
/// fieldDecl(hasBitWidth(2))
/// matches 'int a;' and 'int c;' but not 'int b;'.
AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) {
return Node.isBitField() &&
Node.getBitWidthValue(Finder->getASTContext()) == Width;
}
/// Matches non-static data members that have an in-class initializer.
///
/// Given
/// \code
/// class C {
/// int a = 2;
/// int b = 3;
/// int c;
/// };
/// \endcode
/// fieldDecl(hasInClassInitializer(integerLiteral(equals(2))))
/// matches 'int a;' but not 'int b;'.
/// fieldDecl(hasInClassInitializer(anything()))
/// matches 'int a;' and 'int b;' but not 'int c;'.
AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *Initializer = Node.getInClassInitializer();
return (Initializer != nullptr &&
InnerMatcher.matches(*Initializer, Finder, Builder));
}
/// Determines whether the function is "main", which is the entry point
/// into an executable program.
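///
/// Given
/// \code
/// void f();
/// int main() {}
/// \endcode
/// functionDecl(isMain())
/// matches the definition of \c main, but not \c f.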
AST_MATCHER(FunctionDecl, isMain) {
return Node.isMain();
}
/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
/// template<typename T> class A {}; // #1
/// template<> class A<int> {}; // #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
/// matches '#2' with classTemplateDecl() matching the class template
/// declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
const ClassTemplateDecl* Decl = Node.getSpecializedTemplate();
return (Decl != nullptr &&
InnerMatcher.matches(*Decl, Finder, Builder));
}
/// Matches a declaration that has been implicitly added
/// by the compiler (e.g., implicit default/copy constructors).
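///
/// Given
/// \code
/// struct S { int x; };
/// S s;
/// \endcode
/// cxxConstructorDecl(isImplicit())
/// matches the compiler-generated default constructor of \c S, which the
/// declaration of \c s causes the compiler to add.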
AST_MATCHER(Decl, isImplicit) {
return Node.isImplicit();
}
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
/// template<typename T> class A {};
/// template<> class A<double> {};
/// A<int> a;
///
/// template<typename T> void f() {};
/// void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(asString("int"))))
/// matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
hasAnyTemplateArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType,
FunctionDecl),
internal::Matcher<TemplateArgument>, InnerMatcher) {
ArrayRef<TemplateArgument> List =
internal::getTemplateSpecializationArgs(Node);
return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
Builder);
}
/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
/// void foo()
/// {
/// int i = 3.0;
/// }
/// \endcode
/// The matcher
/// \code
/// traverse(TK_IgnoreImplicitCastsAndParentheses,
/// varDecl(hasInitializer(floatLiteral().bind("init")))
/// )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T>
internal::Matcher<T> traverse(TraversalKind TK,
const internal::Matcher<T> &InnerMatcher) {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>();
}
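/// The overloads of \c traverse below mirror the one above for bindable,
/// variadic, and adapted/polymorphic matchers: each wraps \p InnerMatcher so
/// that all nested matchers are matched with the traversal kind \p TK.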
template <typename T>
internal::BindableMatcher<T>
traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) {
return internal::BindableMatcher<T>(
internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
InnerMatcher.getID().first)
.template unconditionalConvertTo<T>());
}
template <typename... T>
internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>
traverse(TraversalKind TK,
const internal::VariadicOperatorMatcher<T...> &InnerMatcher) {
return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>(
TK, InnerMatcher);
}
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename T, typename ToTypes>
internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>
traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
ArgumentAdapterT, T, ToTypes> &InnerMatcher) {
return internal::TraversalWrapper<
internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T,
ToTypes>>(TK, InnerMatcher);
}
template <template <typename T, typename P1> class MatcherT, typename P1,
typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
MatcherT, P1, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
TK, InnerMatcher);
}
template <template <typename T, typename P1, typename P2> class MatcherT,
typename P1, typename P2, typename ReturnTypesF>
internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
return internal::TraversalWrapper<
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
TK, InnerMatcher);
}
/// Matches expressions that match InnerMatcher after any implicit AST
/// nodes are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// class C {};
/// C a = C();
/// C b;
/// C c = b;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr())))
/// \endcode
/// would match the declarations for a, b, and c.
/// While
/// \code
/// varDecl(hasInitializer(cxxConstructExpr()))
/// \endcode
/// would only match the declarations for b and c.
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after any implicit casts
/// are stripped off.
///
/// Parentheses and explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = 0;
/// const int c = a;
/// int *d = arr;
/// long e = (long) 0l;
/// \endcode
/// The matchers
/// \code
/// varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
/// \endcode
/// would match the declarations for a, b, c, and d, but not e.
/// While
/// \code
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
/// would only match the declarations for b, c, and d.
AST_MATCHER_P(Expr, ignoringImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after parentheses and
/// casts are stripped off.
///
/// Implicit and non-C-style casts are also discarded.
/// Given
/// \code
/// int a = 0;
/// char b = (0);
/// void* c = reinterpret_cast<char*>(0);
/// char d = char(0);
/// \endcode
/// The matcher
/// varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
/// would match the declarations for a, b, c, and d.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder);
}
/// Matches expressions that match InnerMatcher after implicit casts and
/// parentheses are stripped off.
///
/// Explicit casts are not discarded.
/// Given
/// \code
/// int arr[5];
/// int a = 0;
/// char b = (0);
/// const int c = a;
/// int *d = (arr);
/// long e = ((long) 0l);
/// \endcode
/// The matchers
/// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
/// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
/// would match the declarations for a, b, c, and d, but not e.
/// while
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// would only match the declaration for a.
AST_MATCHER_P(Expr, ignoringParenImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder);
}
/// Matches types that match InnerMatcher after any parens are stripped.
///
/// Given
/// \code
/// void (*fp)(void);
/// \endcode
/// The matcher
/// \code
/// varDecl(hasType(pointerType(pointee(ignoringParens(functionType())))))
/// \endcode
/// would match the declaration for fp.
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>,
InnerMatcher, 0) {
return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder);
}
/// Overload \c ignoringParens for \c Expr.
///
/// Given
/// \code
/// const char* str = ("my-string");
/// \endcode
/// The matcher
/// \code
/// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral())))
/// \endcode
/// would match the implicit cast resulting from the assignment.
AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>,
InnerMatcher, 1) {
const Expr *E = Node.IgnoreParens();
return InnerMatcher.matches(*E, Finder, Builder);
}
/// Matches expressions that are instantiation-dependent even if they are
/// neither type- nor value-dependent.
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
/// template<typename T>
/// void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
return Node.isInstantiationDependent();
}
/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
/// template<typename T>
/// void add(T x, int y) {
/// x + y;
/// }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }
/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
/// template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }
/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given InnerMatcher.
///
/// Given
/// \code
/// template<typename T, typename U> class A {};
/// A<bool, int> b;
/// A<int, bool> c;
///
/// template<typename T> void f() {}
/// void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
/// 1, refersToType(asString("int"))))
/// matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
/// matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
hasTemplateArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType,
FunctionDecl),
unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
ArrayRef<TemplateArgument> List =
internal::getTemplateSpecializationArgs(Node);
if (List.size() <= N)
return false;
return InnerMatcher.matches(List[N], Finder, Builder);
}
/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
/// template<typename T> struct C {};
/// C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
/// matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
templateArgumentCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
TemplateSpecializationType),
unsigned, N) {
return internal::getTemplateSpecializationArgs(Node).size() == N;
}
/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
/// struct X {};
/// template<typename T> struct A {};
/// A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToType(class(hasName("X")))))
/// matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Type)
return false;
return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}
/// Matches a TemplateArgument that refers to a certain template.
///
/// Given
/// \code
/// template<template <typename> class S> class X {};
/// template<typename T> class Y {};
/// X<Y> xi;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToTemplate(templateName())))
/// matches the specialization \c X<Y>
AST_MATCHER_P(TemplateArgument, refersToTemplate,
internal::Matcher<TemplateName>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Template)
return false;
return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder);
}
/// Matches a canonical TemplateArgument that refers to a certain
/// declaration.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
/// refersToDeclaration(fieldDecl(hasName("next")))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, refersToDeclaration,
internal::Matcher<Decl>, InnerMatcher) {
if (Node.getKind() == TemplateArgument::Declaration)
return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder);
return false;
}
/// Matches a sugar TemplateArgument that refers to a certain expression.
///
/// Given
/// \code
/// struct B { int next; };
/// template<int(B::*next_ptr)> struct A {};
/// A<&B::next> a;
/// \endcode
/// templateSpecializationType(hasAnyTemplateArgument(
/// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next"))))))))
/// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching
/// \c B::next
AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) {
if (Node.getKind() == TemplateArgument::Expression)
return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder);
return false;
}
/// Matches a TemplateArgument that is an integral value.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(isIntegral()))
/// matches the implicit instantiation of C in C<42>
/// with isIntegral() matching 42.
AST_MATCHER(TemplateArgument, isIntegral) {
return Node.getKind() == TemplateArgument::Integral;
}
/// Matches a TemplateArgument that refers to an integral type.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(refersToIntegralType(asString("int"))))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, refersToIntegralType,
internal::Matcher<QualType>, InnerMatcher) {
if (Node.getKind() != TemplateArgument::Integral)
return false;
return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder);
}
/// Matches a TemplateArgument of integral type with a given value.
///
/// Note that 'Value' is a string as the template argument's value is
/// an arbitrary precision integer. 'Value' must be equal to the canonical
/// representation of that integral value in base 10.
///
/// Given
/// \code
/// template<int T> struct C {};
/// C<42> c;
/// \endcode
/// classTemplateSpecializationDecl(
/// hasAnyTemplateArgument(equalsIntegralValue("42")))
/// matches the implicit instantiation of C in C<42>.
AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
std::string, Value) {
if (Node.getKind() != TemplateArgument::Integral)
return false;
return Node.getAsIntegral().toString(10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
///
/// Given
/// \code
/// @autoreleasepool {
/// int x = 0;
/// }
/// \endcode
/// autoreleasePoolStmt(stmt()) matches the declaration of "x"
/// inside the autorelease pool.
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
ObjCAutoreleasePoolStmt> autoreleasePoolStmt;
/// Matches any value declaration.
///
/// Example matches A, B, C and F
/// \code
/// enum X { A, B, C };
/// void F();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl;
/// Matches C++ constructor declarations.
///
/// Example matches Foo::Foo() and Foo::Foo(int)
/// \code
/// class Foo {
/// public:
/// Foo();
/// Foo(int);
/// int DoSomething();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl>
cxxConstructorDecl;
/// Matches explicit C++ destructor declarations.
///
/// Example matches Foo::~Foo()
/// \code
/// class Foo {
/// public:
/// virtual ~Foo();
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl>
cxxDestructorDecl;
/// Matches enum declarations.
///
/// Example matches X
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl;
/// Matches enum constants.
///
/// Example matches A, B, C
/// \code
/// enum X {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl>
enumConstantDecl;
/// Matches tag declarations.
///
/// Example matches X, Z, U, S, E
/// \code
/// class X;
/// template<class T> class Z {};
/// struct S {};
/// union U {};
/// enum E {
/// A, B, C
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl;
/// Matches method declarations.
///
/// Example matches y
/// \code
/// class X { void y(); };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl>
cxxMethodDecl;
/// Matches conversion operator declarations.
///
/// Example matches the operator.
/// \code
/// class X { operator int() const; };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl>
cxxConversionDecl;
/// Matches user-defined and implicitly generated deduction guides.
///
/// Example matches the deduction guide.
/// \code
/// template<typename T>
/// class X { X(int); };
/// X(int) -> X<int>;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl>
cxxDeductionGuideDecl;
/// Matches variable declarations.
///
/// Note: this does not match declarations of member variables, which are
/// "field" declarations in Clang parlance.
///
/// Example matches a
/// \code
/// int a;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl;
/// Matches field declarations.
///
/// Given
/// \code
/// class X { int m; };
/// \endcode
/// fieldDecl()
/// matches 'm'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl;
/// Matches indirect field declarations.
///
/// Given
/// \code
/// struct X { struct { int a; }; };
/// \endcode
/// indirectFieldDecl()
/// matches 'a'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl>
indirectFieldDecl;
/// Matches function declarations.
///
/// Example matches f
/// \code
/// void f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl>
functionDecl;
/// Matches C++ function template declarations.
///
/// Example matches f
/// \code
/// template<class T> void f(T t) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl>
functionTemplateDecl;
/// Matches friend declarations.
///
/// Given
/// \code
/// class X { friend void foo(); };
/// \endcode
/// friendDecl()
/// matches 'friend void foo()'.
extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl;
/// Matches statements.
///
/// Given
/// \code
/// { ++a; }
/// \endcode
/// stmt()
/// matches both the compound statement '{ ++a; }' and '++a'.
extern const internal::VariadicAllOfMatcher<Stmt> stmt;
/// Matches declaration statements.
///
/// Given
/// \code
/// int a;
/// \endcode
/// declStmt()
/// matches 'int a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt;
/// Matches member expressions.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// int a; static int b;
/// };
/// \endcode
/// memberExpr()
/// matches this->x, x, y.x, a, this->b
extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr;
/// Matches unresolved member expressions.
///
/// Given
/// \code
/// struct X {
/// template <class T> void f();
/// void g();
/// };
/// template <class T> void h() { X x; x.f<T>(); x.g(); }
/// \endcode
/// unresolvedMemberExpr()
/// matches x.f<T>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr>
unresolvedMemberExpr;
/// Matches member expressions where the actual member referenced could not be
/// resolved because the base expression or the member name was dependent.
///
/// Given
/// \code
/// template <class T> void f() { T t; t.g(); }
/// \endcode
/// cxxDependentScopeMemberExpr()
/// matches t.g
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXDependentScopeMemberExpr>
cxxDependentScopeMemberExpr;
/// Matches call expressions.
///
/// Example matches x.y() and y()
/// \code
/// X x;
/// x.y();
/// y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr;
/// Matches call expressions which were resolved using ADL.
///
/// Example matches y(x) but not y(42) or NS::y(x).
/// \code
/// namespace NS {
/// struct X {};
/// void y(X);
/// }
///
/// void y(...);
///
/// void test() {
/// NS::X x;
/// y(x); // Matches
/// NS::y(x); // Doesn't match
/// y(42); // Doesn't match
/// using NS::y;
/// y(x); // Found by both unqualified lookup and ADL, doesn't match
/// }
/// \endcode
AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); }
/// Matches lambda expressions.
///
/// Example matches [&](){return 5;}
/// \code
/// [&](){return 5;}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr;
/// Matches member call expressions.
///
/// Example matches x.y()
/// \code
/// X x;
/// x.y();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr>
cxxMemberCallExpr;
/// Matches Objective-C message invocation expressions.
///
/// The innermost message send invokes the "alloc" class method on the
/// NSString class, while the outermost message send invokes the
/// "initWithString" instance method on the object returned from
/// NSString's "alloc". This matcher should match both message sends.
/// \code
/// [[NSString alloc] initWithString:@"Hello"]
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr>
objcMessageExpr;
/// Matches Objective-C interface declarations.
///
/// Example matches Foo
/// \code
/// @interface Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl>
objcInterfaceDecl;
/// Matches Objective-C implementation declarations.
///
/// Example matches Foo
/// \code
/// @implementation Foo
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl>
objcImplementationDecl;
/// Matches Objective-C protocol declarations.
///
/// Example matches FooDelegate
/// \code
/// @protocol FooDelegate
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl>
objcProtocolDecl;
/// Matches Objective-C category declarations.
///
/// Example matches Foo (Additions)
/// \code
/// @interface Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl>
objcCategoryDecl;
/// Matches Objective-C category definitions.
///
/// Example matches Foo (Additions)
/// \code
/// @implementation Foo (Additions)
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl>
objcCategoryImplDecl;
/// Matches Objective-C method declarations.
///
/// Example matches both declaration and definition of -[Foo method]
/// \code
/// @interface Foo
/// - (void)method;
/// @end
///
/// @implementation Foo
/// - (void)method {}
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl>
objcMethodDecl;
/// Matches block declarations.
///
/// Example matches the declaration of the nameless block printing an input
/// integer.
///
/// \code
/// myFunc(^(int p) {
/// printf("%d", p);
/// })
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl>
blockDecl;
/// Matches Objective-C instance variable declarations.
///
/// Example matches _enabled
/// \code
/// @implementation Foo {
/// BOOL _enabled;
/// }
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl>
objcIvarDecl;
/// Matches Objective-C property declarations.
///
/// Example matches enabled
/// \code
/// @interface Foo
/// @property BOOL enabled;
/// @end
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl>
objcPropertyDecl;
/// Matches Objective-C \@throw statements.
///
/// Example matches \@throw
/// \code
/// @throw obj;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt>
objcThrowStmt;
/// Matches Objective-C @try statements.
///
/// Example matches @try
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt>
objcTryStmt;
/// Matches Objective-C @catch statements.
///
/// Example matches @catch
/// \code
/// @try {}
/// @catch (...) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt>
objcCatchStmt;
/// Matches Objective-C @finally statements.
///
/// Example matches @finally
/// \code
/// @try {}
/// @finally {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt>
objcFinallyStmt;
/// Matches expressions that introduce cleanups to be run at the end
/// of the sub-expression's evaluation.
///
/// Example matches std::string()
/// \code
/// const std::string str = std::string();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups>
exprWithCleanups;
/// Matches init list expressions.
///
/// Given
/// \code
/// int a[] = { 1, 2 };
/// struct B { int x, y; };
/// B b = { 5, 6 };
/// \endcode
/// initListExpr()
/// matches "{ 1, 2 }" and "{ 5, 6 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr>
initListExpr;
/// Matches the syntactic form of init list expressions
/// (if the expression has it).
AST_MATCHER_P(InitListExpr, hasSyntacticForm,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *SyntForm = Node.getSyntacticForm();
return (SyntForm != nullptr &&
InnerMatcher.matches(*SyntForm, Finder, Builder));
}
/// Matches C++ initializer list expressions.
///
/// Given
/// \code
/// std::vector<int> a({ 1, 2, 3 });
/// std::vector<int> b = { 4, 5 };
/// int c[] = { 6, 7 };
/// std::pair<int, int> d = { 8, 9 };
/// \endcode
/// cxxStdInitializerListExpr()
/// matches "{ 1, 2, 3 }" and "{ 4, 5 }"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXStdInitializerListExpr>
cxxStdInitializerListExpr;
/// Matches implicit initializers of init list expressions.
///
/// Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
/// \endcode
/// implicitValueInitExpr()
/// matches "[0].y" (implicitly)
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr>
implicitValueInitExpr;
/// Matches paren list expressions.
/// ParenListExprs don't have a predefined type and are used for late parsing.
/// In the final AST, they can appear in template declarations.
///
/// Given
/// \code
/// template<typename T> class X {
/// void f() {
/// X x(*this);
/// int a = 0, b = 1; int i = (a, b);
/// }
/// };
/// \endcode
/// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b)
/// has a predefined type and is a ParenExpr, not a ParenListExpr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr>
parenListExpr;
/// Matches substitutions of non-type template parameters.
///
/// Given
/// \code
/// template <int N>
/// struct A { static const int n = N; };
/// struct B : public A<42> {};
/// \endcode
/// substNonTypeTemplateParmExpr()
/// matches "N" in the right-hand side of "static const int n = N;"
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
/// Matches using declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using X::x;
/// \endcode
/// usingDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
/// Matches using namespace declarations.
///
/// Given
/// \code
/// namespace X { int x; }
/// using namespace X;
/// \endcode
/// usingDirectiveDecl()
/// matches \code using namespace X \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
/// Matches reference to a name that can be looked up during parsing
/// but could not be resolved to a specific declaration.
///
/// Given
/// \code
/// template<typename T>
/// T foo() { T a; return a; }
/// template<typename T>
/// void bar() {
/// foo<T>();
/// }
/// \endcode
/// unresolvedLookupExpr()
/// matches \code foo<T>() \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
unresolvedLookupExpr;
/// Matches unresolved using value declarations.
///
/// Given
/// \code
/// template<typename X>
/// class C : private X {
/// using X::x;
/// };
/// \endcode
/// unresolvedUsingValueDecl()
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingValueDecl>
unresolvedUsingValueDecl;
/// Matches unresolved using value declarations that involve the
/// typename.
///
/// Given
/// \code
/// template <typename T>
/// struct Base { typedef T Foo; };
///
/// template<typename T>
/// struct S : private Base<T> {
/// using typename Base<T>::Foo;
/// };
/// \endcode
/// unresolvedUsingTypenameDecl()
/// matches \code using Base<T>::Foo \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl,
UnresolvedUsingTypenameDecl>
unresolvedUsingTypenameDecl;
/// Matches a constant expression wrapper.
///
/// Example matches the constant in the case statement:
/// (matcher = constantExpr())
/// \code
/// switch (a) {
/// case 37: break;
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr>
constantExpr;
/// Matches parentheses used in expressions.
///
/// Example matches (foo() + 1)
/// \code
/// int foo() { return 1; }
/// int a = (foo() + 1);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr;
/// Matches constructor call expressions (including implicit ones).
///
/// Example matches string(ptr, n) and ptr within arguments of f
/// (matcher = cxxConstructExpr())
/// \code
/// void f(const string &a, const string &b);
/// char *ptr;
/// int n;
/// f(string(ptr, n), ptr);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr>
cxxConstructExpr;
/// Matches unresolved constructor call expressions.
///
/// Example matches T(t) in return statement of f
/// (matcher = cxxUnresolvedConstructExpr())
/// \code
/// template <typename T>
/// void f(const T& t) { return T(t); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
CXXUnresolvedConstructExpr>
cxxUnresolvedConstructExpr;
/// Matches implicit and explicit this expressions.
///
/// Example matches the implicit this expression in "return i".
/// (matcher = cxxThisExpr())
/// \code
/// struct foo {
/// int i;
/// int f() { return i; }
/// };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr>
cxxThisExpr;
/// Matches nodes where temporaries are created.
///
/// Example matches FunctionTakesString(GetStringByValue())
/// (matcher = cxxBindTemporaryExpr())
/// \code
/// FunctionTakesString(GetStringByValue());
/// FunctionTakesStringByPointer(GetStringPointer());
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr>
cxxBindTemporaryExpr;
/// Matches nodes where temporaries are materialized.
///
/// Example: Given
/// \code
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode
/// materializeTemporaryExpr() matches 'f()' in these statements
/// \code
/// T u(f());
/// g(f());
/// f().func();
/// \endcode
/// but does not match
/// \code
/// f();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
MaterializeTemporaryExpr>
materializeTemporaryExpr;
/// Matches new expressions.
///
/// Given
/// \code
/// new X;
/// \endcode
/// cxxNewExpr()
/// matches 'new X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr;
/// Matches delete expressions.
///
/// Given
/// \code
/// delete X;
/// \endcode
/// cxxDeleteExpr()
/// matches 'delete X'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr>
cxxDeleteExpr;
/// Matches noexcept expressions.
///
/// Given
/// \code
/// bool a() noexcept;
/// bool b() noexcept(true);
/// bool c() noexcept(false);
/// bool d() noexcept(noexcept(a()));
/// bool e = noexcept(b()) || noexcept(c());
/// \endcode
/// cxxNoexceptExpr()
/// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`.
/// It doesn't match the noexcept specifier in the declarations of a, b, c, or d.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr>
cxxNoexceptExpr;
/// Matches array subscript expressions.
///
/// Given
/// \code
/// int i = a[1];
/// \endcode
/// arraySubscriptExpr()
/// matches "a[1]"
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr>
arraySubscriptExpr;
/// Matches the value of a default argument at the call site.
///
/// Example matches the CXXDefaultArgExpr placeholder inserted for the
/// default value of the second parameter in the call expression f(42)
/// (matcher = cxxDefaultArgExpr())
/// \code
/// void f(int x, int y = 0);
/// f(42);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr>
cxxDefaultArgExpr;
/// Matches overloaded operator calls.
///
/// Note that if an operator isn't overloaded, it won't match. Instead, use
/// the binaryOperator matcher.
/// Currently it does not match operators such as new or delete.
/// FIXME: figure out why these do not match?
///
/// Example matches both operator<<((o << b), c) and operator<<(o, b)
/// (matcher = cxxOperatorCallExpr())
/// \code
/// ostream &operator<< (ostream &out, int i) { };
/// ostream &o; int b = 1, c = 1;
/// o << b << c;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr>
cxxOperatorCallExpr;
/// Matches expressions.
///
/// Example matches x()
/// \code
/// void f() { x(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr;
/// Matches expressions that refer to declarations.
///
/// Example matches x in if (x)
/// \code
/// bool x;
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr>
declRefExpr;
/// Matches a reference to an ObjCIvar.
///
/// Example: matches "a" in "init" method:
/// \code
/// @implementation A {
/// NSString *a;
/// }
/// - (void) init {
/// a = @"hello";
/// }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr>
objcIvarRefExpr;
/// Matches a reference to a block.
///
/// Example: matches "^{}":
/// \code
/// void f() { ^{}(); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr;
/// Matches if statements.
///
/// Example matches 'if (x) {}'
/// \code
/// if (x) {}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt;
/// Matches for statements.
///
/// Example matches 'for (;;) {}'
/// \code
/// for (;;) {}
/// int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;
/// Matches the increment statement of a for loop.
///
/// Example:
/// forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
/// for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
InnerMatcher) {
const Stmt *const Increment = Node.getInc();
return (Increment != nullptr &&
InnerMatcher.matches(*Increment, Finder, Builder));
}
/// Matches the initialization statement of a for loop.
///
/// Example:
/// forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
/// for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
InnerMatcher) {
const Stmt *const Init = Node.getInit();
return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
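// A small composition sketch: the two for-loop submatchers above combine with
// other matchers declared in this file (hasSingleDecl, hasOperatorName).
// \code
//   // Matches canonical counting loops such as 'for (int i = 0; i < N; ++i)'.
//   forStmt(hasLoopInit(declStmt(hasSingleDecl(varDecl()))),
//           hasIncrement(unaryOperator(hasOperatorName("++"))))
// \endcode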
/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
/// int i[] = {1, 2, 3}; for (auto a : i);
/// for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
cxxForRangeStmt;
/// Matches the loop variable of a range-based for loop.
///
/// Example:
///   cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
InnerMatcher) {
const VarDecl *const Var = Node.getLoopVariable();
return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}
/// Matches the range initializer of a range-based for loop.
///
/// Example:
///   cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
/// for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *const Init = Node.getRangeInit();
return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}
/// Matches while statements.
///
/// Given
/// \code
/// while (true) {}
/// \endcode
/// whileStmt()
/// matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
/// Matches do statements.
///
/// Given
/// \code
/// do {} while (true);
/// \endcode
/// doStmt()
/// matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
/// Matches break statements.
///
/// Given
/// \code
/// while (true) { break; }
/// \endcode
/// breakStmt()
/// matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
/// Matches continue statements.
///
/// Given
/// \code
/// while (true) { continue; }
/// \endcode
/// continueStmt()
/// matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
/// Matches return statements.
///
/// Given
/// \code
/// return 1;
/// \endcode
/// returnStmt()
/// matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
/// Matches goto statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// gotoStmt()
/// matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
/// Matches label statements.
///
/// Given
/// \code
/// goto FOO;
/// FOO: bar();
/// \endcode
/// labelStmt()
/// matches 'FOO:'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
/// Matches address of label statements (GNU extension).
///
/// Given
/// \code
/// FOO: bar();
/// void *ptr = &&FOO;
///   goto *ptr;
/// \endcode
/// addrLabelExpr()
/// matches '&&FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr>
addrLabelExpr;
/// Matches switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchStmt()
/// matches 'switch(a)'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt;
/// Matches case and default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// switchCase()
/// matches 'case 42:' and 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase;
/// Matches case statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// caseStmt()
/// matches 'case 42:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt;
/// Matches default statements inside switch statements.
///
/// Given
/// \code
/// switch(a) { case 42: break; default: break; }
/// \endcode
/// defaultStmt()
/// matches 'default:'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt>
defaultStmt;
/// Matches compound statements.
///
/// Example matches '{}' and '{{}}' in 'for (;;) {{}}'
/// \code
/// for (;;) {{}}
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt>
compoundStmt;
/// Matches catch statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxCatchStmt()
/// matches 'catch(int i)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt>
cxxCatchStmt;
/// Matches try statements.
///
/// \code
/// try {} catch(int i) {}
/// \endcode
/// cxxTryStmt()
/// matches 'try {}'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt;
/// Matches throw expressions.
///
/// \code
/// try { throw 5; } catch(int i) {}
/// \endcode
/// cxxThrowExpr()
/// matches 'throw 5'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr>
cxxThrowExpr;
/// Matches null statements.
///
/// \code
/// foo();;
/// \endcode
/// nullStmt()
/// matches the second ';'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt;
/// Matches asm statements.
///
/// \code
/// int i = 100;
/// __asm("mov al, 2");
/// \endcode
/// asmStmt()
/// matches '__asm("mov al, 2")'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
/// Matches bool literals.
///
/// Example matches true
/// \code
/// true
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
/// Matches string literals (also matches wide string literals).
///
/// Example matches "abcd", L"abcd"
/// \code
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral>
stringLiteral;
/// Matches character literals (also matches wchar_t).
///
/// Does not match hex-encoded chars (e.g. 0x1234, which is an IntegerLiteral).
///
/// Example matches 'a', L'a'
/// \code
/// char ch = 'a';
/// wchar_t chw = L'a';
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
/// Matches integer literals of all sizes / encodings, e.g.
/// 1, 1L, 0x1 and 1U.
///
/// Does not match character-encoded integers such as L'a'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
integerLiteral;
/// Matches float literals of all sizes / encodings, e.g.
/// 1.0, 1.0f, 1.0L and 1e10.
///
/// Does not match implicit conversions such as
/// \code
/// float a = 10;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral>
floatLiteral;
/// Matches imaginary literals, which are based on integer and floating
/// point literals, e.g. 1i, 1.0i.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral>
imaginaryLiteral;
/// Matches fixed-point literals.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral>
fixedPointLiteral;
/// Matches user defined literal operator call.
///
/// Example match: "foo"_suffix
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
userDefinedLiteral;
/// Matches compound (i.e. non-scalar) literals.
///
/// Example match: {1}, (1, 2)
/// \code
/// int array[4] = {1};
/// vector int myvec = (vector int)(1, 2);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
/// Matches GNU __builtin_choose_expr.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr>
chooseExpr;
/// Matches GNU __null expression.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr>
gnuNullExpr;
/// Matches atomic builtins.
/// Example matches __atomic_load_n(ptr, 1)
/// \code
/// void foo() { int *ptr; __atomic_load_n(ptr, 1); }
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr;
/// Matches statement expression (GNU extension).
///
/// Example match: ({ int X = 4; X; })
/// \code
/// int C = ({ int X = 4; X; });
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr;
/// Matches binary operator expressions.
///
/// Example matches a || b
/// \code
/// !(a || b)
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
binaryOperator;
/// Matches unary operator expressions.
///
/// Example matches !a
/// \code
/// !a || b
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator>
unaryOperator;
/// Matches conditional operator expressions.
///
/// Example matches a ? b : c
/// \code
/// (a ? b : c) + 42
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
/// Matches binary conditional operator expressions (GNU extension).
///
/// Example matches a ?: b
/// \code
/// (a ?: b) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
BinaryConditionalOperator>
binaryConditionalOperator;
/// Matches opaque value expressions. They are used as helpers
/// to reference other expressions and can be found
/// in BinaryConditionalOperators, for example.
///
/// Example matches 'a'
/// \code
/// (a ?: c) + 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr>
opaqueValueExpr;
/// Matches a C++ static_assert declaration.
///
/// Example:
///   staticAssertDecl()
/// matches
/// static_assert(sizeof(S) == sizeof(int))
/// in
/// \code
/// struct S {
/// int x;
/// };
/// static_assert(sizeof(S) == sizeof(int));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl>
staticAssertDecl;
/// Matches a reinterpret_cast expression.
///
/// Either the source expression or the destination type can be matched
/// using has(), but hasDestinationType() is more specific and can be
/// more readable.
///
/// Example matches reinterpret_cast<char*>(&p) in
/// \code
/// void* p = reinterpret_cast<char*>(&p);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr>
cxxReinterpretCastExpr;
/// Matches a C++ static_cast expression.
///
/// \see hasDestinationType
/// \see reinterpretCast
///
/// Example:
/// cxxStaticCastExpr()
/// matches
/// static_cast<long>(8)
/// in
/// \code
/// long eight(static_cast<long>(8));
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr>
cxxStaticCastExpr;
/// Matches a dynamic_cast expression.
///
/// Example:
/// cxxDynamicCastExpr()
/// matches
/// dynamic_cast<D*>(&b);
/// in
/// \code
/// struct B { virtual ~B() {} }; struct D : B {};
/// B b;
/// D* p = dynamic_cast<D*>(&b);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr>
cxxDynamicCastExpr;
/// Matches a const_cast expression.
///
/// Example: Matches const_cast<int*>(&r) in
/// \code
/// int n = 42;
/// const int &r(n);
/// int* p = const_cast<int*>(&r);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr>
cxxConstCastExpr;
/// Matches a C-style cast expression.
///
/// Example: Matches (int) 2.2f in
/// \code
/// int i = (int) 2.2f;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr>
cStyleCastExpr;
/// Matches explicit cast expressions.
///
/// Matches any cast expression written in user code, whether it be a
/// C-style cast, a functional-style cast, or a keyword cast.
///
/// Does not match implicit conversions.
///
/// Note: the name "explicitCast" is chosen to match Clang's terminology, as
/// Clang uses the term "cast" to apply to implicit conversions as well as to
/// actual cast expressions.
///
/// \see hasDestinationType.
///
/// Example: matches all five of the casts in
/// \code
/// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
/// \endcode
/// but does not match the implicit conversion in
/// \code
/// long ell = 42;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr>
explicitCastExpr;
/// Matches the implicit cast nodes of Clang's AST.
///
/// This matches many different places, including function call return value
/// eliding, as well as any type conversions.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr>
implicitCastExpr;
/// Matches any cast nodes of Clang's AST.
///
/// Example: castExpr() matches each of the following:
/// \code
/// (int) 3;
/// const_cast<Expr *>(SubExpr);
/// char c = 0;
/// \endcode
/// but does not match
/// \code
/// int i = (0);
/// int k = 0;
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr;
/// Matches functional cast expressions
///
/// Example: Matches Foo(bar);
/// \code
/// Foo f = bar;
/// Foo g = (Foo) bar;
/// Foo h = Foo(bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr>
cxxFunctionalCastExpr;
/// Matches functional cast expressions having N != 1 arguments
///
/// Example: Matches Foo(bar, bar)
/// \code
/// Foo h = Foo(bar, bar);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr>
cxxTemporaryObjectExpr;
/// Matches predefined identifier expressions [C99 6.4.2.2].
///
/// Example: Matches __func__
/// \code
/// printf("%s", __func__);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr>
predefinedExpr;
/// Matches C99 designated initializer expressions [C99 6.7.8].
///
/// Example: Matches { [2].y = 1.0, [0].x = 1.0 }
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr>
designatedInitExpr;
/// Matches designated initializer expressions that contain
/// a specific number of designators.
///
/// Example: Given
/// \code
/// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 };
/// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 };
/// \endcode
/// designatorCountIs(2)
/// matches '{ [2].y = 1.0, [0].x = 1.0 }',
/// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'.
AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) {
return Node.size() == N;
}
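// A usage sketch for the two designated-initializer matchers above; the
// binding id "init" is illustrative.
// \code
//   // Matches initializers with exactly two designators and binds them.
//   designatedInitExpr(designatorCountIs(2)).bind("init")
// \endcode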
/// Matches \c QualTypes in the clang AST.
extern const internal::VariadicAllOfMatcher<QualType> qualType;
/// Matches \c Types in the clang AST.
extern const internal::VariadicAllOfMatcher<Type> type;
/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;
/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
/// class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
/// has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
eachOf;
/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
anyOf;
/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
2, std::numeric_limits<unsigned>::max()>
allOf;
/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information that may or may not be present about a main
/// matching node is desired.
///
/// For example, in:
/// \code
/// class Foo {
/// int bar;
/// }
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(
/// optionally(has(
/// fieldDecl(hasName("bar")).bind("var")
/// ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;
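// A callback-side sketch for the optional binding above, assuming the
// MatchFinder interface from clang/ASTMatchers/ASTMatchFinder.h; getNodeAs
// returns nullptr when the optional submatcher did not bind.
// \code
//   // Inside a MatchFinder::MatchCallback subclass:
//   void run(const MatchFinder::MatchResult &Result) override {
//     const auto *Rec = Result.Nodes.getNodeAs<CXXRecordDecl>("record");
//     const auto *Var = Result.Nodes.getNodeAs<FieldDecl>("var"); // may be null
//     if (Rec && Var) { /* the record has a field named "bar" */ }
//   }
// \endcode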
/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
/// Foo x = bar;
/// int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
/// matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
UnaryExprOrTypeTraitExpr>
unaryExprOrTypeTraitExpr;
/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
/// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
/// matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
internal::Matcher<QualType>, InnerMatcher) {
const QualType ArgumentType = Node.getTypeOfArgument();
return InnerMatcher.matches(ArgumentType, Finder, Builder);
}
/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
/// int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
/// matches \c sizeof(x)
///
/// If the matcher is used from clang-query, the UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string, e.g. ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
return Node.getKind() == Kind;
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return stmt(unaryExprOrTypeTraitExpr(
allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
InnerMatcher)));
}
/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
return stmt(unaryExprOrTypeTraitExpr(
allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}
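// A sketch combining the convenience wrapper above with hasArgumentOfType,
// declared earlier in this file:
// \code
//   // Matches 'sizeof(a)' where 'a' has type int, but no alignof expressions.
//   sizeOfExpr(hasArgumentOfType(asString("int")))
// \endcode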
/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
/// namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
return internal::Matcher<NamedDecl>(
new internal::HasNameMatcher({std::string(Name)}));
}
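// A minimal end-to-end sketch, assuming the MatchFinder interface from
// clang/ASTMatchers/ASTMatchFinder.h; the callback class and binding id are
// illustrative.
// \code
//   using namespace clang;
//   using namespace clang::ast_matchers;
//
//   class XReporter : public MatchFinder::MatchCallback {
//     void run(const MatchFinder::MatchResult &Result) override {
//       // Reached once per class named X; "x" is the id bound below.
//       if (const auto *RD = Result.Nodes.getNodeAs<CXXRecordDecl>("x"))
//         RD->dump();
//     }
//   };
//
//   MatchFinder Finder;
//   XReporter Reporter;
//   Finder.addMatcher(cxxRecordDecl(hasName("X")).bind("x"), &Reporter);
//   // Run Finder over a translation unit, e.g. with
//   // Tool.run(newFrontendActionFactory(&Finder).get());
// \endcode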
/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
/// hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
/// anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
internal::hasAnyNameFunc>
hasAnyName;
/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
/// class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
/// namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
std::string FullNameString = "::" + Node.getQualifiedNameAsString();
return RegExp->match(FullNameString);
}
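// Because the match runs against the fully qualified name with a leading
// "::", anchoring the pattern restricts it to a namespace prefix; a sketch:
// \code
//   // Matches any named declaration inside namespace foo::bar.
//   namedDecl(matchesName("^::foo::bar::"))
// \endcode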
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// Given:
/// \code
/// class A { int operator*(); };
/// const A &operator<<(const A &a, const A &b);
/// A a;
/// a << a; // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
return internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
{std::string(Name)});
}
/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
/// hasAnyOverloadedOperatorName("+", "-")
/// is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
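// A usage sketch for the variadic form above:
// \code
//   // Matches overloaded calls to either operator+ or operator-.
//   cxxOperatorCallExpr(hasAnyOverloadedOperatorName("+", "-"))
// \endcode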
/// Matches C++ classes that are directly or indirectly derived from a class
/// matching \c Base, or Objective-C classes that directly or indirectly
/// subclass a class matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, Z, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("NSObject"))
/// \code
/// @interface NSObject @end
/// @interface Bar : NSObject @end
/// \endcode
///
/// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl>
AST_POLYMORPHIC_MATCHER_P(
isDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
internal::Matcher<NamedDecl>, Base) {
// Check if the node is a C++ struct/union/class.
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false);
// The node must be an Objective-C class.
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
/*Directly=*/false);
}
/// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
std::string, BaseName, 1) {
if (BaseName.empty())
return false;
const auto M = isDerivedFrom(hasName(BaseName));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
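// A sketch combining the hierarchy matcher above with narrowing matchers
// declared elsewhere in this file (unless, isLambda):
// \code
//   // Matches classes derived from ::Base, excluding lambda closure types.
//   cxxRecordDecl(isDerivedFrom("::Base"), unless(isLambda()))
// \endcode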
/// Matches C++ classes that have a direct or indirect base matching \p
/// BaseSpecMatcher.
///
/// Example:
/// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
///   class IndirectlyDerived : Proxy {}; // matches IndirectlyDerived
/// \endcode
///
// FIXME: Refactor this and isDerivedFrom to reuse implementation.
AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>,
BaseSpecMatcher) {
return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder);
}
/// Matches C++ classes that have a direct base matching \p BaseSpecMatcher.
///
/// Example:
/// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))
/// \code
/// class Foo;
/// class Bar : Foo {};
/// class Baz : Bar {};
/// class SpecialBase;
/// class Proxy : SpecialBase {}; // matches Proxy
/// class IndirectlyDerived : Proxy {}; // doesn't match
/// \endcode
AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>,
BaseSpecMatcher) {
return Node.hasDefinition() &&
llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) {
return BaseSpecMatcher.matches(Base, Finder, Builder);
});
}
/// Similar to \c isDerivedFrom(), but also matches classes that directly
/// match \c Base.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isSameOrDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
internal::Matcher<NamedDecl>, Base, 0) {
const auto M = anyOf(Base, isDerivedFrom(Base));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isSameOrDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
std::string, BaseName, 1) {
if (BaseName.empty())
return false;
const auto M = isSameOrDerivedFrom(hasName(BaseName));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
/// class X;
/// class Y : public X {}; // directly derived
/// class Z : public Y {}; // indirectly derived
/// typedef X A;
/// typedef A B;
/// class C : public B {}; // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
/// class Foo;
/// typedef Foo X;
/// class Bar : public Foo {}; // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isDirectlyDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
internal::Matcher<NamedDecl>, Base, 0) {
// Check if the node is a C++ struct/union/class.
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);
// The node must be an Objective-C class.
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
/*Directly=*/true);
}
/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
isDirectlyDerivedFrom,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
std::string, BaseName, 1) {
if (BaseName.empty())
return false;
const auto M = isDirectlyDerivedFrom(hasName(BaseName));
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}
/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
/// class A { void func(); };
/// class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
Node.method_end(), Finder, Builder);
}
/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
/// auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
return Node.isLambda();
}
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
/// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// Usable as: Any Matcher
/// Note that has is a direct matcher, so it also matches things like implicit
/// casts and paren casts. If you are matching with expr then you should
/// probably consider using ignoringParenImpCasts like:
/// has(ignoringParenImpCasts(expr())).
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Z
/// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {}; // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; };
/// class Z { class Y { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasDescendantMatcher>
hasDescendant;
/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y, Y::X, Z::Y, Z::Y::X
/// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X
/// // inside Y.
/// class Z { class Y { class X {}; }; }; // Does not match Z.
/// \endcode
///
/// ChildT must be an AST base type.
///
/// As opposed to 'has', 'forEach' will cause a match for each result that
/// matches instead of only on the first one.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher>
forEach;
/// Matches AST nodes that have descendant AST nodes that match the
/// provided matcher.
///
/// Example matches X, A, A::X, B, B::C, B::C::X
/// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// class A { class X {}; }; // Matches A, because A::X is a class of name
/// // X inside A.
/// class B { class C { class X {}; }; };
/// \endcode
///
/// DescendantT must be an AST base type.
///
/// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
/// each result that matches instead of only on the first one.
///
/// Note: Recursively combined ForEachDescendant can cause many matches:
/// cxxRecordDecl(forEachDescendant(cxxRecordDecl(
/// forEachDescendant(cxxRecordDecl())
/// )))
/// will match 10 times (plus injected class name matches) on:
/// \code
/// class A { class B { class C { class D { class E {}; }; }; }; };
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::ForEachDescendantMatcher>
forEachDescendant;
/// Matches if the node or any descendant matches.
///
/// Generates results for each match.
///
/// For example, in:
/// \code
/// class A { class B {}; class C {}; };
/// \endcode
/// The matcher:
/// \code
/// cxxRecordDecl(hasName("::A"),
/// findAll(cxxRecordDecl(isDefinition()).bind("m")))
/// \endcode
/// will generate results for \c A, \c B and \c C.
///
/// Usable as: Any Matcher
template <typename T>
internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) {
return eachOf(Matcher, forEachDescendant(Matcher));
}
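// A sketch driving findAll through the free match() function, assumed from
// clang/ASTMatchers/ASTMatchFinder.h, which returns one BoundNodes entry per
// match; Context is an ASTContext.
// \code
//   // Collects A itself plus every record definition nested inside it.
//   auto Matches = match(cxxRecordDecl(hasName("::A"),
//                            findAll(cxxRecordDecl(isDefinition()).bind("m"))),
//                        Context);
//   for (const BoundNodes &BN : Matches)
//     if (const auto *RD = BN.getNodeAs<CXXRecordDecl>("m"))
//       RD->dump();
// \endcode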
/// Matches AST nodes that have a parent that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } }
/// \endcode
/// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }".
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasParentMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasParent;
/// Matches AST nodes that have an ancestor that matches the provided
/// matcher.
///
/// Given
/// \code
/// void f() { if (true) { int x = 42; } }
/// void g() { for (;;) { int x = 43; } }
/// \endcode
/// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43.
///
/// Usable as: Any Matcher
extern const internal::ArgumentAdaptingMatcherFunc<
internal::HasAncestorMatcher,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>,
internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>>
hasAncestor;
/// Matches if the provided matcher does not match.
///
/// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X"))))
/// \code
/// class X {};
/// class Y {};
/// \endcode
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matches a node if the declaration associated with that node
/// matches the given matcher.
///
/// The associated declaration is:
/// - for type nodes, the declaration of the underlying type
/// - for CallExpr, the declaration of the callee
/// - for MemberExpr, the declaration of the referenced member
/// - for CXXConstructExpr, the declaration of the constructor
/// - for CXXNewExpr, the declaration of the operator new
/// - for ObjCIvarExpr, the declaration of the ivar
///
/// For type nodes, hasDeclaration will generally match the declaration of the
/// sugared type. Given
/// \code
/// class X {};
/// typedef X Y;
/// Y y;
/// \endcode
/// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the
/// typedefDecl. A common use case is to match the underlying, desugared type.
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher:
/// \code
/// varDecl(hasType(hasUnqualifiedDesugaredType(
/// recordType(hasDeclaration(decl())))))
/// \endcode
/// In this matcher, the decl will match the CXXRecordDecl of class X.
///
/// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>,
/// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>,
/// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>,
/// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>,
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
inline internal::PolymorphicMatcherWithParam1<
internal::HasDeclarationMatcher, internal::Matcher<Decl>,
void(internal::HasDeclarationSupportedTypes)>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
return internal::PolymorphicMatcherWithParam1<
internal::HasDeclarationMatcher, internal::Matcher<Decl>,
void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
/// matcher.
///
/// Given
/// \code
/// namespace N { template<class T> void f(T t); }
/// template <class T> void g() { using N::f; f(T()); }
/// \endcode
/// \c unresolvedLookupExpr(hasAnyDeclaration(
/// namedDecl(hasUnderlyingDecl(hasName("::N::f")))))
/// matches the use of \c f in \c g() .
AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>,
InnerMatcher) {
const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl();
return UnderlyingDecl != nullptr &&
InnerMatcher.matches(*UnderlyingDecl, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression, after
/// stripping off any parentheses or implicit casts.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y {};
/// void z(Y y, X x) { y.m(); (g()).m(); x.m(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y")))))
/// matches `y.m()` and `(g()).m()`.
/// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m()`.
/// cxxMemberCallExpr(on(callExpr()))
/// matches `(g()).m()`.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *ExprNode = Node.getImplicitObjectArgument()
->IgnoreParenImpCasts();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches on the receiver of an Objective-C message expression.
///
/// Example
/// matcher = objcMessageExpr(hasReceiverType(asString("UIWebView *")));
/// matches the [webView ...] message invocation.
/// \code
/// NSString *webViewJavaScript = ...
/// UIWebView *webView = ...
///   [webView stringByEvaluatingJavaScriptFromString:webViewJavaScript];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>,
InnerMatcher) {
const QualType TypeDecl = Node.getReceiverType();
return InnerMatcher.matches(TypeDecl, Finder, Builder);
}
/// Returns true when the Objective-C method declaration is a class method.
///
/// Example
/// matcher = objcMethodDecl(isClassMethod())
/// matches
/// \code
/// @interface I + (void)foo; @end
/// \endcode
/// but not
/// \code
/// @interface I - (void)bar; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isClassMethod) {
return Node.isClassMethod();
}
/// Returns true when the Objective-C method declaration is an instance method.
///
/// Example
/// matcher = objcMethodDecl(isInstanceMethod())
/// matches
/// \code
/// @interface I - (void)bar; @end
/// \endcode
/// but not
/// \code
/// @interface I + (void)foo; @end
/// \endcode
AST_MATCHER(ObjCMethodDecl, isInstanceMethod) {
return Node.isInstanceMethod();
}
/// Returns true when the Objective-C message is sent to a class.
///
/// Example
/// matcher = objcMessageExpr(isClassMessage())
/// matches
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
/// but not
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isClassMessage) {
return Node.isClassMessage();
}
/// Returns true when the Objective-C message is sent to an instance.
///
/// Example
/// matcher = objcMessageExpr(isInstanceMessage())
/// matches
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// but not
/// \code
/// [NSString stringWithFormat:@"format"];
/// \endcode
AST_MATCHER(ObjCMessageExpr, isInstanceMessage) {
return Node.isInstanceMessage();
}
/// Matches if the Objective-C message is sent to an instance,
/// and the inner matcher matches on that instance.
///
/// For example the method call in
/// \code
/// NSString *x = @"hello";
/// [x containsString:@"h"];
/// \endcode
/// is matched by
/// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x"))))))
AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *ReceiverNode = Node.getInstanceReceiver();
return (ReceiverNode != nullptr &&
InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder,
Builder));
}
/// Matches when BaseName == Selector.getAsString()
///
/// matcher = objcMessageExpr(hasSelector("loadHTMLString:baseURL:"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) {
Selector Sel = Node.getSelector();
return BaseName.compare(Sel.getAsString()) == 0;
}
/// Matches when at least one of the supplied strings equals
/// Selector.getAsString()
///
/// matcher = objcMessageExpr(hasAnySelector("methodA:", "methodB:"));
/// matches both of the expressions below:
/// \code
/// [myObj methodA:argA];
/// [myObj methodB:argB];
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>,
StringRef,
internal::hasAnySelectorFunc>
hasAnySelector;
/// Matches ObjC selectors whose name contains
/// a substring matched by the given RegExp.
/// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?"));
/// matches the outer message expr in the code below, but NOT the message
/// invocation for self.bodyView.
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
std::string SelectorString = Node.getSelector().getAsString();
return RegExp->match(SelectorString);
}
/// Matches when the selector is the empty selector
///
/// Matches only when the selector of the objcMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
return Node.getSelector().isNull();
}
/// Matches when the selector is a Unary Selector
///
/// matcher = objcMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
return Node.getSelector().isUnarySelector();
}
/// Matches when the selector is a keyword selector
///
/// objcMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
return Node.getSelector().isKeywordSelector();
}
/// Matches when the selector has the specified number of arguments
///
/// matcher = objcMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objcMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
return Node.getSelector().getNumArgs() == N;
}
/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
/// matches this->x(), x(), y.x(), f()
/// with callee(...)
/// matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking an
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
InnerMatcher) {
const Expr *ExprNode = Node.getCallee();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
/// cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
1) {
return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}
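// A sketch of the declaration overload above, assuming the ofClass narrowing
// matcher declared elsewhere in this file:
// \code
//   // Matches y.x() by the callee's declaration rather than its expression.
//   callExpr(callee(cxxMethodDecl(ofClass(hasName("Y")))))
// \endcode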
/// Matches if the expression's or declaration's type matches a type
/// matcher.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int"))))
/// and friend class X (matcher = friendDecl(hasType("X")))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType,
AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
ValueDecl),
internal::Matcher<QualType>, InnerMatcher, 0) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
return InnerMatcher.matches(QT, Finder, Builder);
return false;
}
/// Overloaded to match the declaration of the expression's or value
/// declaration's type.
///
/// In case of a value declaration (for example a variable declaration),
/// this resolves one layer of indirection. For example, in the value
/// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of
/// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the
/// declaration of x.
///
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
/// \endcode
///
/// Example matches class Derived
/// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base"))))))
/// \code
/// class Base {};
/// class Derived : Base {};
/// \endcode
///
/// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>,
/// Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType,
AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl,
CXXBaseSpecifier),
internal::Matcher<Decl>, InnerMatcher, 1) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder);
return false;
}
/// Matches if the type location of the declarator decl's type matches
/// the inner matcher.
///
/// Given
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
if (!Node.getTypeSourceInfo())
// This happens for example for implicit destructors.
return false;
return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
///
/// Given
/// \code
/// class Y { public: void x(); };
/// void z() { Y* y; y->x(); }
/// \endcode
/// cxxMemberCallExpr(on(hasType(asString("class Y *"))))
/// matches y->x()
AST_MATCHER_P(QualType, asString, std::string, Name) {
return Name == Node.getAsString();
}
/// Matches if the matched type is a pointer type and the pointee type
/// matches the specified matcher.
///
/// Example matches y->x()
/// (matcher = cxxMemberCallExpr(on(hasType(pointsTo(
///      cxxRecordDecl(hasName("Y")))))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
QualType, pointsTo, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isAnyPointerType() &&
InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
InnerMatcher, 1) {
return pointsTo(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
/// class A {};
/// using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
InnerMatcher) {
return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
Builder);
}
/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
/// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
/// class X {
/// void a(X b) {
/// X &x = b;
/// const X &y = b;
/// }
/// };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
InnerMatcher) {
return (!Node.isNull() && Node->isReferenceType() &&
InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}
/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
/// typedef int &int_ref;
/// int a;
/// int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
InnerMatcher) {
if (Node.isNull())
return false;
return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}
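// A sketch of the canonical-type distinction above, assuming the
// referenceType node matcher declared elsewhere in this file:
// \code
//   // Matches 'int_ref b = a;' by looking through the typedef sugar.
//   varDecl(hasType(qualType(hasCanonicalType(referenceType()))))
// \endcode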
/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
InnerMatcher, 1) {
return references(qualType(hasDeclaration(InnerMatcher)))
.matches(Node, Finder, Builder);
}
/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// Y g();
/// class X : public Y { void g(); };
/// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
/// cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and `(g()).m()`, but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
/// does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *ExprNode = Node.getImplicitObjectArgument();
return (ExprNode != nullptr &&
InnerMatcher.matches(*ExprNode, Finder, Builder));
}
/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
/// class Y { public: void m(); };
/// class X : public Y { void g(); };
/// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("Y")))))
/// matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
/// cxxRecordDecl(hasName("X")))))
/// matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<QualType>, InnerMatcher, 0) {
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
internal::Matcher<Decl>, InnerMatcher, 1) {
return onImplicitObjectArgument(
anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
.matches(Node, Finder, Builder);
}
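// A short sketch (matcher expressions only): for the example above, the two
// overloads are interchangeable,
//
//   cxxMemberCallExpr(thisPointerType(hasDeclaration(
//       cxxRecordDecl(hasName("Y")))))            // QualType overload
//   cxxMemberCallExpr(thisPointerType(cxxRecordDecl(hasName("Y"))))
//                                                 // Decl overload
//
// and both match y.m(), p->m() and x.m(); the second form merely spares the
// explicit hasDeclaration().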
/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
/// (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
/// bool x;
/// if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>,
InnerMatcher) {
const Decl *DeclNode = Node.getDecl();
return (DeclNode != nullptr &&
InnerMatcher.matches(*DeclNode, Finder, Builder));
}
/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
/// namespace a { void f() {} }
/// using a::f;
/// void g() {
/// f(); // Matches this ..
/// a::f(); // .. but not this.
/// }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
/// matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
const NamedDecl *FoundDecl = Node.getFoundDecl();
if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
return InnerMatcher.matches(*UsingDecl, Finder, Builder);
return false;
}
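// Usage sketch: throughUsingDecl() composes with hasTargetDecl() (defined
// further down in this file) to constrain what the shadow declaration
// refers to. For the example above,
//
//   declRefExpr(throughUsingDecl(hasTargetDecl(functionDecl(hasName("f")))))
//
// still matches the unqualified call f() in g(), but only because the
// shadow declaration's target is the function a::f.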
/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
/// template <typename T> void foo(T);
/// template <typename T> void bar(T);
/// template <typename T> void baz(T t) {
/// foo(t);
/// bar(t);
/// }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
/// functionTemplateDecl(hasName("foo"))))
/// matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
Node.decls_end(), Finder, Builder);
}
/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
/// int a, b;
/// int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
/// matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
if (Node.isSingleDecl()) {
const Decl *FoundDecl = Node.getSingleDecl();
return InnerMatcher.matches(*FoundDecl, Finder, Builder);
}
return false;
}
/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
/// bool y() { return true; }
/// bool x = y();
/// \endcode
AST_MATCHER_P(
VarDecl, hasInitializer, internal::Matcher<Expr>,
InnerMatcher) {
const Expr *Initializer = Node.getAnyInitializer();
return (Initializer != nullptr &&
InnerMatcher.matches(*Initializer, Finder, Builder));
}
/// Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
return Node.isStaticLocal();
}
/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
return Node.hasLocalStorage();
}
/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
return Node.hasGlobalStorage();
}
/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
return Node.getStorageDuration() == SD_Automatic;
}
/// Matches a variable declaration that has static storage duration.
/// It includes variables declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// Given
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// \endcode
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
return Node.getStorageDuration() == SD_Static;
}
/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
return Node.getStorageDuration() == SD_Thread;
}
/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
/// void f(int y) {
/// try {
/// } catch (int x) {
/// }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
return Node.isExceptionVariable();
}
/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr,
ObjCMessageExpr),
unsigned, N) {
return Node.getNumArgs() == N;
}
/// Matches the n'th argument of a call expression or a constructor
/// call expression.
///
/// Example matches y in x(y)
/// (matcher = callExpr(hasArgument(0, declRefExpr())))
/// \code
/// void x(int) { int y; x(y); }
/// \endcode
AST_POLYMORPHIC_MATCHER_P2(hasArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr,
ObjCMessageExpr),
unsigned, N, internal::Matcher<Expr>, InnerMatcher) {
return (N < Node.getNumArgs() &&
InnerMatcher.matches(
*Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder));
}
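// Note that the argument is inspected after stripping parentheses and
// implicit casts, so a sketch like
//
//   callExpr(hasArgument(0, declRefExpr().bind("arg")))
//
// binds "arg" to y in x(y) even though the AST wraps y in an implicit
// lvalue-to-rvalue cast.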
/// Matches the n'th item of an initializer list expression.
///
/// Example matches y.
/// (matcher = initListExpr(hasInit(0, expr())))
/// \code
/// int x{y};
/// \endcode
AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
return N < Node.getNumInits() &&
InnerMatcher.matches(*Node.getInit(N), Finder, Builder);
}
/// Matches declaration statements that contain a specific number of
/// declarations.
///
/// Example: Given
/// \code
/// int a, b;
/// int c;
/// int d = 2, e;
/// \endcode
/// declCountIs(2)
/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N;
}
/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
/// int a, b = 0;
/// int c;
/// int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
/// 0, varDecl(hasInitializer(anything()))))
/// matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
///   matches 'int a, b = 0;' as well as 'int d = 2, e;',
///   but 'int c;' is not matched.
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
internal::Matcher<Decl>, InnerMatcher) {
const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
if (N >= NumDecls)
return false;
DeclStmt::const_decl_iterator Iterator = Node.decl_begin();
std::advance(Iterator, N);
return InnerMatcher.matches(**Iterator, Finder, Builder);
}
/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
/// try {
/// // ...
/// } catch (int) {
/// // ...
/// } catch (...) {
/// // ...
/// }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
return Node.getExceptionDecl() == nullptr;
}
/// Matches a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(
/// hasAnyConstructorInitializer(anything())
/// )))
/// record matches Foo, hasAnyConstructorInitializer matches foo_(1)
AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(),
Node.init_end(), Finder, Builder);
}
/// Matches the field declaration of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// forField(hasName("foo_"))))))
/// matches Foo
/// with forField matching foo_
AST_MATCHER_P(CXXCtorInitializer, forField,
internal::Matcher<FieldDecl>, InnerMatcher) {
const FieldDecl *NodeAsDecl = Node.getAnyMember();
return (NodeAsDecl != nullptr &&
InnerMatcher.matches(*NodeAsDecl, Finder, Builder));
}
/// Matches the initializer expression of a constructor initializer.
///
/// Given
/// \code
/// struct Foo {
/// Foo() : foo_(1) { }
/// int foo_;
/// };
/// \endcode
/// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer(
/// withInitializer(integerLiteral(equals(1)))))))
/// matches Foo
/// with withInitializer matching (1)
AST_MATCHER_P(CXXCtorInitializer, withInitializer,
internal::Matcher<Expr>, InnerMatcher) {
const Expr* NodeAsExpr = Node.getInit();
return (NodeAsExpr != nullptr &&
InnerMatcher.matches(*NodeAsExpr, Finder, Builder));
}
/// Matches a constructor initializer if it is explicitly written in
/// code (as opposed to implicitly added by the compiler).
///
/// Given
/// \code
/// struct Foo {
/// Foo() { }
/// Foo(int) : foo_("A") { }
/// string foo_;
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten()))
/// will match Foo(int), but not Foo()
AST_MATCHER(CXXCtorInitializer, isWritten) {
return Node.isWritten();
}
/// Matches a constructor initializer if it is initializing a base, as
/// opposed to a member.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer()))
/// will match E(), but not match D(int).
AST_MATCHER(CXXCtorInitializer, isBaseInitializer) {
return Node.isBaseInitializer();
}
/// Matches a constructor initializer if it is initializing a member, as
/// opposed to a base.
///
/// Given
/// \code
/// struct B {};
/// struct D : B {
/// int I;
/// D(int i) : I(i) {}
/// };
/// struct E : B {
/// E() : B() {}
/// };
/// \endcode
/// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer()))
/// will match D(int), but not match E().
AST_MATCHER(CXXCtorInitializer, isMemberInitializer) {
return Node.isMemberInitializer();
}
/// Matches any argument of a call expression or a constructor call
/// expression, or an ObjC-message-send expression.
///
/// Given
/// \code
/// void x(int, int, int) { int y; x(1, y, 42); }
/// \endcode
/// callExpr(hasAnyArgument(declRefExpr()))
/// matches x(1, y, 42)
/// with hasAnyArgument(...)
/// matching y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// void foo(I *i) { [i f:12]; }
/// \endcode
/// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12))))
/// matches [i f:12]
AST_POLYMORPHIC_MATCHER_P(hasAnyArgument,
AST_POLYMORPHIC_SUPPORTED_TYPES(
CallExpr, CXXConstructExpr,
CXXUnresolvedConstructExpr, ObjCMessageExpr),
internal::Matcher<Expr>, InnerMatcher) {
for (const Expr *Arg : Node.arguments()) {
BoundNodesTreeBuilder Result(*Builder);
if (InnerMatcher.matches(*Arg, Finder, &Result)) {
*Builder = std::move(Result);
return true;
}
}
return false;
}
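// Unlike a forEach-style matcher, hasAnyArgument() stops at the first
// argument that matches, and only that argument's bindings survive.
// Sketch (the binding name "ref" is illustrative):
//
//   callExpr(hasAnyArgument(declRefExpr().bind("ref")))
//
// produces a single match for x(1, y, 42), with "ref" bound to y.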
/// Matches any capture of a lambda expression.
///
/// Given
/// \code
/// void foo() {
/// int x;
/// auto f = [x](){};
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(anything()))
/// matches [x](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>,
InnerMatcher, 0) {
for (const LambdaCapture &Capture : Node.captures()) {
if (Capture.capturesVariable()) {
BoundNodesTreeBuilder Result(*Builder);
if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) {
*Builder = std::move(Result);
return true;
}
}
}
return false;
}
/// Matches any capture of 'this' in a lambda expression.
///
/// Given
/// \code
/// struct foo {
/// void bar() {
/// auto f = [this](){};
/// }
/// }
/// \endcode
/// lambdaExpr(hasAnyCapture(cxxThisExpr()))
/// matches [this](){};
AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture,
internal::Matcher<CXXThisExpr>, InnerMatcher, 1) {
return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) {
return LC.capturesThis();
});
}
/// Matches a constructor call expression which uses list initialization.
AST_MATCHER(CXXConstructExpr, isListInitialization) {
return Node.isListInitialization();
}
/// Matches a constructor call expression which requires
/// zero initialization.
///
/// Given
/// \code
/// void foo() {
/// struct point { double x; double y; };
/// point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
return Node.requiresZeroInitialization();
}
/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
/// class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
/// matches f(int x) {}
/// with hasParameter(...)
/// matching int x
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
ObjCMethodDecl,
BlockDecl),
unsigned, N, internal::Matcher<ParmVarDecl>,
InnerMatcher) {
return (N < Node.parameters().size()
&& InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}
/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
/// void f(int i);
/// int y;
/// f(y);
/// \endcode
/// callExpr(
/// forEachArgumentWithParam(
/// declRefExpr(to(varDecl(hasName("y")))),
/// parmVarDecl(hasType(isInteger()))
/// ))
/// matches f(y);
/// with declRefExpr(...)
/// matching int y
/// and parmVarDecl(...)
/// matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
CXXConstructExpr),
internal::Matcher<Expr>, ArgMatcher,
internal::Matcher<ParmVarDecl>, ParamMatcher) {
BoundNodesTreeBuilder Result;
// The first argument of an overloaded member operator is the implicit object
// argument of the method which should not be matched against a parameter, so
// we skip over it here.
BoundNodesTreeBuilder Matches;
unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
.matches(Node, Finder, &Matches)
? 1
: 0;
int ParamIndex = 0;
bool Matched = false;
for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
BoundNodesTreeBuilder ArgMatches(*Builder);
if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
Finder, &ArgMatches)) {
BoundNodesTreeBuilder ParamMatches(ArgMatches);
if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
hasParameter(ParamIndex, ParamMatcher)))),
callExpr(callee(functionDecl(
hasParameter(ParamIndex, ParamMatcher))))))
.matches(Node, Finder, &ParamMatches)) {
Result.addMatch(ParamMatches);
Matched = true;
}
}
++ParamIndex;
}
*Builder = std::move(Result);
return Matched;
}
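// A minimal callback sketch (assuming the usual MatchFinder driver from
// clang/ASTMatchers/ASTMatchFinder.h; the class name ArgParamPrinter and
// the binding names "arg"/"param" are illustrative, not part of this file):
//
//   using namespace clang;
//   using namespace clang::ast_matchers;
//
//   class ArgParamPrinter : public MatchFinder::MatchCallback {
//     void run(const MatchFinder::MatchResult &Result) override {
//       // Invoked once per matching (argument, parameter) pair.
//       const auto *Arg = Result.Nodes.getNodeAs<Expr>("arg");
//       const auto *Param = Result.Nodes.getNodeAs<ParmVarDecl>("param");
//       if (Arg && Param)
//         llvm::errs() << Param->getName() << "\n";
//     }
//   };
//
//   void registerIt(MatchFinder &Finder, ArgParamPrinter &Printer) {
//     Finder.addMatcher(callExpr(forEachArgumentWithParam(
//                           expr().bind("arg"),
//                           parmVarDecl().bind("param"))),
//                       &Printer);
//   }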
/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
const clang::DeclContext *Context = Node.getParentFunctionOrMethod();
if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
return false;
}
/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
///
/// Given
/// \code
/// class X { void f(int x, int y, int z) {} };
/// \endcode
/// cxxMethodDecl(hasAnyParameter(hasName("y")))
/// matches f(int x, int y, int z) {}
/// with hasAnyParameter(...)
/// matching int y
///
/// For ObjectiveC, given
/// \code
/// @interface I - (void) f:(int) y; @end
/// \endcode
///
/// the matcher objcMethodDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of method f with hasAnyParameter
/// matching y.
///
/// For blocks, given
/// \code
/// b = ^(int y) { printf("%d", y); };
/// \endcode
///
/// the matcher blockDecl(hasAnyParameter(hasName("y")))
/// matches the declaration of the block b with hasAnyParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P(hasAnyParameter,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
ObjCMethodDecl,
BlockDecl),
internal::Matcher<ParmVarDecl>,
InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(),
Node.param_end(), Finder, Builder);
}
/// Matches \c FunctionDecls and \c FunctionProtoTypes that have a
/// specific parameter count.
///
/// Given
/// \code
/// void f(int i) {}
/// void g(int i, int j) {}
/// void h(int i, int j);
/// void j(int i);
/// void k(int x, int y, int z, ...);
/// \endcode
/// functionDecl(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(2))
/// matches \c g and \c h
/// functionProtoType(parameterCountIs(3))
/// matches \c k
AST_POLYMORPHIC_MATCHER_P(parameterCountIs,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType),
unsigned, N) {
return Node.getNumParams() == N;
}
/// Matches \c FunctionDecls that have a noreturn attribute.
///
/// Given
/// \code
/// void nope();
/// [[noreturn]] void a();
/// __attribute__((noreturn)) void b();
/// struct c { [[noreturn]] c(); };
/// \endcode
/// functionDecl(isNoReturn())
/// matches all of those except
/// \code
/// void nope();
/// \endcode
AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); }
/// Matches the return type of a function declaration.
///
/// Given:
/// \code
/// class X { int f() { return 1; } };
/// \endcode
/// cxxMethodDecl(returns(asString("int")))
/// matches int f() { return 1; }
AST_MATCHER_P(FunctionDecl, returns,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getReturnType(), Finder, Builder);
}
/// Matches extern "C" function or variable declarations.
///
/// Given:
/// \code
/// extern "C" void f() {}
/// extern "C" { void g() {} }
/// void h() {}
/// extern "C" int x = 1;
/// extern "C" int y = 2;
/// int z = 3;
/// \endcode
/// functionDecl(isExternC())
/// matches the declaration of f and g, but not the declaration of h.
/// varDecl(isExternC())
/// matches the declaration of x and y, but not the declaration of z.
AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
VarDecl)) {
return Node.isExternC();
}
/// Matches variable/function declarations that have "static" storage
/// class specifier ("static" keyword) written in the source.
///
/// Given:
/// \code
/// static void f() {}
/// static int i = 0;
/// extern int j;
/// int k;
/// \endcode
/// functionDecl(isStaticStorageClass())
/// matches the function declaration f.
/// varDecl(isStaticStorageClass())
/// matches the variable declaration i.
AST_POLYMORPHIC_MATCHER(isStaticStorageClass,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
VarDecl)) {
return Node.getStorageClass() == SC_Static;
}
/// Matches deleted function declarations.
///
/// Given:
/// \code
/// void Func();
/// void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
/// matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) {
return Node.isDeleted();
}
/// Matches defaulted function declarations.
///
/// Given:
/// \code
/// class A { ~A(); };
/// class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
/// matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) {
return Node.isDefaulted();
}
/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() noexcept(true);
/// void i() noexcept(false);
/// void j() throw();
/// void k() throw(int);
/// void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
/// functionProtoType(hasDynamicExceptionSpec())
/// match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType)) {
if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
return FnTy->hasDynamicExceptionSpec();
return false;
}
/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
/// void f();
/// void g() noexcept;
/// void h() throw();
/// void i() throw(int);
/// void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
/// match the declarations of g and h, but not f, i, or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
FunctionProtoType)) {
const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);
// If the function does not have a prototype, then it is assumed to be a
// throwing function (as it would if the function did not have any exception
// specification).
if (!FnTy)
return false;
// Assume the best for any unresolved exception specification.
if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
return true;
return FnTy->isNothrow();
}
/// Matches constexpr variable and function declarations, and
/// `if constexpr` statements.
///
/// Given:
/// \code
/// constexpr int foo = 42;
/// constexpr int bar();
/// void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
/// matches the declaration of foo.
/// functionDecl(isConstexpr())
/// matches the declaration of bar.
/// ifStmt(isConstexpr())
/// matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
FunctionDecl,
IfStmt)) {
return Node.isConstexpr();
}
/// Matches selection statements with initializer.
///
/// Given:
/// \code
/// void foo() {
/// if (int i = foobar(); i > 0) {}
/// switch (int i = foobar(); i) {}
/// for (auto& a = get_range(); auto& x : a) {}
/// }
/// void bar() {
/// if (foobar() > 0) {}
/// switch (foobar()) {}
/// for (auto& x : get_range()) {}
/// }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
/// matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
/// matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
/// matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
CXXForRangeStmt),
internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *Init = Node.getInit();
return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}
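// Sketch: the init-statement can itself be constrained with the DeclStmt
// matchers defined earlier, e.g.
//
//   ifStmt(hasInitStatement(declStmt(hasSingleDecl(varDecl(hasName("i"))))))
//
// matches only the `if (int i = foobar(); i > 0)` form from the example
// above, not the plain `if (foobar() > 0)`.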
/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
///
/// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true))))
/// \code
/// if (true) {}
/// \endcode
AST_POLYMORPHIC_MATCHER_P(
hasCondition,
AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt,
SwitchStmt, AbstractConditionalOperator),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *const Condition = Node.getCond();
return (Condition != nullptr &&
InnerMatcher.matches(*Condition, Finder, Builder));
}
/// Matches the then-statement of an if statement.
///
/// Example matches the if statement
/// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) true; else false;
/// \endcode
AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Then = Node.getThen();
return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder));
}
/// Matches the else-statement of an if statement.
///
/// Example matches the if statement
/// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true)))))
/// \code
/// if (false) false; else true;
/// \endcode
AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Else = Node.getElse();
return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder));
}
/// Matches if a node equals a previously bound node.
///
/// Matches a node if it equals the node previously bound to \p ID.
///
/// Given
/// \code
/// class X { int a; int b; };
/// \endcode
/// cxxRecordDecl(
/// has(fieldDecl(hasName("a"), hasType(type().bind("t")))),
/// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t"))))))
/// matches the class \c X, as \c a and \c b have the same type.
///
/// Note that when multiple matches are involved via \c forEach* matchers,
/// \c equalsBoundNodes acts as a filter.
/// For example:
/// compoundStmt(
/// forEachDescendant(varDecl().bind("d")),
/// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d"))))))
/// will trigger a match for each combination of variable declaration
/// and reference to that variable declaration within a compound statement.
AST_POLYMORPHIC_MATCHER_P(equalsBoundNode,
AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type,
QualType),
std::string, ID) {
// FIXME: Figure out whether it makes sense to allow this
// on any other node types.
// For *Loc it probably does not make sense, as those seem
// unique. For NestedNameSepcifier it might make sense, as
// those also have pointer identity, but I'm not sure whether
// they're ever reused.
internal::NotEqualsBoundNodePredicate Predicate;
Predicate.ID = ID;
Predicate.Node = DynTypedNode::create(Node);
return Builder->removeBindings(Predicate);
}
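// A minimal callback sketch for the forEachDescendant filtering example
// above (assuming the usual MatchFinder driver; the class name PairReporter
// is illustrative): each declaration/use pair that survives the
// equalsBoundNode("d") filter arrives as its own match.
//
//   using namespace clang;
//   using namespace clang::ast_matchers;
//
//   class PairReporter : public MatchFinder::MatchCallback {
//     void run(const MatchFinder::MatchResult &Result) override {
//       if (const auto *D = Result.Nodes.getNodeAs<VarDecl>("d"))
//         llvm::outs() << D->getName() << " has a use\n";
//     }
//   };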
/// Matches the condition variable statement in an if statement.
///
/// Given
/// \code
/// if (A* a = GetAPointer()) {}
/// \endcode
/// hasConditionVariableStatement(...)
/// matches 'A* a = GetAPointer()'.
AST_MATCHER_P(IfStmt, hasConditionVariableStatement,
internal::Matcher<DeclStmt>, InnerMatcher) {
const DeclStmt* const DeclarationStatement =
Node.getConditionVariableDeclStmt();
return DeclarationStatement != nullptr &&
InnerMatcher.matches(*DeclarationStatement, Finder, Builder);
}
/// Matches the index expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasIndex(integerLiteral()))
/// matches \c i[1] with the \c integerLiteral() matching \c 1
AST_MATCHER_P(ArraySubscriptExpr, hasIndex,
internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getIdx())
return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// Matches the base expression of an array subscript expression.
///
/// Given
/// \code
/// int i[5];
/// void f() { i[1] = 42; }
/// \endcode
/// arraySubscriptExpr(hasBase(implicitCastExpr(
/// hasSourceExpression(declRefExpr()))))
/// matches \c i[1] with the \c declRefExpr() matching \c i
AST_MATCHER_P(ArraySubscriptExpr, hasBase,
internal::Matcher<Expr>, InnerMatcher) {
if (const Expr* Expression = Node.getBase())
return InnerMatcher.matches(*Expression, Finder, Builder);
return false;
}
/// Matches a 'for', 'while', 'do while' statement or a function
/// definition that has a given body.
///
/// Given
/// \code
/// for (;;) {}
/// \endcode
/// hasBody(compoundStmt())
/// matches 'for (;;) {}'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasBody,
AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt,
WhileStmt,
CXXForRangeStmt,
FunctionDecl),
internal::Matcher<Stmt>, InnerMatcher) {
const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node);
return (Statement != nullptr &&
InnerMatcher.matches(*Statement, Finder, Builder));
}
/// Matches compound statements where at least one substatement matches
/// a given matcher. Also matches StmtExprs that have CompoundStmt as children.
///
/// Given
/// \code
/// { {}; 1+2; }
/// \endcode
/// hasAnySubstatement(compoundStmt())
/// matches '{ {}; 1+2; }'
/// with compoundStmt()
/// matching '{}'
AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement,
AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt,
StmtExpr),
internal::Matcher<Stmt>, InnerMatcher) {
const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node);
return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(),
CS->body_end(), Finder, Builder);
}
/// Checks that a compound statement contains a specific number of
/// child statements.
///
/// Example: Given
/// \code
/// { for (;;) {} }
/// \endcode
/// compoundStmt(statementCountIs(0))
/// matches '{}'
/// but does not match the outer compound statement.
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
return Node.size() == N;
}
/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
/// f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
/// matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
/// match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
/// match 3.14
/// integerLiteral(equals(42))
/// matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
/// hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
return internal::PolymorphicMatcherWithParam1<
internal::ValueEqualsMatcher,
ValueT>(Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
bool, Value, 0) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
IntegerLiteral),
unsigned, Value, 1) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral,
CXXBoolLiteralExpr,
FloatingLiteral,
IntegerLiteral),
double, Value, 2) {
return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
.matchesNode(Node);
}
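// The typed overloads above exist so that literal comparisons resolve
// without ambiguity across node kinds. A few one-line sketches against the
// f('\0', false, 3.14, 42) example:
//
//   characterLiteral(equals(0U))   // the '\0' argument (unsigned overload)
//   cxxBoolLiteral(equals(false))  // the false argument (bool overload)
//   floatLiteral(equals(3.14))     // the 3.14 argument (double overload)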
/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
/// !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
UnaryOperator),
std::string, Name) {
return Name == Node.getOpcodeStr(Node.getOpcode());
}
/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
/// hasAnyOperatorName("+", "-")
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
internal::PolymorphicMatcherWithParam1<
internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
StringRef, internal::hasAnyOperatorNameFunc>
hasAnyOperatorName;
/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
/// (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
/// struct S { S& operator=(const S&); };
/// void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isAssignmentOp();
}
/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
/// if (a == b)
/// a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
/// (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
/// struct S { bool operator<(const S& other); };
/// void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
CXXOperatorCallExpr)) {
return Node.isComparisonOp();
}
/// Matches the left hand side of binary operator expressions.
///
/// Example matches a (matcher = binaryOperator(hasLHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasLHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *LeftHandSide = Node.getLHS();
return (LeftHandSide != nullptr &&
InnerMatcher.matches(*LeftHandSide, Finder, Builder));
}
/// Matches the right hand side of binary operator expressions.
///
/// Example matches b (matcher = binaryOperator(hasRHS()))
/// \code
/// a || b
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasRHS,
AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
ArraySubscriptExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *RightHandSide = Node.getRHS();
return (RightHandSide != nullptr &&
InnerMatcher.matches(*RightHandSide, Finder, Builder));
}
/// Matches if either the left hand side or the right hand side of a
/// binary operator matches.
inline internal::Matcher<BinaryOperator> hasEitherOperand(
const internal::Matcher<Expr> &InnerMatcher) {
return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher));
}
/// Matches if both matchers match with opposite sides of the binary operator.
///
/// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)),
///                                              integerLiteral(equals(2))))
/// \code
/// 1 + 2 // Match
/// 2 + 1 // Match
/// 1 + 1 // No match
/// 2 + 2 // No match
/// \endcode
inline internal::Matcher<BinaryOperator>
hasOperands(const internal::Matcher<Expr> &Matcher1,
const internal::Matcher<Expr> &Matcher2) {
return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)),
allOf(hasLHS(Matcher2), hasRHS(Matcher1)));
}
/// Matches if the operand of a unary operator matches.
///
/// Example matches true (matcher = hasUnaryOperand(
/// cxxBoolLiteral(equals(true))))
/// \code
/// !true
/// \endcode
AST_MATCHER_P(UnaryOperator, hasUnaryOperand,
internal::Matcher<Expr>, InnerMatcher) {
const Expr * const Operand = Node.getSubExpr();
return (Operand != nullptr &&
InnerMatcher.matches(*Operand, Finder, Builder));
}
/// Matches if the cast's source expression
/// or opaque value's source expression matches the given matcher.
///
/// Example 1: matches "a string"
/// (matcher = castExpr(hasSourceExpression(cxxConstructExpr())))
/// \code
/// class URL { URL(string); };
/// URL url = "a string";
/// \endcode
///
/// Example 2: matches 'b' (matcher =
/// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))))
/// \code
/// int a = b ?: 1;
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasSourceExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr,
OpaqueValueExpr),
internal::Matcher<Expr>, InnerMatcher) {
const Expr *const SubExpression =
internal::GetSourceExpressionMatcher<NodeType>::get(Node);
return (SubExpression != nullptr &&
InnerMatcher.matches(*SubExpression, Finder, Builder));
}
/// Matches casts that has a given cast kind.
///
/// Example: matches the implicit cast around \c 0
/// (matcher = castExpr(hasCastKind(CK_NullToPointer)))
/// \code
/// int *p = 0;
/// \endcode
///
/// If the matcher is used from clang-query, the CastKind parameter
/// should be passed as a quoted string, e.g., hasCastKind("CK_NullToPointer").
AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) {
return Node.getCastKind() == Kind;
}
/// Matches casts whose destination type matches a given matcher.
///
/// (Note: Clang's AST refers to other conversions as "casts" too, and calls
/// actual casts "explicit" casts.)
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
const QualType NodeType = Node.getTypeAsWritten();
return InnerMatcher.matches(NodeType, Finder, Builder);
}
/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
internal::Matcher<QualType>, InnerMatcher) {
return InnerMatcher.matches(Node.getType(), Finder, Builder);
}
/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
return Node.isStruct();
}
/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
return Node.isUnion();
}
/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
return Node.isClass();
}
/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
/// struct S {};
/// class C {};
/// union U {};
/// enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
return Node.isEnum();
}
/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
/// condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *Expression = Node.getTrueExpr();
return (Expression != nullptr &&
InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
/// condition ? a : b
/// condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
internal::Matcher<Expr>, InnerMatcher) {
const Expr *Expression = Node.getFalseExpr();
return (Expression != nullptr &&
InnerMatcher.matches(*Expression, Finder, Builder));
}
/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
/// class A {};
/// class B; // Doesn't match, as it has no body.
/// int va;
/// extern int vb; // Doesn't match, as it doesn't define the variable.
/// void fa() {}
/// void fb(); // Doesn't match, as it has no body.
/// @interface X
/// - (void)ma; // Doesn't match, interface is declaration.
/// @end
/// @implementation X
/// - (void)ma {}
/// @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
/// Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
ObjCMethodDecl,
FunctionDecl)) {
return Node.isThisDeclarationADefinition();
}
/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
/// void f(...);
/// void g(int);
/// template <typename... Ts> void h(Ts...);
/// void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
return Node.isVariadic();
}
/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
/// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
/// ofClass(hasName("A"))))))
/// \code
/// class A {
/// public:
/// A();
/// };
/// A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
internal::Matcher<CXXRecordDecl>, InnerMatcher) {
const CXXRecordDecl *Parent = Node.getParent();
return (Parent != nullptr &&
InnerMatcher.matches(*Parent, Finder, Builder));
}
/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
/// class A { virtual void f(); };
/// class B : public A { void f(); };
/// class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
/// that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
/// class A1 { virtual void f(); };
/// class A2 { virtual void f(); };
/// class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
/// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
/// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
/// once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
internal::Matcher<CXXMethodDecl>, InnerMatcher) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *Overridden : Node.overridden_methods()) {
BoundNodesTreeBuilder OverriddenBuilder(*Builder);
const bool OverriddenMatched =
InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
if (OverriddenMatched) {
Matched = true;
Result.addMatch(OverriddenBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
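// Usage sketch: binding both ends of each override edge (the binding names
// are illustrative):
//
//   cxxMethodDecl(forEachOverridden(cxxMethodDecl().bind("base")))
//       .bind("derived")
//
// In a MatchFinder callback, each match then carries one ("base",
// "derived") pair, retrievable via
// Result.Nodes.getNodeAs<CXXMethodDecl>("base") and ...("derived").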
/// Matches declarations of virtual methods and C++ base specifiers that specify
/// virtual inheritance.
///
/// Example:
/// \code
/// class A {
/// public:
/// virtual void x(); // matches x
/// };
/// \endcode
///
/// Example:
/// \code
/// class Base {};
/// class DirectlyDerived : virtual Base {}; // matches Base
/// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
CXXBaseSpecifier)) {
return Node.isVirtual();
}
/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// void x();
/// };
/// \endcode
/// matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
return Node.isVirtualAsWritten();
}
/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
/// class A final {};
///
/// struct B {
/// virtual void f();
/// };
///
/// struct C : B {
/// void f() final;
/// };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
CXXMethodDecl)) {
return Node.template hasAttr<FinalAttr>();
}
/// Matches if the given method declaration is pure.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x() = 0;
/// };
/// \endcode
/// matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
return Node.isPure();
}
/// Matches if the given method declaration is const.
///
/// Given
/// \code
/// struct A {
/// void foo() const;
/// void bar();
/// };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
return Node.isConst();
}
/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
return Node.isCopyAssignmentOperator();
}
/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
/// struct A {
/// A &operator=(const A &);
/// A &operator=(A &&);
/// };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
return Node.isMoveAssignmentOperator();
}
/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
/// class A {
/// public:
/// virtual void x();
/// };
/// class B : public A {
/// public:
/// virtual void x();
/// };
/// \endcode
/// matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}
/// Matches method declarations that are user-provided.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &) = default; // #2
/// S(S &&) = delete; // #3
/// };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
return Node.isUserProvided();
}
/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
/// class Y {
/// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
/// template <class T> void f() { this->f<T>(); f<T>(); }
/// int a;
/// static int b;
/// };
/// template <class T>
/// class Z {
/// void x() { this->m; }
/// };
/// \endcode
/// memberExpr(isArrow())
/// matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
/// matches this->m
/// unresolvedMemberExpr(isArrow())
/// matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr)) {
return Node.isArrow();
}
/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
return Node->isIntegerType();
}
/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
return Node->isUnsignedIntegerType();
}
/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
/// void a(int);
/// void b(unsigned long);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
return Node->isSignedIntegerType();
}
/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
/// void a(char);
/// void b(wchar_t);
/// void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
return Node->isAnyCharacterType();
}
/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
/// int *i = nullptr;
///
/// @interface Foo
/// @end
/// Foo *f;
///
/// int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
/// matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
return Node->isAnyPointerType();
}
/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
/// void a(int);
/// void b(int const);
/// void c(const int);
/// void d(const int*);
/// void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
/// matches "void b(int const)", "void c(const int)" and
/// "void e(int const) {}". It does not match d as there
/// is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
return Node.isConstQualified();
}
/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
/// void a(int);
/// void b(int volatile);
/// void c(volatile int);
/// void d(volatile int*);
/// void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
/// matches "void b(int volatile)", "void c(volatile int)" and
/// "void e(int volatile) {}". It does not match d as there
/// is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
return Node.isVolatileQualified();
}
/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
/// typedef const int const_int;
/// const_int i;
/// int *const j;
/// int *volatile k;
/// int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
return Node.hasLocalQualifiers();
}
/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
/// struct { int first, second; } first, second;
/// int i(second.first);
/// int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
/// matches second.first
/// but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
internal::Matcher<ValueDecl>, InnerMatcher) {
return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}
/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
/// struct X {
/// int m;
/// int f(X x) { x.m; return m; }
/// };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
/// matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
/// matches `m` (i.e., the implicit `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
hasObjectExpression,
AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
CXXDependentScopeMemberExpr),
internal::Matcher<Expr>, InnerMatcher) {
if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
if (E->isImplicitAccess())
return false;
return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}
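// Sketch: implicit member accesses on a plain MemberExpr keep their
// implicit CXXThisExpr base, so
//
//   memberExpr(hasObjectExpression(cxxThisExpr()))
//
// matches the bare `m` in the example above (whose base is the implicit
// `this`), while `x.m` is not matched because its base is the DeclRefExpr x.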
/// Matches any using shadow declaration.
///
/// Given
/// \code
/// namespace X { void b(); }
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
Node.shadow_end(), Finder, Builder);
}
/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
/// namespace X { int a; void b(); }
/// using X::a;
/// using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
/// matches \code using X::b \endcode
/// but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
internal::Matcher<NamedDecl>, InnerMatcher) {
return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}
/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
/// template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
/// template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// matches the template instantiation of X<A>.
///
/// But given
/// \code
/// template <typename T> class X {}; class A {};
/// template <> class X<A> {}; X<A> x;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
/// does not match, as X<A> is an explicit template specialization.
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isTemplateInstantiation,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition ||
Node.getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDeclaration);
}
/// Matches declarations that are template instantiations or are inside
/// template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { T i; }
/// A(0);
/// A(0U);
/// \endcode
/// functionDecl(isInstantiated())
/// matches 'A(int) {...};' and 'A(unsigned) {...}'.
AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) {
auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
functionDecl(isTemplateInstantiation())));
return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation)));
}
/// Matches statements inside of a template instantiation.
///
/// Given
/// \code
/// int j;
/// template<typename T> void A(T t) { T i; j += 42;}
/// A(0);
/// A(0U);
/// \endcode
/// declStmt(isInTemplateInstantiation())
/// matches 'int i;' and 'unsigned i;'.
/// unless(stmt(isInTemplateInstantiation()))
/// will NOT match j += 42; as it's shared between the template definition and
/// instantiation.
AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) {
return stmt(
hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()),
functionDecl(isTemplateInstantiation())))));
}
/// Matches explicit template specializations of function, class, or
/// static member variable template instantiations.
///
/// Given
/// \code
/// template<typename T> void A(T t) { }
/// template<> void A(int N) { }
/// \endcode
/// functionDecl(isExplicitTemplateSpecialization())
/// matches the specialization A<int>().
///
/// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization,
AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl,
CXXRecordDecl)) {
return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization);
}
/// Matches \c TypeLocs for which the given inner
/// QualType-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc,
internal::Matcher<QualType>, InnerMatcher, 0) {
return internal::BindableMatcher<TypeLoc>(
new internal::TypeLocTypeMatcher(InnerMatcher));
}
/// Matches type \c bool.
///
/// Given
/// \code
/// struct S { bool func(); };
/// \endcode
/// functionDecl(returns(booleanType()))
/// matches "bool func();"
AST_MATCHER(Type, booleanType) {
return Node.isBooleanType();
}
/// Matches type \c void.
///
/// Given
/// \code
/// struct S { void func(); };
/// \endcode
/// functionDecl(returns(voidType()))
/// matches "void func();"
AST_MATCHER(Type, voidType) {
return Node.isVoidType();
}
template <typename NodeType>
using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>;
/// Matches builtin Types.
///
/// Given
/// \code
/// struct A {};
/// A a;
/// int b;
/// float c;
/// bool d;
/// \endcode
/// builtinType()
/// matches "int b", "float c" and "bool d"
extern const AstTypeMatcher<BuiltinType> builtinType;
/// Matches all kinds of arrays.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[4];
/// void f() { int c[a[0]]; }
/// \endcode
/// arrayType()
/// matches "int a[]", "int b[4]" and "int c[a[0]]";
extern const AstTypeMatcher<ArrayType> arrayType;
/// Matches C99 complex types.
///
/// Given
/// \code
/// _Complex float f;
/// \endcode
/// complexType()
/// matches "_Complex float f"
extern const AstTypeMatcher<ComplexType> complexType;
/// Matches any real floating-point type (float, double, long double).
///
/// Given
/// \code
/// int i;
/// float f;
/// \endcode
/// realFloatingPointType()
/// matches "float f" but not "int i"
AST_MATCHER(Type, realFloatingPointType) {
return Node.isRealFloatingType();
}
/// Matches arrays and C99 complex types that have a specific element
/// type.
///
/// Given
/// \code
/// struct A {};
/// A a[7];
/// int b[7];
/// \endcode
/// arrayType(hasElementType(builtinType()))
/// matches "int b[7]"
///
/// Usable as: Matcher<ArrayType>, Matcher<ComplexType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement,
AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType,
ComplexType));
/// Matches C arrays with a specified constant size.
///
/// Given
/// \code
/// void f() {
/// int a[2];
/// int b[] = { 2, 3 };
/// int c[b[0]];
/// }
/// \endcode
/// constantArrayType()
/// matches "int a[2]"
extern const AstTypeMatcher<ConstantArrayType> constantArrayType;
/// Matches nodes that have the specified size.
///
/// Given
/// \code
/// int a[42];
/// int b[2 * 21];
/// int c[41], d[43];
/// char *s = "abcd";
/// wchar_t *ws = L"abcd";
/// char *w = "a";
/// \endcode
/// constantArrayType(hasSize(42))
/// matches "int a[42]" and "int b[2 * 21]"
/// stringLiteral(hasSize(4))
/// matches "abcd", L"abcd"
AST_POLYMORPHIC_MATCHER_P(hasSize,
AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType,
StringLiteral),
unsigned, N) {
return internal::HasSizeMatcher<NodeType>::hasSize(Node, N);
}
/// Matches C++ arrays whose size is a value-dependent expression.
///
/// Given
/// \code
/// template<typename T, int Size>
/// class array {
/// T data[Size];
/// };
/// \endcode
/// dependentSizedArrayType()
/// matches "T data[Size]"
extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType;
/// Matches C arrays with unspecified size.
///
/// Given
/// \code
/// int a[] = { 2, 3 };
/// int b[42];
/// void f(int c[]) { int d[a[0]]; };
/// \endcode
/// incompleteArrayType()
/// matches "int a[]" and "int c[]"
extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType;
/// Matches C arrays with a specified size that is not an
/// integer-constant-expression.
///
/// Given
/// \code
/// void f() {
/// int a[] = { 2, 3 };
/// int b[42];
/// int c[a[0]];
/// }
/// \endcode
/// variableArrayType()
/// matches "int c[a[0]]"
extern const AstTypeMatcher<VariableArrayType> variableArrayType;
/// Matches \c VariableArrayType nodes that have a specific size
/// expression.
///
/// Given
/// \code
/// void f(int b) {
/// int a[b];
/// }
/// \endcode
/// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to(
/// varDecl(hasName("b")))))))
/// matches "int a[b]"
AST_MATCHER_P(VariableArrayType, hasSizeExpr,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder);
}
/// Matches atomic types.
///
/// Given
/// \code
/// _Atomic(int) i;
/// \endcode
/// atomicType()
/// matches "_Atomic(int) i"
extern const AstTypeMatcher<AtomicType> atomicType;
/// Matches atomic types with a specific value type.
///
/// Given
/// \code
/// _Atomic(int) i;
/// _Atomic(float) f;
/// \endcode
/// atomicType(hasValueType(isInteger()))
/// matches "_Atomic(int) i"
///
/// Usable as: Matcher<AtomicType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue,
AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType));
/// Matches type nodes representing C++11 auto types.
///
/// Given:
/// \code
/// auto n = 4;
/// int v[] = { 2, 3 };
/// for (auto i : v) { }
/// \endcode
/// autoType()
/// matches "auto n" and "auto i"
extern const AstTypeMatcher<AutoType> autoType;
/// Matches type nodes representing C++11 decltype(<expr>) types.
///
/// Given:
/// \code
/// short i = 1;
/// int j = 42;
/// decltype(i + j) result = i + j;
/// \endcode
/// decltypeType()
/// matches "decltype(i + j)"
extern const AstTypeMatcher<DecltypeType> decltypeType;
/// Matches \c AutoType nodes where the deduced type is a specific type.
///
/// Note: There is no \c TypeLoc for the deduced type and thus no
/// \c getDeducedLoc() matcher.
///
/// Given
/// \code
/// auto a = 1;
/// auto b = 2.0;
/// \endcode
/// autoType(hasDeducedType(isInteger()))
/// matches "auto a"
///
/// Usable as: Matcher<AutoType>
AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType,
AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType));
/// Matches \c DecltypeType nodes to find out the underlying type.
///
/// Given
/// \code
/// decltype(1) a = 1;
/// decltype(2.0) b = 2.0;
/// \endcode
/// decltypeType(hasUnderlyingType(isInteger()))
/// matches the type of "a"
///
/// Usable as: Matcher<DecltypeType>
AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType,
AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType));
/// Matches \c FunctionType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionType()
/// matches "int (*f)(int)" and the type of "g".
extern const AstTypeMatcher<FunctionType> functionType;
/// Matches \c FunctionProtoType nodes.
///
/// Given
/// \code
/// int (*f)(int);
/// void g();
/// \endcode
/// functionProtoType()
/// matches "int (*f)(int)" and the type of "g" in C++ mode.
/// In C mode, "g" is not matched because it does not contain a prototype.
extern const AstTypeMatcher<FunctionProtoType> functionProtoType;
/// Matches \c ParenType nodes.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;
/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
/// int (*ptr_to_array)[4];
/// int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));
/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;
/// Matches member pointer types.
///
/// Given
/// \code
/// struct A { int i; };
/// int A::*ptr = &A::i;
/// \endcode
/// memberPointerType()
/// matches "int A::*ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;
/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int c = 5;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;
/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
/// int *a;
///
/// @interface Foo
/// @end
/// Foo *f;
/// \endcode
/// pointerType()
/// matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;
/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;
/// Matches lvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;
/// Matches rvalue reference types.
///
/// Given:
/// \code
/// int *a;
/// int &b = *a;
/// int &&c = 1;
/// auto &d = b;
/// auto &&e = c;
/// auto &&f = 2;
/// int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;
/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
/// int *a;
/// int const *b;
/// float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
/// matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
/// Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
pointee, getPointee,
AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
PointerType, ReferenceType));
/// Matches typedef types.
///
/// Given
/// \code
/// typedef int X;
/// \endcode
/// typedefType()
/// matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;
/// Matches enum types.
///
/// Given
/// \code
/// enum C { Green };
/// enum class S { Red };
///
/// C c;
/// S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;
/// Matches template specialization types.
///
/// Given
/// \code
/// template <typename T>
/// class C { };
///
/// template class C<int>; // A
/// C<char> var; // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
templateSpecializationType;
/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
/// template <typename T>
/// class C { public: C(T); };
///
/// C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
deducedTemplateSpecializationType;
/// Matches type nodes representing unary type transformations.
///
/// Given:
/// \code
/// typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
/// matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;
/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
/// class C {};
/// struct S {};
///
/// C c;
/// S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;
/// Matches tag types (record and enum types).
///
/// Given
/// \code
/// enum E {};
/// class C {};
///
/// E e;
/// C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;
/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// class C {};
///
/// class C c;
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;
/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
return InnerMatcher.matches(*Qualifier, Finder, Builder);
return false;
}
/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
InnerMatcher) {
return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}
/// Matches types that represent the result of substituting a type for a
/// template type parameter.
///
/// Given
/// \code
/// template <typename T>
/// void F(T t) {
/// int i = 1 + t;
/// }
/// \endcode
///
/// \c substTemplateTypeParmType() matches the type of 't' but not '1'
extern const AstTypeMatcher<SubstTemplateTypeParmType>
substTemplateTypeParmType;
/// Matches template type parameter substitutions that have a replacement
/// type that matches the provided matcher.
///
/// Given
/// \code
/// template <typename T>
/// double F(T t);
/// int i;
/// double j = F(i);
/// \endcode
///
/// \c substTemplateTypeParmType(hasReplacementType(type())) matches int
AST_TYPE_TRAVERSE_MATCHER(
hasReplacementType, getReplacementType,
AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType));
/// Matches template type parameter types.
///
/// Example matches T, but not int.
/// (matcher = templateTypeParmType())
/// \code
/// template <typename T> void f(int i);
/// \endcode
extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType;
/// Matches injected class name types.
///
/// Example matches S s, but not S<T> s.
/// (matcher = parmVarDecl(hasType(injectedClassNameType())))
/// \code
/// template <typename T> struct S {
/// void f(S s);
/// void g(S<T> s);
/// };
/// \endcode
extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType;
/// Matches decayed types.
///
/// Example matches i[] in the declaration of f.
/// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType())))))
/// Example matches i[1].
/// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType())))))
/// \code
/// void f(int i[]) {
/// i[1] = 0;
/// }
/// \endcode
extern const AstTypeMatcher<DecayedType> decayedType;
/// Matches the decayed type, whose decayed type matches \c InnerMatcher.
AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>,
InnerType) {
return InnerType.matches(Node.getDecayedType(), Finder, Builder);
}
/// Matches declarations whose declaration context, interpreted as a
/// Decl, matches \c InnerMatcher.
///
/// Given
/// \code
/// namespace N {
/// namespace M {
/// class D {};
/// }
/// }
/// \endcode
///
/// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the
/// declaration of \c class \c D.
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
const DeclContext *DC = Node.getDeclContext();
if (!DC) return false;
return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder);
}
/// Matches nested name specifiers.
///
/// Given
/// \code
/// namespace ns {
/// struct A { static void f(); };
/// void A::f() {}
/// void g() { A::f(); }
/// }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier()
/// matches "ns::" and both "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
nestedNameSpecifier;
/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
nestedNameSpecifierLoc;
/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
return internal::BindableMatcher<NestedNameSpecifierLoc>(
new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
InnerMatcher));
}
/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
/// hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
internal::Matcher<QualType>, InnerMatcher) {
if (!Node.getAsType())
return false;
return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}
/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type(
/// hasDeclaration(cxxRecordDecl(hasName("A")))))))
/// matches "A::"
AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc,
internal::Matcher<TypeLoc>, InnerMatcher) {
return Node && Node.getNestedNameSpecifier()->getAsType() &&
InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifier.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A"))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix,
internal::Matcher<NestedNameSpecifier>, InnerMatcher,
0) {
const NestedNameSpecifier *NextNode = Node.getPrefix();
if (!NextNode)
return false;
return InnerMatcher.matches(*NextNode, Finder, Builder);
}
/// Matches on the prefix of a \c NestedNameSpecifierLoc.
///
/// Given
/// \code
/// struct A { struct B { struct C {}; }; };
/// A::B::C c;
/// \endcode
/// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A")))))
/// matches "A::"
AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix,
internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher,
1) {
NestedNameSpecifierLoc NextNode = Node.getPrefix();
if (!NextNode)
return false;
return InnerMatcher.matches(NextNode, Finder, Builder);
}
/// Matches nested name specifiers that specify a namespace matching the
/// given namespace matcher.
///
/// Given
/// \code
/// namespace ns { struct A {}; }
/// ns::A a;
/// \endcode
/// nestedNameSpecifier(specifiesNamespace(hasName("ns")))
/// matches "ns::"
AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace,
internal::Matcher<NamespaceDecl>, InnerMatcher) {
if (!Node.getAsNamespace())
return false;
return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder);
}
/// Overloads for the \c equalsNode matcher.
/// FIXME: Implement for other node types.
/// @{
/// Matches if a node equals another node.
///
/// \c Decl has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Stmt has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) {
return &Node == Other;
}
/// Matches if a node equals another node.
///
/// \c Type has pointer identity in the AST.
AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) {
return &Node == Other;
}
/// @}
/// Matches each case or default statement belonging to the given switch
/// statement. This matcher may produce multiple matches.
///
/// Given
/// \code
/// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } }
/// \endcode
/// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s")
/// matches four times, with "c" binding each of "case 1:", "case 2:",
/// "case 3:" and "case 4:", and "s" respectively binding "switch (1)",
/// "switch (1)", "switch (2)" and "switch (2)".
AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>,
InnerMatcher) {
BoundNodesTreeBuilder Result;
// FIXME: getSwitchCaseList() does not necessarily guarantee a stable
// iteration order. We should use the more general iterating matchers once
// they are capable of expressing this matcher (for example, it should ignore
// case statements belonging to nested switch statements).
bool Matched = false;
for (const SwitchCase *SC = Node.getSwitchCaseList(); SC;
SC = SC->getNextSwitchCase()) {
BoundNodesTreeBuilder CaseBuilder(*Builder);
bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder);
if (CaseMatched) {
Matched = true;
Result.addMatch(CaseBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
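// Illustrative sketch (not part of the upstream header): consuming the
// repeated matches that forEachSwitchCase() produces via a MatchFinder
// callback, assuming clang/ASTMatchers/ASTMatchFinder.h is included by the
// tool. The class and binding names are made up for the example.
class SwitchCaseCounter : public MatchFinder::MatchCallback {
public:
  // run() fires once per match, i.e. once per case/default label bound to "c".
  void run(const MatchFinder::MatchResult &Result) override {
    if (Result.Nodes.getNodeAs<SwitchCase>("c"))
      ++NumCases;
  }
  unsigned NumCases = 0;
};

inline void countSwitchCases(MatchFinder &Finder, SwitchCaseCounter &Counter) {
  // Bind every case/default statement of every switch statement to "c".
  Finder.addMatcher(switchStmt(forEachSwitchCase(switchCase().bind("c"))),
                    &Counter);
}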
/// Matches each constructor initializer in a constructor definition.
///
/// Given
/// \code
/// class A { A() : i(42), j(42) {} int i; int j; };
/// \endcode
/// cxxConstructorDecl(forEachConstructorInitializer(
/// forField(decl().bind("x"))
/// ))
/// will trigger two matches, binding for 'i' and 'j' respectively.
AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer,
internal::Matcher<CXXCtorInitializer>, InnerMatcher) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto *I : Node.inits()) {
BoundNodesTreeBuilder InitBuilder(*Builder);
if (InnerMatcher.matches(*I, Finder, &InitBuilder)) {
Matched = true;
Result.addMatch(InitBuilder);
}
}
*Builder = std::move(Result);
return Matched;
}
/// Matches constructor declarations that are copy constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3.
AST_MATCHER(CXXConstructorDecl, isCopyConstructor) {
return Node.isCopyConstructor();
}
/// Matches constructor declarations that are move constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2.
AST_MATCHER(CXXConstructorDecl, isMoveConstructor) {
return Node.isMoveConstructor();
}
/// Matches constructor declarations that are default constructors.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(const S &); // #2
/// S(S &&); // #3
/// };
/// \endcode
/// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3.
AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) {
return Node.isDefaultConstructor();
}
/// Matches constructors that delegate to another constructor.
///
/// Given
/// \code
/// struct S {
/// S(); // #1
/// S(int) {} // #2
/// S(S &&) : S() {} // #3
/// };
/// S::S() : S(0) {} // #4
/// \endcode
/// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not
/// #1 or #2.
AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) {
return Node.isDelegatingConstructor();
}
/// Matches constructor, conversion function, and deduction guide declarations
/// that have an explicit specifier if this explicit specifier is resolved to
/// true.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool); // #7
/// explicit(true) S(char); // #8
/// explicit(b) S(S); // #9
/// };
/// S(int) -> S<true>; // #5
/// explicit S(double) -> S<false>; // #6
/// \endcode
/// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9.
/// cxxConversionDecl(isExplicit()) will match #4, but not #3.
/// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5.
AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES(
CXXConstructorDecl, CXXConversionDecl,
CXXDeductionGuideDecl)) {
return Node.isExplicit();
}
/// Matches the expression in an explicit specifier if present in the given
/// declaration.
///
/// Given
/// \code
/// template<bool b>
/// struct S {
/// S(int); // #1
/// explicit S(double); // #2
/// operator int(); // #3
/// explicit operator bool(); // #4
/// explicit(false) S(bool); // #7
/// explicit(true) S(char); // #8
/// explicit(b) S(S); // #9
/// };
/// S(int) -> S<true>; // #5
/// explicit S(double) -> S<false>; // #6
/// \endcode
/// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2.
/// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4.
/// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6.
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>,
InnerMatcher) {
ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node);
if (!ES.getExpr())
return false;
return InnerMatcher.matches(*ES.getExpr(), Finder, Builder);
}
/// Matches function and namespace declarations that are marked with
/// the inline keyword.
///
/// Given
/// \code
/// inline void f();
/// void g();
/// namespace n {
/// inline namespace m {}
/// }
/// \endcode
/// functionDecl(isInline()) will match ::f().
/// namespaceDecl(isInline()) will match n::m.
AST_POLYMORPHIC_MATCHER(isInline,
AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl,
FunctionDecl)) {
// This is required because the spelling of the function used to determine
// whether inline is specified or not differs between the polymorphic types.
if (const auto *FD = dyn_cast<FunctionDecl>(&Node))
return FD->isInlineSpecified();
else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node))
return NSD->isInline();
llvm_unreachable("Not a valid polymorphic type");
}
/// Matches anonymous namespace declarations.
///
/// Given
/// \code
/// namespace n {
/// namespace {} // #1
/// }
/// \endcode
/// namespaceDecl(isAnonymous()) will match #1 but not ::n.
AST_MATCHER(NamespaceDecl, isAnonymous) {
return Node.isAnonymousNamespace();
}
/// Matches declarations in the namespace `std`, but not in nested namespaces.
///
/// Given
/// \code
/// class vector {};
/// namespace foo {
/// class vector {};
/// namespace std {
/// class vector {};
/// }
/// }
/// namespace std {
/// inline namespace __1 {
/// class vector {}; // #1
/// namespace experimental {
/// class vector {};
/// }
/// }
/// }
/// \endcode
/// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1.
AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); }
/// If the given case statement does not use the GNU case range
/// extension, matches the constant given in the statement.
///
/// Given
/// \code
/// switch (1) { case 1: case 1+1: case 3 ... 4: ; }
/// \endcode
/// caseStmt(hasCaseConstant(integerLiteral()))
/// matches "case 1:"
AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>,
InnerMatcher) {
if (Node.getRHS())
return false;
return InnerMatcher.matches(*Node.getLHS(), Finder, Builder);
}
/// Matches declaration that has a given attribute.
///
/// Given
/// \code
/// __attribute__((device)) void f() { ... }
/// \endcode
/// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of
/// f. If the matcher is used from clang-query, attr::Kind parameter should be
/// passed as a quoted string. e.g., hasAttr("attr::CUDADevice").
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) {
for (const auto *Attr : Node.attrs()) {
if (Attr->getKind() == AttrKind)
return true;
}
return false;
}
/// Matches the return value expression of a return statement
///
/// Given
/// \code
/// return a + b;
/// \endcode
/// hasReturnValue(binaryOperator())
/// matches 'return a + b'
/// with binaryOperator()
/// matching 'a + b'
AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>,
InnerMatcher) {
if (const auto *RetValue = Node.getRetValue())
return InnerMatcher.matches(*RetValue, Finder, Builder);
return false;
}
/// Matches CUDA kernel call expression.
///
/// Example matches,
/// \code
/// kernel<<<i,j>>>();
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
cudaKernelCallExpr;
/// Matches expressions that resolve to a null pointer constant, such as
/// GNU's __null, C++11's nullptr, or C's NULL macro.
///
/// Given:
/// \code
/// void *v1 = NULL;
/// void *v2 = nullptr;
/// void *v3 = __null; // GNU extension
/// char *cp = (char *)0;
/// int *ip = 0;
/// int i = 0;
/// \endcode
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
AST_MATCHER(Expr, nullPointerConstant) {
return Node.isNullPointerConstant(Finder->getASTContext(),
Expr::NPC_ValueDependentIsNull);
}
/// Matches the declaration of the function the statement belongs to.
///
/// Given:
/// \code
/// F& operator=(const F& o) {
/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
/// return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
/// matches 'return *this'
/// but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
InnerMatcher) {
const auto &Parents = Finder->getASTContext().getParents(Node);
llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
while(!Stack.empty()) {
const auto &CurNode = Stack.back();
Stack.pop_back();
if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
return true;
}
} else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
Finder, Builder)) {
return true;
}
} else {
for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
Stack.push_back(Parent);
}
}
return false;
}
/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
/// int x;
/// static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit, as though it had internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
return Node.hasExternalFormalLinkage();
}
/// Matches a declaration that has default arguments.
///
/// Example matches y (matcher = parmVarDecl(hasDefaultArgument()))
/// \code
/// void x(int val) {}
/// void y(int val = 0) {}
/// \endcode
///
/// Deprecated. Use hasInitializer() instead to be able to
/// match on the contents of the default argument. For example:
///
/// \code
/// void x(int val = 7) {}
/// void y(int val = 42) {}
/// \endcode
/// parmVarDecl(hasInitializer(integerLiteral(equals(42))))
/// matches the parameter of y
///
/// A matcher such as
/// parmVarDecl(hasInitializer(anything()))
/// is equivalent to parmVarDecl(hasDefaultArgument()).
AST_MATCHER(ParmVarDecl, hasDefaultArgument) {
return Node.hasDefaultArg();
}
/// Matches array new expressions.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(isArray())
/// matches the expression 'new MyClass[10]'.
AST_MATCHER(CXXNewExpr, isArray) {
return Node.isArray();
}
/// Matches placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage, 16) MyClass();
/// \endcode
/// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16))))
/// matches the expression 'new (Storage, 16) MyClass()'.
AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index,
internal::Matcher<Expr>, InnerMatcher) {
return Node.getNumPlacementArgs() > Index &&
InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder);
}
/// Matches any placement new expression arguments.
///
/// Given:
/// \code
/// MyClass *p1 = new (Storage) MyClass();
/// \endcode
/// cxxNewExpr(hasAnyPlacementArg(anything()))
/// matches the expression 'new (Storage) MyClass()'.
AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>,
InnerMatcher) {
return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) {
return InnerMatcher.matches(*Arg, Finder, Builder);
});
}
/// Matches array new expressions with a given array size.
///
/// Given:
/// \code
/// MyClass *p1 = new MyClass[10];
/// \endcode
/// cxxNewExpr(hasArraySize(integerLiteral(equals(10))))
/// matches the expression 'new MyClass[10]'.
AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) {
return Node.isArray() && *Node.getArraySize() &&
InnerMatcher.matches(**Node.getArraySize(), Finder, Builder);
}
/// Matches a class declaration that is defined.
///
/// Example matches x (matcher = cxxRecordDecl(hasDefinition()))
/// \code
/// class x {};
/// class y;
/// \endcode
AST_MATCHER(CXXRecordDecl, hasDefinition) {
return Node.hasDefinition();
}
/// Matches C++11 scoped enum declaration.
///
/// Example matches Y (matcher = enumDecl(isScoped()))
/// \code
/// enum X {};
/// enum class Y {};
/// \endcode
AST_MATCHER(EnumDecl, isScoped) {
return Node.isScoped();
}
/// Matches a function declared with a trailing return type.
///
/// Example matches Y (matcher = functionDecl(hasTrailingReturn()))
/// \code
/// int X() {}
/// auto Y() -> int {}
/// \endcode
AST_MATCHER(FunctionDecl, hasTrailingReturn) {
if (const auto *F = Node.getType()->getAs<FunctionProtoType>())
return F->hasTrailingReturn();
return false;
}
/// Matches expressions that match InnerMatcher that are possibly wrapped in an
/// elidable constructor and other corresponding bookkeeping nodes.
///
/// In C++17, elidable copy constructors are no longer being generated in the
/// AST as it is not permitted by the standard. They are, however, part of the
/// AST in C++14 and earlier. So, a matcher must abstract over these differences
/// to work in all language modes. This matcher skips elidable constructor-call
/// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and
/// various implicit nodes inside the constructor calls, all of which will not
/// appear in the C++17 AST.
///
/// Given
///
/// \code
/// struct H {};
/// H G();
/// void f() {
/// H D = G();
/// }
/// \endcode
///
/// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))``
/// matches ``H D = G()`` in C++11 through C++17 (and beyond).
AST_MATCHER_P(Expr, ignoringElidableConstructorCall,
ast_matchers::internal::Matcher<Expr>, InnerMatcher) {
// E tracks the node that we are examining.
const Expr *E = &Node;
// If present, remove an outer `ExprWithCleanups` corresponding to the
// underlying `CXXConstructExpr`. This check won't cover all cases of added
// `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the
// EWC is placed on the outermost node of the expression, which this may not
// be), but, it still improves the coverage of this matcher.
if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node))
E = CleanupsExpr->getSubExpr();
if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) {
if (CtorExpr->isElidable()) {
if (const auto *MaterializeTemp =
dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) {
return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder,
Builder);
}
}
}
return InnerMatcher.matches(Node, Finder, Builder);
}
//----------------------------------------------------------------------------//
// OpenMP handling.
//----------------------------------------------------------------------------//
/// Matches any ``#pragma omp`` executable directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective()`` matches ``omp parallel``,
/// ``omp parallel default(none)`` and ``omp taskyield``.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective>
ompExecutableDirective;
/// Matches standalone OpenMP directives,
/// i.e., directives that can't have a structured block.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// {}
/// #pragma omp taskyield
/// \endcode
///
/// ``ompExecutableDirective(isStandaloneDirective()))`` matches
/// ``omp taskyield``.
AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) {
return Node.isStandaloneDirective();
}
/// Matches the structured block of an OpenMP executable directive.
///
/// Prerequisite: the executable directive must not be a standalone directive.
/// If it is, it will never match.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// ;
/// #pragma omp parallel
/// {}
/// \endcode
///
/// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;``
AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock,
internal::Matcher<Stmt>, InnerMatcher) {
if (Node.isStandaloneDirective())
return false; // Standalone directives have no structured blocks.
return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder);
}
/// Matches any clause in an OpenMP directive.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// \endcode
///
/// ``ompExecutableDirective(hasAnyClause(anything()))`` matches
/// ``omp parallel default(none)``.
AST_MATCHER_P(OMPExecutableDirective, hasAnyClause,
internal::Matcher<OMPClause>, InnerMatcher) {
ArrayRef<OMPClause *> Clauses = Node.clauses();
return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(),
Clauses.end(), Finder, Builder);
}
/// Matches OpenMP ``default`` clause.
///
/// Given
///
/// \code
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// #pragma omp parallel
/// \endcode
///
/// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and
/// ``default(firstprivate)``.
extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause>
ompDefaultClause;
/// Matches if the OpenMP ``default`` clause has ``none`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``.
AST_MATCHER(OMPDefaultClause, isNoneKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
}
/// Matches if the OpenMP ``default`` clause has ``shared`` kind specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``.
AST_MATCHER(OMPDefaultClause, isSharedKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared;
}
/// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind
/// specified.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel default(none)
/// #pragma omp parallel default(shared)
/// #pragma omp parallel default(firstprivate)
/// \endcode
///
/// ``ompDefaultClause(isFirstPrivateKind())`` matches only
/// ``default(firstprivate)``.
AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) {
return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate;
}
/// Matches if the OpenMP directive is allowed to contain the specified OpenMP
/// clause kind.
///
/// Given
///
/// \code
/// #pragma omp parallel
/// #pragma omp parallel for
/// #pragma omp for
/// \endcode
///
/// ``ompExecutableDirective(isAllowedToContainClauseKind(OMPC_default))`` matches
/// ``omp parallel`` and ``omp parallel for``.
///
/// If the matcher is used from clang-query, the ``OpenMPClauseKind`` parameter
/// should be passed as a quoted string, e.g.,
/// ``isAllowedToContainClauseKind("OMPC_default")``.
AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind,
OpenMPClauseKind, CKind) {
return llvm::omp::isAllowedClauseForDirective(
Node.getDirectiveKind(), CKind,
Finder->getASTContext().getLangOpts().OpenMP);
}
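// Illustrative sketch (not part of the upstream header): the OpenMP matchers
// above compose like any others. For example, an interactive clang-query
// session could flag directives that may carry a default clause but omit one
// (the session transcript below is hypothetical):
//
//   clang-query> match ompExecutableDirective(
//                  isAllowedToContainClauseKind("OMPC_default"),
//                  unless(hasAnyClause(ompDefaultClause())))
//
// This reports `#pragma omp parallel` but not
// `#pragma omp parallel default(none)`.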
//----------------------------------------------------------------------------//
// End OpenMP handling.
//----------------------------------------------------------------------------//
} // namespace ast_matchers
} // namespace clang
#endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
|
GB_unaryop__ainv_int16_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_fp64
// op(A') function: GB_tran__ainv_int16_fp64
// C type: int16_t
// A type: double
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z ; GB_CAST_SIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int16_fp64
(
int16_t *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int16_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
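//------------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): a hypothetical direct
// caller of GB_unop__ainv_int16_fp64, shown only to make the Cx/Ax/anz/
// nthreads contract concrete. Real callers reach this kernel through the
// library's generic apply path; the demo name and array contents are made up.
//------------------------------------------------------------------------------
#include <stdio.h>
void demo_ainv_int16_fp64 (void)
{
    double  Ax [4] = { 1.5, -2.5, 3.0, -4.0 } ;
    int16_t Cx [4] ;
    // each entry is cast to int16 (via GB_CAST_SIGNED) and then negated
    GrB_Info info = GB_unop__ainv_int16_fp64 (Cx, Ax, 4, 1) ;
    // GrB_NO_VALUE means the kernel was compiled out via GB_DISABLE
    if (info == GrB_SUCCESS)
    {
        for (int p = 0 ; p < 4 ; p++) printf ("%d\n", (int) Cx [p]) ;
    }
}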
|
zunmlq.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_unmlq
*
* Overwrites the general complex m-by-n matrix C with
*
* side = PlasmaLeft side = PlasmaRight
* trans = PlasmaNoTrans Q * C C * Q
* trans = Plasma_ConjTrans Q^H * C C * Q^H
*
* where Q is an orthogonal (or unitary) matrix defined as the product of k
* elementary reflectors
*
* Q = H(1) H(2) . . . H(k)
*
* as returned by plasma_zgelqf. Q is of order m if side = PlasmaLeft
* and of order n if side = PlasmaRight.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^H from the left;
* - PlasmaRight: apply Q or Q^H from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - Plasma_ConjTrans: apply Q^H.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] k
* The number of rows of elementary tile reflectors whose product
* defines the matrix Q.
* If side == PlasmaLeft, m >= k >= 0.
* If side == PlasmaRight, n >= k >= 0.
*
* @param[in] pA
* Details of the LQ factorization of the original matrix A as returned
* by plasma_zgelqf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in] T
* Auxiliary factorization data, computed by plasma_zgelqf.
*
* @param[in,out] pC
* On entry, pointer to the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zunmlq
* @sa plasma_cunmlq
* @sa plasma_dormlq
* @sa plasma_sormlq
* @sa plasma_zgelqf
*
******************************************************************************/
int plasma_zunmlq(plasma_enum_t side, plasma_enum_t trans,
int m, int n, int k,
plasma_complex64_t *pA, int lda,
plasma_desc_t T,
plasma_complex64_t *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((side != PlasmaLeft) && (side != PlasmaRight)) {
plasma_error("illegal value of side");
return -1;
}
if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) {
plasma_error("illegal value of trans");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
int an;
if (side == PlasmaLeft) {
an = m;
}
else {
an = n;
}
if ((k < 0) || (k > an)) {
plasma_error("illegal value of k");
return -5;
}
if (lda < imax(1, k)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldc < imax(1, m)) {
plasma_error("illegal value of ldc");
return -10;
}
// quick return
if (m == 0 || n == 0 || k == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n);
// Set tiling parameters.
int ib = plasma->ib;
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
k, an, 0, 0, k, an, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
m, n, 0, 0, m, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
// Allocate workspace.
plasma_workspace_t work;
size_t lwork = ib*nb; // unmlq: work
retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
if (retval != PlasmaSuccess) {
plasma_error("plasma_workspace_create() failed");
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(pA, lda, A, &sequence, &request);
plasma_omp_zge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_zunmlq(side, trans,
A, T, C, work,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
plasma_workspace_destroy(&work);
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
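/******************************************************************************
 * Illustrative sketch (not part of the library): a typical calling sequence
 * pairing plasma_zgelqf() with plasma_zunmlq() to apply Q^H from the left.
 * The function name and error handling are made up, and the plasma_init,
 * plasma_zgelqf, and plasma_finalize signatures are assumed to follow the
 * standard PLASMA interface.
 ******************************************************************************/
void example_apply_qh(int k, int m, int n,
                      plasma_complex64_t *pA, int lda, // k-by-m, k <= m
                      plasma_complex64_t *pC, int ldc) // m-by-n
{
    plasma_init();
    plasma_desc_t T;
    // LQ factorization A = L*Q; reflectors land in pA, block data in T.
    plasma_zgelqf(k, m, pA, lda, &T);
    // Overwrite C with Q^H * C, where Q of order m is built from k reflectors.
    plasma_zunmlq(PlasmaLeft, Plasma_ConjTrans,
                  m, n, k,
                  pA, lda, T, pC, ldc);
    plasma_desc_destroy(&T);
    plasma_finalize();
}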
/***************************************************************************//**
*
* @ingroup plasma_unmlq
*
* Non-blocking tile version of plasma_zunmlq().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Intended usage:
* - PlasmaLeft: apply Q or Q^H from the left;
* - PlasmaRight: apply Q or Q^H from the right.
*
* @param[in] trans
* Intended usage:
* - PlasmaNoTrans: apply Q;
* - Plasma_ConjTrans: apply Q^H.
*
* @param[in] A
* Descriptor of matrix A stored in the tile layout.
* Details of the LQ factorization of the original matrix A as returned
* by plasma_zgelqf.
*
* @param[in] T
* Descriptor of matrix T.
* Auxiliary factorization data, computed by plasma_zgelqf.
*
* @param[in,out] C
* Descriptor of matrix C.
* On entry, the m-by-n matrix C.
* On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For multiplication by Q contains preallocated space for work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zunmlq
* @sa plasma_omp_cunmlq
* @sa plasma_omp_dormlq
* @sa plasma_omp_sormlq
* @sa plasma_omp_zgelqf
*
******************************************************************************/
void plasma_omp_zunmlq(plasma_enum_t side, plasma_enum_t trans,
plasma_desc_t A, plasma_desc_t T, plasma_desc_t C,
plasma_workspace_t work,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((side != PlasmaLeft) && (side != PlasmaRight)) {
plasma_error("invalid value of side");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) {
plasma_error("invalid value of trans");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(T) != PlasmaSuccess) {
plasma_error("invalid T");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(C) != PlasmaSuccess) {
plasma_error("invalid C");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0)
return;
// Call the parallel function.
if (plasma->householder_mode == PlasmaTreeHouseholder) {
plasma_pzunmlq_tree(side, trans,
A, T, C,
work, sequence, request);
}
else {
plasma_pzunmlq(side, trans,
A, T, C,
work, sequence, request);
}
}
|
TaskClauseLink.c | int x;
int main() {
int x;
#pragma omp task if (1) final(1)
{
11;
}
#pragma omp task
{
int x;
}
}
|
ast-dump-openmp-target-teams-distribute.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target teams distribute
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target teams distribute collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target teams distribute collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:4:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
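//
// The checks that follow cover test_two. As a reading aid, here is a sketch
// of the function they describe, reconstructed from the source locations in
// the dump (lines 9-14 of ast-dump-openmp-target-teams-distribute.c); the
// token positions come from the checks below, the exact whitespace is an
// assumption:
//
//   void test_two(int x, int y) {
//   #pragma omp target teams distribute
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         ;
//   }
//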
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:10:1, col:36>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
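//
// Next come the checks for test_three: the same doubly nested loop, but with
// an explicit collapse(1) clause, which is why an OMPCollapseClause holding a
// ConstantExpr of 1 appears before the implicit firstprivate clause. A sketch
// reconstructed from the dump's source locations (lines 16-21), whitespace
// assumed:
//
//   void test_three(int x, int y) {
//   #pragma omp target teams distribute collapse(1)
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         ;
//   }
//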
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:17:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
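//
// The checks below are for test_four: an identical loop nest, but with
// collapse(2), so both loops belong to the directive's associated loop nest.
// One visible consequence in the dump: the captured reference to 'y' now
// points at the inner loop itself (line:26:5) rather than at the bound
// expression (col 25), as it did for test_two and test_three. Sketch from
// the source locations (lines 23-28), whitespace assumed:
//
//   void test_four(int x, int y) {
//   #pragma omp target teams distribute collapse(2)
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         ;
//   }
//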
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeDirective {{.*}} <line:24:1, col:48>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
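//
// Finally, test_five: three parameters and a triply nested loop, still with
// collapse(2), so only the outer two loops are associated with the directive
// and the innermost loop remains ordinary body code (its 'z' bound is
// captured at line:34:27, the bound expression, not at the loop keyword).
// Sketch from the dump's source locations (lines 30-36), whitespace assumed:
//
//   void test_five(int x, int y, int z) {
//   #pragma omp target teams distribute collapse(2)
//     for (int i = 0; i < x; i++)
//       for (int i = 0; i < y; i++)
//         for (int i = 0; i < z; i++)
//           ;
//   }
//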
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeDirective {{.*}} <line:31:1, col:48>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:37, col:47>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:46> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:46> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:3> col:3 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:5> col:5 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
valid.yolo9.src.h | #pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_1024_17_17_512_3_3.h"
#include "gen_ukr_A1B2gemm_1_1024_17_17_512_3_3.h"
void testrun(float* A, float* B, float* C, float* oriB) {
int tid = omp_get_thread_num();
int Nx = 17;
int Ny = 17;
int Nh = 3;
long long Astrides[6] = {0,1,2,3,4,5};
int b1 = 0;
for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
}
}
#pragma omp barrier // begin push button generated block
for(int c5=0;c5<512+0;c5+=512)
{
for(int xy5=0;xy5<289+0;xy5+=289)
{
for(int f5=0;f5<1024+0;f5+=1024)
{
for(int c4=c5;c4<min(512, 512+c5);c4+=512)
{
for(int f4=f5;f4<min(1024, 1024+f5);f4+=1024)
{
for(int xy4=xy5;xy4<min(289, 289+xy5);xy4+=289)
{
for(int c3=c4;c3<min(512, 512+c4);c3+=Tc1)
{
for(int xy3=xy4;xy3<min(289, 289+xy4);xy3+=Txy3)
{
for(int f3=f4;f3<min(1024, 1024+f4);f3+=Tf2)
{
for(int xy2=xy3;xy2<min(289, Txy3+xy3);xy2+=6)
{
for(int f2=f3;f2<min(1024, Tf2+f3);f2+=16)
{
for(int c2=c3;c2<min(512, Tc1+c3);c2+=Tc1)
{
for(int c1=c2;c1<min(512, Tc1+c2);c1+=Tc1)
{
for(int xy1=xy2;xy1<min(289, 6+xy2);xy1+=6)
{
for(int f1=f2;f1<min(1024, 16+f2);f1+=16)
{
int ctile=min(Tc1, 512-c1);
int x1=xy1/17;
int y1=xy1%17/1;
int c1_1=c1/1;
int c1_2=c1%1/1;
int kf1_1=f1/16;
int kf1_2=f1%16/1;
int of1_1=f1/1;
int of1_2=f1%1/1;
int offsetA=0+b1*184832+c1_1*361+1*x1*19+1*y1*1+c1_2*1;
int offsetB=0+kf1_1*73728+c1*144+0*48+0*16+kf1_2*1;
int offsetC=0+b1*295936+of1_1*289+x1*17+y1*1+of1_2*1;
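// Three cases for the 6-wide xy tile: (1) it fits inside the current image row;
// (2) it wraps into the next row, so the wrapped lanes get +2 on their A strides
// (A rows are padded to 19 = 17 + 2, so a lane that falls in the next image row
// sits 2 elements further in memory); (3) too close to the end of the image,
// fall back to the 1x2v kernel.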
if(17-y1>=6){
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
else if(17*17-xy1>=6){
for(int sti=17-y1;sti<6;sti+=1)
{
Astrides[sti]+=2;
}
cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
for(int sti=17-y1;sti<6;sti+=1)
{
Astrides[sti]-=2;
}
}
else{
cnn_ukr_float_scatter_1x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
}
// end push button generated block
} |
test_utils.h | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cfloat>   // DBL_MAX / FLT_MAX (read_binary_vector)
#include <cstdint>
#include <cstdlib>
#include <cstring>  // memset / memcpy (ref_csr2csc, coo_compress)
#include <iomanip>
#include <iostream>
#include <iterator>  // std::ostream_iterator (printv / printCsrMatI)
#include <limits>
#include <map>
#include <memory>  // std::unique_ptr (generate_graph_csr_from_mm)
#include <sstream>
#include <string>
#include <utility>
#include <vector>  // std::vector (used throughout; not guaranteed to arrive transitively)
extern "C" {
#include "mmio.h"
}
#include <cuda.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <library_types.h>
#include <thrust/adjacent_difference.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <rmm/rmm.h>
#include "utilities/error_utils.h"
#include "converters/COOtoCSR.cuh"
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL(call) \
{ \
cudaError_t cudaStatus = call; \
if (cudaSuccess != cudaStatus) { \
fprintf(stderr, \
"ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
#call, \
__LINE__, \
__FILE__, \
cudaGetErrorString(cudaStatus), \
cudaStatus); \
} \
}
#endif
#define NCCLCHECK(cmd) \
{ \
ncclResult_t nccl_status = cmd; \
if (nccl_status != ncclSuccess) { \
printf("NCCL failure %s:%d '%s'\n", __FILE__, __LINE__, ncclGetErrorString(nccl_status)); \
FAIL(); \
} \
}
#define MPICHECK(cmd) \
{ \
int e = cmd; \
if (e != MPI_SUCCESS) { \
printf("Failed: MPI error %s:%d '%d'\n", __FILE__, __LINE__, e); \
FAIL(); \
} \
}
std::string getFileName(const std::string& s)
{
char sep = '/';
#ifdef _WIN32
sep = '\\';
#endif
size_t i = s.rfind(sep, s.length());
if (i != std::string::npos) { return (s.substr(i + 1, s.length() - i)); }
return ("");
}
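// e.g. getFileName("/datasets/karate.mtx") returns "karate.mtx".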
template <typename T>
void verbose_diff(std::vector<T>& v1, std::vector<T>& v2)
{
for (unsigned int i = 0; i < v1.size(); ++i) {
if (v1[i] != v2[i]) {
std::cout << "[" << i << "] : " << v1[i] << " vs. " << v2[i] << std::endl;
}
}
}
template <typename T>
int eq(std::vector<T>& v1, std::vector<T>& v2)
{
if (v1 == v2)
return 0;
else {
verbose_diff(v1, v2);
return 1;
}
}
template <typename T>
void printv(size_t n, T* vec, int offset)
{
thrust::device_ptr<T> dev_ptr(vec);
std::cout.precision(15);
std::cout << "sample size = " << n << ", offset = " << offset << std::endl;
thrust::copy(
dev_ptr + offset,
dev_ptr + offset + n,
std::ostream_iterator<T>(
std::cout, " ")); // Assume no RMM dependency; FIXME: check / test (potential BUG !!!!!)
std::cout << std::endl;
}
template <typename T>
void random_vals(std::vector<T>& v)
{
srand(42);
for (auto i = size_t{0}; i < v.size(); i++) v[i] = static_cast<T>(std::rand() % 10);
}
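/// Reference CSR-to-CSC conversion on the host.
/** A counting pass over csrColInd builds the column pointers, a prefix sum
 * turns counts into offsets, and a scatter over the CSR rows fills
 * cscRowind/cscVals in row order within each column. `base` selects 0- or
 * 1-based indexing.
 */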
template <typename T_ELEM>
void ref_csr2csc(int m,
int n,
int nnz,
const T_ELEM* csrVals,
const int* csrRowptr,
const int* csrColInd,
T_ELEM* cscVals,
int* cscRowind,
int* cscColptr,
int base = 0)
{
int i, j, row, col, index;
int* counters;
T_ELEM val;
/* early return */
if ((m <= 0) || (n <= 0) || (nnz <= 0)) { return; }
/* build compressed column pointers */
memset(cscColptr, 0, (n + 1) * sizeof(cscColptr[0]));
cscColptr[0] = base;
for (i = 0; i < nnz; i++) { cscColptr[1 + csrColInd[i] - base]++; }
for (i = 0; i < n; i++) { cscColptr[i + 1] += cscColptr[i]; }
/* expand row indices and copy them, along with the values, into the csc arrays according to the permutation */
counters = (int*)malloc(n * sizeof(counters[0]));
memset(counters, 0, n * sizeof(counters[0]));
for (i = 0; i < m; i++) {
for (j = csrRowptr[i]; j < csrRowptr[i + 1]; j++) {
row = i + base;
col = csrColInd[j - base];
index = cscColptr[col - base] - base + counters[col - base];
counters[col - base]++;
cscRowind[index] = row;
if (csrVals != NULL && cscVals != NULL) { /* copy values only when both arrays exist; || would dereference a NULL source */
val = csrVals[j - base];
cscVals[index] = val;
}
}
}
free(counters);
}
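// Worked example (0-based, 2x2 matrix with 3 non-zeros):
//   csrRowptr = {0,2,3}, csrColInd = {0,1,1}, csrVals = {1,2,3}
// gives
//   cscColptr = {0,1,3}, cscRowind = {0,0,1}, cscVals = {1,2,3}.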
template <typename T>
int transition_matrix_cpu(int n, int e, int* csrRowPtrA, int* csrColIndA, T* weight, T* is_leaf)
{
  // omp_set_num_threads(4);
  // #pragma omp parallel
  int j, row, row_size;
  // #pragma omp for
for (row = 0; row < n; row++) {
row_size = csrRowPtrA[row + 1] - csrRowPtrA[row];
if (row_size == 0)
is_leaf[row] = 1.0;
else {
is_leaf[row] = 0.0;
for (j = csrRowPtrA[row]; j < csrRowPtrA[row + 1]; j++) weight[j] = 1.0 / row_size;
}
}
return 0;
}
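// transition_matrix_cpu turns an adjacency CSR into a row-stochastic transition
// matrix: every non-empty row gets uniform weights 1/deg(row), and rows with no
// outgoing edges (dangling nodes) are flagged in is_leaf.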
template <typename T>
void printCsrMatI(int m,
int n,
int nnz,
std::vector<int>& csrRowPtr,
std::vector<uint16_t>& csrColInd,
std::vector<T>& csrVal)
{
std::vector<T> v(n);
std::stringstream ss;
ss.str(std::string());
ss << std::fixed;
ss << std::setprecision(2);
for (int i = 0; i < m; i++) {
std::fill(v.begin(), v.end(), 0);
for (int j = csrRowPtr[i]; j < csrRowPtr[i + 1]; j++) v[csrColInd[j]] = csrVal[j];
std::copy(v.begin(), v.end(), std::ostream_iterator<int>(ss, " "));
ss << "\n";
}
ss << "\n";
std::cout << ss.str();
}
/// Read matrix properties from Matrix Market file
/** Matrix Market file is assumed to be a sparse matrix in coordinate
* format.
*
* @param f File stream for Matrix Market file.
* @param tg Boolean indicating whether to convert matrix to general
* format (from symmetric, Hermitian, or skew symmetric format).
* @param t (Output) MM_typecode with matrix properties.
* @param m (Output) Number of matrix rows.
* @param n (Output) Number of matrix columns.
* @param nnz (Output) Number of non-zero matrix entries.
* @return Zero if properties were read successfully. Otherwise
* non-zero.
*/
template <typename IndexType_>
int mm_properties(FILE* f, int tg, MM_typecode* t, IndexType_* m, IndexType_* n, IndexType_* nnz)
{
// Read matrix properties from file
int mint, nint, nnzint;
if (fseek(f, 0, SEEK_SET)) {
fprintf(stderr, "Error: could not set position in file\n");
return -1;
}
if (mm_read_banner(f, t)) {
fprintf(stderr, "Error: could not read Matrix Market file banner\n");
return -1;
}
if (!mm_is_matrix(*t) || !mm_is_coordinate(*t)) {
fprintf(stderr, "Error: file does not contain matrix in coordinate format\n");
return -1;
}
if (mm_read_mtx_crd_size(f, &mint, &nint, &nnzint)) {
fprintf(stderr, "Error: could not read matrix dimensions\n");
return -1;
}
if (!mm_is_pattern(*t) && !mm_is_real(*t) && !mm_is_integer(*t) && !mm_is_complex(*t)) {
fprintf(stderr, "Error: matrix entries are not valid type\n");
return -1;
}
*m = mint;
*n = nint;
*nnz = nnzint;
// Find total number of non-zero entries
if (tg && !mm_is_general(*t)) {
// Non-diagonal entries should be counted twice
IndexType_ nnzOld = *nnz;
*nnz *= 2;
// Diagonal entries should not be double-counted
int i;
int st;
for (i = 0; i < nnzOld; ++i) {
// Read matrix entry
int row, col;  // read as int to match the "%d" fscanf conversions below (IndexType_ may be a wider type)
double rval, ival;
if (mm_is_pattern(*t))
st = fscanf(f, "%d %d\n", &row, &col);
else if (mm_is_real(*t) || mm_is_integer(*t))
st = fscanf(f, "%d %d %lg\n", &row, &col, &rval);
else // Complex matrix
st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival);
if (ferror(f) || (st == EOF)) {
fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i + 1);
return -1;
}
// Check if entry is diagonal
if (row == col) --(*nnz);
}
}
return 0;
}
/// Read Matrix Market file and convert to COO format matrix
/** Matrix Market file is assumed to be a sparse matrix in coordinate
* format.
*
* @param f File stream for Matrix Market file.
* @param tg Boolean indicating whether to convert matrix to general
* format (from symmetric, Hermitian, or skew symmetric format).
* @param nnz Number of non-zero matrix entries.
* @param cooRowInd (Output) Row indices for COO matrix. Should have
* at least nnz entries.
* @param cooColInd (Output) Column indices for COO matrix. Should
* have at least nnz entries.
* @param cooRVal (Output) Real component of COO matrix
* entries. Should have at least nnz entries. Ignored if null
* pointer.
* @param cooIVal (Output) Imaginary component of COO matrix
* entries. Should have at least nnz entries. Ignored if null
* pointer.
* @return Zero if matrix was read successfully. Otherwise non-zero.
*/
template <typename IndexType_, typename ValueType_>
int mm_to_coo(FILE* f,
int tg,
IndexType_ nnz,
IndexType_* cooRowInd,
IndexType_* cooColInd,
ValueType_* cooRVal,
ValueType_* cooIVal)
{
// Read matrix properties from file
MM_typecode t;
int m, n, nnzOld;
if (fseek(f, 0, SEEK_SET)) {
fprintf(stderr, "Error: could not set position in file\n");
return -1;
}
if (mm_read_banner(f, &t)) {
fprintf(stderr, "Error: could not read Matrix Market file banner\n");
return -1;
}
if (!mm_is_matrix(t) || !mm_is_coordinate(t)) {
fprintf(stderr, "Error: file does not contain matrix in coordinate format\n");
return -1;
}
if (mm_read_mtx_crd_size(f, &m, &n, &nnzOld)) {
fprintf(stderr, "Error: could not read matrix dimensions\n");
return -1;
}
if (!mm_is_pattern(t) && !mm_is_real(t) && !mm_is_integer(t) && !mm_is_complex(t)) {
fprintf(stderr, "Error: matrix entries are not valid type\n");
return -1;
}
// Add each matrix entry in file to COO format matrix
IndexType_ i; // Entry index in Matrix Market file
IndexType_ j = 0; // Entry index in COO format matrix
for (i = 0; i < nnzOld; ++i) {
// Read entry from file
int row, col;
double rval, ival;
int st;
if (mm_is_pattern(t)) {
st = fscanf(f, "%d %d\n", &row, &col);
rval = 1.0;
ival = 0.0;
} else if (mm_is_real(t) || mm_is_integer(t)) {
st = fscanf(f, "%d %d %lg\n", &row, &col, &rval);
ival = 0.0;
} else // Complex matrix
st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival);
if (ferror(f) || (st == EOF)) {
fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i + 1);
return -1;
}
// Switch to 0-based indexing
--row;
--col;
// Record entry
cooRowInd[j] = row;
cooColInd[j] = col;
if (cooRVal != NULL) cooRVal[j] = rval;
if (cooIVal != NULL) cooIVal[j] = ival;
++j;
// Add symmetric complement of non-diagonal entries
if (tg && !mm_is_general(t) && (row != col)) {
// Modify entry value if matrix is skew symmetric or Hermitian
if (mm_is_skew(t)) {
rval = -rval;
ival = -ival;
} else if (mm_is_hermitian(t)) {
ival = -ival;
}
// Record entry
cooRowInd[j] = col;
cooColInd[j] = row;
if (cooRVal != NULL) cooRVal[j] = rval;
if (cooIVal != NULL) cooIVal[j] = ival;
++j;
}
}
return 0;
}
/// Compare two tuples based on the element indexed by i
class lesser_tuple {
const int i;
public:
lesser_tuple(int _i) : i(_i) {}
template <typename Tuple1, typename Tuple2>
__host__ __device__ bool operator()(const Tuple1 t1, const Tuple2 t2)
{
switch (i) {
case 0:
return (thrust::get<0>(t1) == thrust::get<0>(t2) ? thrust::get<1>(t1) < thrust::get<1>(t2)
: thrust::get<0>(t1) < thrust::get<0>(t2));
case 1:
return (thrust::get<1>(t1) == thrust::get<1>(t2) ? thrust::get<0>(t1) < thrust::get<0>(t2)
: thrust::get<1>(t1) < thrust::get<1>(t2));
default:
return (thrust::get<0>(t1) == thrust::get<0>(t2) ? thrust::get<1>(t1) < thrust::get<1>(t2)
: thrust::get<0>(t1) < thrust::get<0>(t2));
}
}
};
/// Sort entries in COO format matrix
/** Sort is stable.
*
* @param nnz Number of non-zero matrix entries.
* @param sort_by_row Boolean indicating whether matrix entries
* will be sorted by row index or by column index.
* @param cooRowInd Row indices for COO matrix.
* @param cooColInd Column indices for COO matrix.
* @param cooRVal Real component for COO matrix entries. Ignored if
* null pointer.
* @param cooIVal Imaginary component COO matrix entries. Ignored if
* null pointer.
*/
template <typename IndexType_, typename ValueType_>
void coo_sort(IndexType_ nnz,
int sort_by_row,
IndexType_* cooRowInd,
IndexType_* cooColInd,
ValueType_* cooRVal,
ValueType_* cooIVal)
{
// Determine whether to sort by row or by column
int i;
if (sort_by_row == 0)
i = 1;
else
i = 0;
// Apply stable sort
using namespace thrust;
if ((cooRVal == NULL) && (cooIVal == NULL))
stable_sort(make_zip_iterator(make_tuple(cooRowInd, cooColInd)),
make_zip_iterator(make_tuple(cooRowInd + nnz, cooColInd + nnz)),
lesser_tuple(i));
else if ((cooRVal == NULL) && (cooIVal != NULL))
stable_sort(make_zip_iterator(make_tuple(cooRowInd, cooColInd, cooIVal)),
make_zip_iterator(make_tuple(cooRowInd + nnz, cooColInd + nnz, cooIVal + nnz)),
lesser_tuple(i));
else if ((cooRVal != NULL) && (cooIVal == NULL))
stable_sort(make_zip_iterator(make_tuple(cooRowInd, cooColInd, cooRVal)),
make_zip_iterator(make_tuple(cooRowInd + nnz, cooColInd + nnz, cooRVal + nnz)),
lesser_tuple(i));
else
stable_sort(
make_zip_iterator(make_tuple(cooRowInd, cooColInd, cooRVal, cooIVal)),
make_zip_iterator(make_tuple(cooRowInd + nnz, cooColInd + nnz, cooRVal + nnz, cooIVal + nnz)),
lesser_tuple(i));
}
template <typename IndexT>
void coo2csr(std::vector<IndexT>& cooRowInd, // in: I[] (overwrite)
const std::vector<IndexT>& cooColInd, // in: J[]
std::vector<IndexT>& csrRowPtr, // out
std::vector<IndexT>& csrColInd) // out
{
std::vector<std::pair<IndexT, IndexT>> items;
for (auto i = size_t{0}; i < cooRowInd.size(); ++i)
items.push_back(std::make_pair(cooRowInd[i], cooColInd[i]));
// sort pairs
std::sort(items.begin(),
items.end(),
[](const std::pair<IndexT, IndexT>& left, const std::pair<IndexT, IndexT>& right) {
return left.first < right.first;
});
for (auto i = size_t{0}; i < cooRowInd.size(); ++i) {
cooRowInd[i] = items[i].first; // save the sorted rows to compress them later
csrColInd[i] = items[i].second; // save the col idx, not sure if they are sorted for each row
}
// Count number of elements per row
for (auto i = size_t{0}; i < cooRowInd.size(); ++i) ++(csrRowPtr[cooRowInd[i] + 1]);
// Compute cumulative sum to obtain row offsets/pointers
for (auto i = size_t{0}; i < csrRowPtr.size() - 1; ++i) csrRowPtr[i + 1] += csrRowPtr[i];
}
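// Note: coo2csr assumes csrRowPtr is zero-initialized with (nrows + 1) entries
// and csrColInd is presized to nnz. It sorts by row only, so the column order
// within a row is unspecified (std::sort is not stable), and no values are
// carried.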
/// Compress sorted list of indices
/** For use in converting COO format matrix to CSR or CSC format.
*
* @param n Maximum index.
* @param nnz Number of non-zero matrix entries.
* @param sortedIndices Sorted list of indices (COO format).
* @param compressedIndices (Output) Compressed list of indices (CSR
* or CSC format). Should have at least n+1 entries.
*/
template <typename IndexType_>
void coo_compress(IndexType_ m,
IndexType_ n,
IndexType_ nnz,
const IndexType_* __restrict__ sortedIndices,
IndexType_* __restrict__ compressedIndices)
{
IndexType_ i;
// Initialize everything to zero
memset(compressedIndices, 0, (m + 1) * sizeof(IndexType_));
// Count number of elements per row
for (i = 0; i < nnz; ++i) ++(compressedIndices[sortedIndices[i] + 1]);
// Compute cumulative sum to obtain row offsets/pointers
for (i = 0; i < m; ++i) compressedIndices[i + 1] += compressedIndices[i];
}
/// Convert COO format matrix to CSR format
/** On output, matrix entries in COO format matrix will be sorted
* (primarily by row index, secondarily by column index).
*
* @param m Number of matrix rows.
* @param n Number of matrix columns.
* @param nnz Number of non-zero matrix entries.
* @param cooRowInd Row indices for COO matrix.
* @param cooColInd Column indices for COO matrix.
* @param cooRVal Real component of COO matrix entries. Ignored if
* null pointer.
* @param cooIVal Imaginary component of COO matrix entries. Ignored
* if null pointer.
* @param csrRowPtr Row pointers for CSR matrix. Should have at least
* n+1 entries.
* @param csrColInd Column indices for CSR matrix (identical to
* output of cooColInd). Should have at least nnz entries. Ignored if
* null pointer.
* @param csrRVal Real component of CSR matrix entries (identical to
* output of cooRVal). Should have at least nnz entries. Ignored if
* null pointer.
* @param csrIVal Imaginary component of CSR matrix entries
* (identical to output of cooIVal). Should have at least nnz
* entries. Ignored if null pointer.
* @return Zero if matrix was converted successfully. Otherwise
* non-zero.
*/
template <typename IndexType_, typename ValueType_>
int coo_to_csr(IndexType_ m,
IndexType_ n,
IndexType_ nnz,
IndexType_* __restrict__ cooRowInd,
IndexType_* __restrict__ cooColInd,
ValueType_* __restrict__ cooRVal,
ValueType_* __restrict__ cooIVal,
IndexType_* __restrict__ csrRowPtr,
IndexType_* __restrict__ csrColInd,
ValueType_* __restrict__ csrRVal,
ValueType_* __restrict__ csrIVal)
{
// Convert COO to CSR matrix
coo_sort(nnz, 0, cooRowInd, cooColInd, cooRVal, cooIVal);
coo_sort(nnz, 1, cooRowInd, cooColInd, cooRVal, cooIVal);
// coo_sort2<int,float>(m, nnz, cooRowInd, cooColInd);
coo_compress(m, n, nnz, cooRowInd, csrRowPtr);
// Copy arrays
if (csrColInd != NULL) memcpy(csrColInd, cooColInd, nnz * sizeof(IndexType_));
if ((cooRVal != NULL) && (csrRVal != NULL)) memcpy(csrRVal, cooRVal, nnz * sizeof(ValueType_));
if ((cooIVal != NULL) && (csrIVal != NULL)) memcpy(csrIVal, cooIVal, nnz * sizeof(ValueType_));
return 0;
}
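// Minimal usage sketch (hypothetical buffers, 0-based indexing): with
// cooRowInd/cooColInd/cooRVal holding nnz entries,
//   std::vector<int> rowPtr(m + 1);
//   coo_to_csr<int, double>(m, n, nnz, cooRowInd, cooColInd, cooRVal,
//                           (double*)NULL, rowPtr.data(), csrColInd,
//                           csrRVal, (double*)NULL);
// sorts the COO arrays in place and emits the CSR structure.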
int read_binary_vector(FILE* fpin, int n, std::vector<float>& val)
{
size_t is_read1;
double* t_storage = new double[n];
is_read1 = fread(t_storage, sizeof(double), n, fpin);
for (int i = 0; i < n; i++) {
if (t_storage[i] == DBL_MAX)
val[i] = FLT_MAX;
else if (t_storage[i] == -DBL_MAX)
val[i] = -FLT_MAX;
else
val[i] = static_cast<float>(t_storage[i]);
}
delete[] t_storage;
if (is_read1 != (size_t)n) {
printf("%s", "I/O fail\n");
return 1;
}
return 0;
}
int read_binary_vector(FILE* fpin, int n, std::vector<double>& val)
{
size_t is_read1;
is_read1 = fread(&val[0], sizeof(double), n, fpin);
if (is_read1 != (size_t)n) {
printf("%s", "I/O fail\n");
return 1;
}
return 0;
}
// FIXME: A similar function could be useful for CSC format
// There are functions above that operate coo -> csr and coo->csc
/**
* @tparam
*/
template <typename VT, typename ET, typename WT>
std::unique_ptr<cugraph::experimental::GraphCSR<VT, ET, WT>> generate_graph_csr_from_mm(
bool& directed, std::string mm_file)
{
VT number_of_vertices;
ET number_of_edges;
FILE* fpin = fopen(mm_file.c_str(), "r");
EXPECT_NE(fpin, nullptr);
VT number_of_columns = 0;
MM_typecode mm_typecode{0};
EXPECT_EQ(mm_properties<VT>(
fpin, 1, &mm_typecode, &number_of_vertices, &number_of_columns, &number_of_edges),
0);
EXPECT_TRUE(mm_is_matrix(mm_typecode));
EXPECT_TRUE(mm_is_coordinate(mm_typecode));
EXPECT_FALSE(mm_is_complex(mm_typecode));
EXPECT_FALSE(mm_is_skew(mm_typecode));
directed = !mm_is_symmetric(mm_typecode);
// Allocate memory on host
std::vector<VT> coo_row_ind(number_of_edges);
std::vector<VT> coo_col_ind(number_of_edges);
std::vector<WT> coo_val(number_of_edges);
// Read
EXPECT_EQ((mm_to_coo<VT, WT>(
fpin, 1, number_of_edges, &coo_row_ind[0], &coo_col_ind[0], &coo_val[0], NULL)),
0);
EXPECT_EQ(fclose(fpin), 0);
cugraph::experimental::GraphCOOView<VT, ET, WT> cooview(
&coo_col_ind[0], &coo_row_ind[0], &coo_val[0], number_of_vertices, number_of_edges);
return cugraph::coo_to_csr(cooview);
}
////////////////////////////////////////////////////////////////////////////////
// FIXME: move this code to rapids-core
////////////////////////////////////////////////////////////////////////////////
// Define RAPIDS_DATASET_ROOT_DIR using a preprocessor variable to
// allow for a build to override the default. This is useful for
// having different builds for specific default dataset locations.
#ifndef RAPIDS_DATASET_ROOT_DIR
#define RAPIDS_DATASET_ROOT_DIR "/datasets"
#endif
static const std::string& get_rapids_dataset_root_dir()
{
static std::string rdrd("");
// Env var always overrides the value of RAPIDS_DATASET_ROOT_DIR
if (rdrd == "") {
const char* envVar = std::getenv("RAPIDS_DATASET_ROOT_DIR");
rdrd = (envVar != NULL) ? envVar : RAPIDS_DATASET_ROOT_DIR;
}
return rdrd;
}
|
cloud_mpi.c | #include <string>
#include <mpi.h>
#include <iostream>
#include <algorithm>
#include <utility>
#include <tfhe/tfhe.h>
#include <tfhe/tfhe_io.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <cassert>
#include <sys/time.h>
#include <omp.h>
#include <fstream>
using namespace std;
ifstream read;
#define T_FILE "averagestandard.txt"
void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y, const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset)
{
int mpi_initialized = 0;
MPI_Initialized(&mpi_initialized);  // argc/argv are not visible here, and MPI_Init may run at most once per process
if (!mpi_initialized) MPI_Init(NULL, NULL);
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
//MPI test
printf("Hello World from process %d of %d\n", world_rank, world_size);
MPI_Barrier(MPI_COMM_WORLD);
const LweParams *in_out_params = keyset->params->in_out_params;
LweSample *carry = new_LweSample_array(1, in_out_params);
LweSample *axc = new_LweSample_array(1, in_out_params);
LweSample *bxc = new_LweSample_array(1, in_out_params);
bootsCOPY(carry, c, keyset);
for(int32_t i = 0; i < nb_bits; i++)
{
{
bootsXOR(axc, x + i, carry, keyset);
bootsXOR(bxc, y + i, carry, keyset);
MPI_Barrier(MPI_COMM_WORLD);
}
{
bootsXOR(sum + i, x + i, bxc, keyset);
bootsAND(axc, axc, bxc, keyset);
MPI_Barrier(MPI_COMM_WORLD);
}
bootsXOR(carry, carry, axc, keyset);
}
bootsCOPY(carryover, carry, keyset);
delete_LweSample_array(1, carry);
delete_LweSample_array(1, axc);
delete_LweSample_array(1, bxc);
}
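/* The loop above is a ripple-carry full adder evaluated gate-by-gate under
 * encryption: per bit,
 *   sum_i     = x_i XOR y_i XOR carry
 *   carry_out = carry XOR ((x_i XOR carry) AND (y_i XOR carry))
 * where the second line is majority(x_i, y_i, carry) rewritten with two XORs
 * and one AND. A plaintext reference model (hypothetical helper, not part of
 * TFHE) for sanity-checking decrypted outputs:
 *
 *   static void ref_add(const int *x, const int *y, int c, int *sum, int nb_bits)
 *   {
 *       for (int i = 0; i < nb_bits; i++) {
 *           sum[i] = x[i] ^ y[i] ^ c;
 *           c = c ^ ((x[i] ^ c) & (y[i] ^ c));
 *       }
 *   }
 */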
void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size)
{
for(size_t i = 0; i < size; i++){
bootsCONSTANT(result + i, 0, keyset);}
}
void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size)
{
for(size_t i = 0; i < size; i++){
bootsNOT(result + i, x + i, keyset);}
}
void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3, LweSample *a, LweSample *b, LweSample *c, LweSample *d,LweSample *e, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
const LweParams *in_out_params = keyset->params->in_out_params;
LweSample *sum = new_LweSample_array(32, in_out_params);
LweSample *sum2 = new_LweSample_array(32, in_out_params);
LweSample *sum3 = new_LweSample_array(32, in_out_params);
LweSample *carryover = new_LweSample_array(32, in_out_params);
LweSample *carryover2 = new_LweSample_array(32, in_out_params);
LweSample *carryover3 = new_LweSample_array(32, in_out_params);
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum + i, 0, keyset);
bootsCONSTANT(sum2 + i, 0, keyset);
bootsCONSTANT(sum3 + i, 0, keyset);
bootsCONSTANT(carryover + i, 0, keyset);
bootsCONSTANT(carryover2 + i, 0, keyset);
bootsCONSTANT(carryover3 + i, 0, keyset);
}
//adding the 2nd result with the carry
add(sum, carryover, e, b, carry, nb_bits, keyset);
add(sum2, carryover2, d, a, carryover, nb_bits, keyset);
add(sum3, carryover3, c, carryover2,carry,nb_bits, keyset);
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCOPY(finalresult + i, sum3 + i, keyset);
}
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCOPY(finalresult2 + i, sum2 + i, keyset);
}
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCOPY(finalresult3 + i, sum + i, keyset);
}
delete_LweSample_array(32, sum);
delete_LweSample_array(32, sum2);
delete_LweSample_array(32, sum3);
delete_LweSample_array(32, carryover);
delete_LweSample_array(32, carryover2);
delete_LweSample_array(32, carryover3);
}
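/* split is a three-limb ripple add built from the 32-bit encrypted adder above:
 * the low limbs (e + b), middle limbs (d + a) and high limbs (c) are summed
 * with the carry chained between limbs; finalresult, finalresult2 and
 * finalresult3 receive the high, middle and low 32-bit limbs respectively. */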
void MPImul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
int mpi_initialized = 0;
MPI_Initialized(&mpi_initialized);
if (!mpi_initialized) MPI_Init(NULL, NULL);  // MPI_Init may run at most once per process
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
//mpi test
printf("Hello World from process %d of %d\n", world_rank, world_size);
MPI_Barrier(MPI_COMM_WORLD);
const LweParams *in_out_params = keyset->params->in_out_params;
//sum of the output
LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
LweSample *tmp = new_LweSample_array(32, in_out_params);
LweSample *tmp2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
LweSample *carry1 = new_LweSample_array(32, in_out_params);
LweSample *carry2 = new_LweSample_array(32, in_out_params);
//set all these to 0
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset);
bootsCONSTANT(tmp + i, 0, keyset);
bootsCONSTANT(tmp2 + i, 0, keyset);
bootsCONSTANT(tmp3c1 + i, 0, keyset);
bootsCONSTANT(tmp3c2 + i, 0, keyset);
bootsCONSTANT(carry1 + i, 0, keyset);
bootsCONSTANT(carry2 + i, 0, keyset);
}
//schoolbook multiplication: AND the multiplicand bits with each multiplier bit
int round = 0;
for (int32_t i = 0; i < nb_bits; ++i)
{
for (int32_t k = 0; k < nb_bits; ++k)
{
//this is basically multiplying as it is an AND gate
//a(ciphertext1) should be the least significant bit
{
bootsAND(tmp + k, a + k, b + i, keyset);
MPI_Barrier(MPI_COMM_WORLD);
}
}
if (round > 0) {
for (int32_t i = 0; i < round; ++i) {
//prepend `round` zero bits in front
bootsCONSTANT(tmp3c1 + i, 0, keyset);
}
}
//copy all the bits that fit into a int32 with the 0s inside
for (int32_t i = 0; i < 32 - round; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c1 + i + round , tmp + i, keyset);
}
}
//the rest of the bits that couldn't fit inside
for (int32_t i = 0; i < round; ++i)
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c2 + i, tmp + i + 32 - round, keyset);
}
}
add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
round++;
}
for (int32_t i = 0; i < 32; ++i)
{
bootsCOPY(result + i, sum3c2 + i, keyset);
bootsCOPY(result2 + i, sum3c1 + i, keyset);
}
delete_LweSample_array(32, sum3c1);
delete_LweSample_array(32, sum3c2);
delete_LweSample_array(32, tmp);
delete_LweSample_array(32, tmp2);
delete_LweSample_array(32, tmp3c1);
delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, carry1);
delete_LweSample_array(32, carry2);
}
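/* MPImul32 is schoolbook shift-and-add under encryption: round i ANDs the
 * 32-bit multiplicand a with bit i of b, shifts the partial product left by
 * `round` (splitting it across the tmp3c1/tmp3c2 limbs), and accumulates with
 * the encrypted adder, so the 64-bit product ends up in (result, result2) =
 * (high limb, low limb). */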
void MPImul64(LweSample *result, LweSample *result2,LweSample *result3, LweSample *a, LweSample *b,LweSample *c,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
int mpi_initialized = 0;
MPI_Initialized(&mpi_initialized);
if (!mpi_initialized) MPI_Init(NULL, NULL);  // MPI_Init may run at most once per process
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
//mpi test
printf("Hello World from process %d of %d\n", world_rank, world_size);
MPI_Barrier(MPI_COMM_WORLD);
const LweParams *in_out_params = keyset->params->in_out_params;
//sum of the output
LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
LweSample *tmp = new_LweSample_array(32, in_out_params);
LweSample *tmp2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);
LweSample *carry1 = new_LweSample_array(32, in_out_params);
LweSample *carry2 = new_LweSample_array(32, in_out_params);
LweSample *carry3 = new_LweSample_array(32, in_out_params);
LweSample *carry4 = new_LweSample_array(32, in_out_params);
//set all these to 0
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset);
bootsCONSTANT(sum3c3 + i, 0, keyset);
bootsCONSTANT(tmp + i, 0, keyset);
bootsCONSTANT(tmp2 + i, 0, keyset);
bootsCONSTANT(tmp3c1 + i, 0, keyset);
bootsCONSTANT(tmp3c2 + i, 0, keyset);
bootsCONSTANT(tmp3c3 + i, 0, keyset);
bootsCONSTANT(tmp3c4 + i, 0, keyset);
bootsCONSTANT(carry1 + i, 0, keyset);
bootsCONSTANT(carry2 + i, 0, keyset);
bootsCONSTANT(carry3 + i, 0, keyset);
bootsCONSTANT(carry4 + i, 0, keyset);
}
//schoolbook multiplication: AND the multiplicand bits with each multiplier bit
int round = 0;
int counter1 = 0;
int counter2 = 0;
for (int32_t i = 0; i < nb_bits; ++i)
{
for (int32_t k = 0; k < nb_bits; ++k)
{
//this is basically multiplying as it is an AND gate
//a(ciphertext1) should be the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsAND(tmp + k, a + k, c + i, keyset);
#pragma omp section
bootsAND(tmp2 + k, b + k, c + i, keyset);
}
}
counter1 = 32 - round;
counter2 = 32 - counter1;
if (round > 0) {
for (int32_t i = 0; i < round; ++i) {
//prepend `round` zero bits in front
bootsCONSTANT(tmp3c1 + i, 0, keyset);
}
}
//copy all the bits that fit into a int32 with the 0s inside
//tmp to tmp3c1
for (int32_t i = 0; i < counter1; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c1 + i + round , tmp + i, keyset);
}
}
//remaining of tmp to tmp3c2
for (int32_t i = 0; i < counter2; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset);
}
}
//some of tmp2 to remaining of tmp3c2
//repeats 31 times
for (int32_t i = 0; i < counter1; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset);
}
}
//the rest of tmp2 to tmp3c3
//repeats 1 time
for (int32_t i = 0; i < counter2; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset);
}
}
add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
round++;
}
for (int32_t i = 0; i < 32; ++i)
{
bootsCOPY(result + i, sum3c3 + i, keyset);
bootsCOPY(result2 + i, sum3c2 + i, keyset);
bootsCOPY(result3 + i, sum3c1 + i, keyset);
}
delete_LweSample_array(32, sum3c1);
delete_LweSample_array(32, sum3c2);
delete_LweSample_array(32, sum3c3);
delete_LweSample_array(32, tmp);
delete_LweSample_array(32, tmp2);
delete_LweSample_array(32, tmp3c1);
delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, tmp3c3);
delete_LweSample_array(32, tmp3c4);
delete_LweSample_array(32, carry1);
delete_LweSample_array(32, carry2);
delete_LweSample_array(32, carry3);
delete_LweSample_array(32, carry4);
}
void MPImul128(LweSample *result, LweSample *result2,LweSample *result3,LweSample *result4,LweSample *result5, LweSample *a, LweSample *b,LweSample *c,LweSample *d, LweSample *e,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
int mpi_initialized = 0;
MPI_Initialized(&mpi_initialized);
if (!mpi_initialized) MPI_Init(NULL, NULL);  // MPI_Init may run at most once per process
int world_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
int world_size;
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
//mpi test
printf("Hello World from process %d of %d\n", world_rank, world_size);
MPI_Barrier(MPI_COMM_WORLD);
const LweParams *in_out_params = keyset->params->in_out_params;
//sum of the output
LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
LweSample *sum3c4 = new_LweSample_array(32, in_out_params);
LweSample *sum3c5 = new_LweSample_array(32, in_out_params);
LweSample *tmp = new_LweSample_array(32, in_out_params);
LweSample *tmp2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3 = new_LweSample_array(32, in_out_params);
LweSample *tmp4 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);
LweSample *tmp3c5 = new_LweSample_array(32, in_out_params);
LweSample *carry1 = new_LweSample_array(32, in_out_params);
LweSample *carry2 = new_LweSample_array(32, in_out_params);
LweSample *carry3 = new_LweSample_array(32, in_out_params);
LweSample *carry4 = new_LweSample_array(32, in_out_params);
LweSample *carry5 = new_LweSample_array(32, in_out_params);
//set all these to 0
for (int32_t i = 0; i < nb_bits; ++i)
{
bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset);
bootsCONSTANT(sum3c3 + i, 0, keyset);
bootsCONSTANT(sum3c4 + i, 0, keyset);
bootsCONSTANT(sum3c5 + i, 0, keyset);
bootsCONSTANT(tmp + i, 0, keyset);
bootsCONSTANT(tmp2 + i, 0, keyset);
bootsCONSTANT(tmp3 + i, 0, keyset);
bootsCONSTANT(tmp4 + i, 0, keyset);
bootsCONSTANT(tmp3c1 + i, 0, keyset);
bootsCONSTANT(tmp3c2 + i, 0, keyset);
bootsCONSTANT(tmp3c3 + i, 0, keyset);
bootsCONSTANT(tmp3c4 + i, 0, keyset);
bootsCONSTANT(tmp3c5 + i, 0, keyset);
bootsCONSTANT(carry1 + i, 0, keyset);
bootsCONSTANT(carry2 + i, 0, keyset);
bootsCONSTANT(carry3 + i, 0, keyset);
bootsCONSTANT(carry4 + i, 0, keyset);
bootsCONSTANT(carry5 + i, 0, keyset);
}
//schoolbook multiplication: AND the multiplicand bits with each multiplier bit
int round = 0;
int counter1 = 0;
int counter2 = 0;
for (int32_t i = 0; i < nb_bits; ++i)
{
for (int32_t k = 0; k < nb_bits; ++k)
{
//this is basically multiplying as it is an AND gate
//a(ciphertext1) should be the least significant bit
#pragma omp parallel sections num_threads(4)
{
#pragma omp section
bootsAND(tmp + k, a + k, e + i, keyset);
#pragma omp section
bootsAND(tmp2 + k, b + k, e + i, keyset);
#pragma omp section
bootsAND(tmp3 + k, c + k, e + i, keyset);
#pragma omp section
bootsAND(tmp4 + k, d + k, e + i, keyset);
}
}
counter1 = 32 - round;
counter2 = 32 - counter1;
if (round > 0) {
for (int32_t i = 0; i < round; ++i) {
//prepend `round` zero bits in front
bootsCONSTANT(tmp3c1 + i, 0, keyset);
}
}
//copy all the bits that fit into a int32 with the 0s inside
//tmp to tmp3c1
for (int32_t i = 0; i < counter1; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c1 + i + round , tmp + i, keyset);
}
}
//remaining of tmp to tmp3c2
for (int32_t i = 0; i < counter2; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset);
}
}
//some of tmp2 to remaining of tmp3c2
for (int32_t i = 0; i < counter1; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset);
}
}
//remaining tmp2 to tmp3c3
for (int32_t i = 0; i < counter2; ++i)
{
// +round cause infront has the 0s
//tmp is the least significant bit
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset);
}
}
//some of tmp3 to remaining tmp3c3
for (int32_t i = 0; i < counter1; ++i)
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c3 + i + counter2, tmp3 + i, keyset);
}
}
//rest of tmp3 to tmp3c4
for (int32_t i = 0; i < counter2; ++i)
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c4 + i, tmp3 + i + counter1, keyset);
}
}
//some of tmp4 to remaining tmp3c4
for (int32_t i = 0; i < counter1; ++i)
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c4 + i + counter2, tmp4 + i, keyset);
}
}
//rest of tmp4 to tmp3c5
for (int32_t i = 0; i < counter2; ++i)
{
#pragma omp parallel sections num_threads(2)
{
#pragma omp section
bootsCOPY(tmp3c5 + i, tmp4 + i + counter1, keyset);
}
}
add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset);
add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset);
round++;
}
for (int32_t i = 0; i < 32; ++i)
{
bootsCOPY(result + i, sum3c5 + i, keyset);
bootsCOPY(result2 + i, sum3c4 + i, keyset);
bootsCOPY(result3 + i, sum3c3 + i, keyset);
bootsCOPY(result4 + i, sum3c2 + i, keyset);
bootsCOPY(result5 + i, sum3c1 + i, keyset);
}
delete_LweSample_array(32, sum3c1);
delete_LweSample_array(32, sum3c2);
delete_LweSample_array(32, sum3c3);
delete_LweSample_array(32, sum3c4);
delete_LweSample_array(32, sum3c5);
delete_LweSample_array(32, tmp);
delete_LweSample_array(32, tmp2);
delete_LweSample_array(32, tmp3);
delete_LweSample_array(32, tmp4);
delete_LweSample_array(32, tmp3c1);
delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, tmp3c3);
delete_LweSample_array(32, tmp3c4);
delete_LweSample_array(32, tmp3c5);
delete_LweSample_array(32, carry1);
delete_LweSample_array(32, carry2);
delete_LweSample_array(32, carry3);
delete_LweSample_array(32, carry4);
delete_LweSample_array(32, carry5);
}
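/* MPImul64 and MPImul128 above repeat the same shift-and-add scheme with two
 * and four multiplicand limbs respectively, fanning the per-round AND gates
 * out over OpenMP sections. */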
int main() {
// dragonfly_cipher_cloud should have already appended 2 cipherstreams into cloud.data
printf("Reading the key...\n");
// reads the cloud key from file
FILE* cloud_key = fopen("cloud.key", "rb");
TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key);
fclose(cloud_key);
// reads the nbit key from file
FILE* nbit_key = fopen("nbit.key","rb");
TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key);
fclose(nbit_key);
// if necessary, the params are inside the key
const TFheGateBootstrappingParameterSet* params = bk->params;
// if necessary, the params are inside the key
const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params;
// Create ciphertext blocks for negative1, bit1, negative2, bit2 and values
LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* ciphertext1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
printf("Reading input 1...\n");
// reads ciphertexts from cloud.data
FILE* cloud_data = fopen("cloud.data", "rb");
for (int i = 0; i<32; i++) // line0
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams);
for (int i = 0; i<32; i++) // line1
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams);
// Decrypts bit size1
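// (bits are stored LSB-first, so rebuild the integer with shifts)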
int32_t int_bit1 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextbit1[i],nbitkey)>0;
int_bit1 |= (ai<<i); }
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params);
for (int i = 0; i<32; i++) // line10
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params);
printf("Reading input 2...\n");
for (int i = 0; i<32; i++) // line11
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams);
for (int i = 0; i<32; i++) // line12
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams);
// Decrypts bit size2
int32_t int_bit2 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextbit2[i],nbitkey)>0;
int_bit2 |= (ai<<i); }
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params);
for (int i=0; i<32; i++)
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params);
for (int i = 0; i<32; i++) // line21
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params);
printf("Reading operation code...\n");
// Get Operation Code from File
int32_t int_op;
std::ifstream read("operator.txt"); // declared here so the stream is in scope (assumes <fstream> is included)
read >> int_op;
// (originally a homomorphic add of negative1 and negative2, see the commented-out add below; the sum is computed in the clear instead)
LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
// add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE
// Decrypts Negative1
int32_t int_negative1 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextnegative1[i],nbitkey)>0;
int_negative1 |= (ai<<i); }
std::cout << int_negative1 << " => negative1" << "\n";
// convert first value negativity code from 2 to 1
if (int_negative1 == 2){
int_negative1 = 1;}
// Decrypts Negative2
int32_t int_negative2 = 0;
for (int i=0; i<32; i++) {
int ai = bootsSymDecrypt(&ciphertextnegative2[i],nbitkey)>0;
int_negative2 |= (ai<<i); }
std::cout << int_negative2 << " => negative2" << "\n";
// Add Negatives.
// If both v1 & v2 are positive, int_negative = 0
// If only v1 is negative, int_negative = 1
// If only v2 is negative, int_negative = 2
// If both v1 & v2 are negative, int_negative = 3
int32_t int_negative;
int_negative = (int_negative1 + int_negative2);
// std::cout << int_negative << " -> negatives" << "\n";
//export the negative and bit data for the verif
FILE* answer_data = fopen("answer.data", "wb");
// Write the negativity code to answer.data (code 3, both negative, is stored as 4)
int32_t ciphernegative = 0;
if (int_negative == 1){
ciphernegative = 1;
}
if (int_negative == 2){
ciphernegative = 2;
}
if (int_negative == 3){
ciphernegative = 4;
}
for (int i=0; i<32; i++) {
bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative>>i)&1, nbitkey);
}
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams);
std::cout << ciphernegative << " => total negatives" << "\n";
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative);
// Compare bit sizes
int32_t int_bit = 0;
if (int_op == 4){
if (int_bit1 >= int_bit2){int_bit = (int_bit1 * 2);}
else{int_bit = (int_bit2 * 2);}
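// a product of two n-bit values can need up to 2n bits, so the exported width is doubled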
for (int i=0; i<32; i++) {
bootsSymEncrypt(&ciphertextbit[i], (int_bit>>i)&1, nbitkey);}
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams);
std::cout << int_bit << " written to answer.data" << "\n";
if (int_bit1 >= int_bit2){int_bit = int_bit1;}
else{int_bit = int_bit2;}
}
else if (int_bit1 >= int_bit2) {
int_bit = int_bit1;
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams);
std::cout << int_bit << " written to answer.data" << "\n";
}
else{
int_bit = int_bit2;
for (int i = 0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams);
std::cout << int_bit << " written to answer.data" << "\n";
}
fclose(cloud_data);
// If trying to multiply a 256 bit number
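// (presumably because a 256x256-bit product can need 512 bits, more than the nine-block answer layout carries)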
if ((int_op == 4) && (int_bit >= 256)){
std::cout << "Cannot multiply 256 bit number!" << "\n";
fclose(answer_data);
return 126;
}
// Addition
// If (the operation is add AND (both numbers are positive OR both numbers are negative))
// OR (the operation is subtract AND exactly one number is negative):
// A+B, (-A)+(-B), A-(-B), (-A)-B
if ((int_op == 1 && (int_negative != 1 && int_negative != 2 )) || (int_op == 2 && (int_negative == 1 || int_negative == 2))) {
if (int_op == 1){
std::cout << int_bit << " bit Addition computation" << "\n";
}else{
std::cout << int_bit << " bit Subtraction computation" << "\n";
}
//32 Bit Addition
if (int_bit == 32)
{
// Ciphertext to hold the result and carry
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
printf("Doing the homomorphic computation...\n");
//Adding component
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
// Timings
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
printf("writing the answer to file...\n");
//Clean up
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//64 Bit Addition
if (int_bit == 64)
{
//Ciphertext to hold the result and carry
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
printf("Doing the homomorphic computation...\n");
//Adding component
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
//Clean up
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//128 Bit Addition
if (int_bit == 128)
{
//Ciphertext to hold the result and carry
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
printf("Doing the homomorphic computation...\n");
//Adding component
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
// Timing
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
//Clean up
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//256 Bit Addition
if (int_bit == 256)
{
// Ciphertexts to hold the results and carries
LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
// Timing
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk);
add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk);
add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk);
add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
// export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
// Subtraction
// If the operation is subtract, OR the operation is add AND exactly one value is negative: A-B, A+(-B), (-A)+B
else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))){
// Normal Subtraction computation with no negative numbers A-B OR Addition with 2nd number negative A+(-B)
if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)){
if (int_op == 2){
std::cout << int_bit << " bit Subtraction computation" << "\n";
}else {
std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n";
}
//32 Bit Subtraction
if(int_bit == 32)
{
printf("Doing the homomorphic computation...\n");
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
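//(two's complement: -b == ~b + 1, so a - b == a + ~b + 1;
// e.g. in 4 bits: 6 - 3 = 0110 + 1101 = 0011 = 3)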
//Step 1. Invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
//initialize temp and the temp carry to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
//Set temp to 1 to complete the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the first value to the negated second value, a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++) //result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//64 Bit Subtraction
if(int_bit == 64)
{
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
//initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
//Set temp to 1 to complete the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Add the rest of the inverted
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
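//higher limbs add only the propagated carry (tempcarry2 is zero), extending ~b + 1 across the full width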
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the first value to the negated second value, a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//128 Bit Subtraction
if(int_bit == 128)
{
printf("Doing the homomorphic computation...\n");
//compute the 128-bit difference via two's complement
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
NOT(inverse3, ciphertext11, bk, 32);
NOT(inverse4, ciphertext12, bk, 32);
//initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
//Set temp to 1 to complete the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Add the rest of the inverted
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the first value to the negated second value, a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
//256 Bit Subtraction
if (int_bit == 256)
{
printf("Doing the homomorphic computation...\n");
//compute the 256-bit difference via two's complement
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
NOT(inverse3, ciphertext11, bk, 32);
NOT(inverse4, ciphertext12, bk, 32);
NOT(inverse5, ciphertext13, bk, 32);
NOT(inverse6, ciphertext14, bk, 32);
NOT(inverse7, ciphertext15, bk, 32);
NOT(inverse8, ciphertext16, bk, 32);
//initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
//Set temp to 1 to complete the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Add the rest of the inverted
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the first value to the negated second value, a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("Writing the answer to file...\n");
//export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++) //result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) //result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) //result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) //result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) //result5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) //result6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++) //result7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++) //result8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
//Remaining cases, (-A)-(-B) and (-A)+B, both computed as B + (-A)
else{
if (int_op == 2){
std::cout << int_bit << " bit Subtraction computation" << "\n";
}else {
std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n";
}
if(int_bit == 32){
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
//initialize temp and the temp carry to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
//Set temp to 1 to complete the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the negated first value to the second value, (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 64){
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
//initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
//Set temp to 1 to complete the two's complement
bootsCONSTANT(temp, 1, bk);
//Add 1 to inverted
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Add the rest of the inverted
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
//Do the addition: add the negated first value to the second value, (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the result ciphertexts to answer.data (nine blocks of 32)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) //result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 128){
printf("Doing the homomorphic computation...\n");
//do some operations on the ciphertexts: here, we will compute the
//difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
//Initialize temp and the tempcarry arrays to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
//Set temp to 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
//Step 2. Add 1 to the inverted low chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Propagate the +1 carry through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
//Step 3. Do the addition: add the two's complement of the first value to the second, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
//pad blocks 5-8 and the carry slot with the carry ciphertext so the answer
//file always holds nine 32-bit blocks
for (int i=0; i<32; i++) // 5 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 256){
printf("Doing the homomorphic computation...\n");
//do some operations on the ciphertexts: here, we will compute the
//difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
//Subtraction Process
//Step 1. Invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
NOT(inverse5, ciphertext5, bk, 32);
NOT(inverse6, ciphertext6, bk, 32);
NOT(inverse7, ciphertext7, bk, 32);
NOT(inverse8, ciphertext8, bk, 32);
//Initialize temp and the tempcarry arrays to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
//Set temp to 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
//Step 2. Add 1 to the inverted low chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
//Propagate the +1 carry through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
//Step 3. Do the addition: add the two's complement of the first value to the second, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
printf("Writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // 1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i=0; i<32; i++) // 2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // 3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // 4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i=0; i<32; i++) // 5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) // 6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i=0; i<32; i++) // 7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i=0; i<32; i++) // 8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
//clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
// If Multiplication
else if (int_op == 4){
std::cout << int_bit << " bit Multiplication computation" << "\n";
if (int_bit == 128){
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// partial product 1: (128-bit a) x limb 1 of b
MPImul128(result1, result2, result3, result4, result5, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext9, ciphertextcarry1, 32, bk);
// partial product 2: (128-bit a) x limb 2 of b
MPImul128(result6, result7, result8, result9, result10, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10, ciphertextcarry1, 32, bk);
// partial product 3: (128-bit a) x limb 3 of b
MPImul128(result11, result12, result13, result14, result15, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext11, ciphertextcarry1, 32, bk);
// partial product 4: (128-bit a) x limb 4 of b
MPImul128(result16, result17, result18, result19, result20, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext12, ciphertextcarry1, 32, bk);
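// Each MPImul128 call above forms one partial product: the 128-bit first
// operand times a single 32-bit limb of the second operand, spread over
// five 32-bit output limbs (the exact limb order is an assumption read
// off the accumulation below). The add() chain that follows is plain
// schoolbook accumulation, each partial product shifted by one limb and
// summed in with the carry ciphertext threaded between adds; in plain
// integer arithmetic the same scheme is:
//   uint64_t acc[8] = {0};
//   for (int k = 0; k < 4; k++)               // limbs of multiplier b
//     for (int j = 0; j < 4; j++)             // limbs of multiplicand a
//       acc[k + j] += (uint64_t)a[j] * b[k];  // then carry-normalize acc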
add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk);
add(sum2, carryover2, result9, result3,carryover1,32, bk);
add(sum3, carryover3, result8, result2,carryover2,32, bk);
add(sum4, carryover4, result7, result1,carryover3,32, bk);
add(sum5, carryover5, result6, ciphertextcarry1,carryover4,32, bk);
add(sum6, carryover6, sum2, result15,carryover5,32, bk);
add(sum7, carryover7, sum3, result14,carryover6,32, bk);
add(sum8, carryover8, sum4, result13,carryover7,32, bk);
add(sum9, carryover9, sum5, result12,carryover8,32, bk);
add(sum10, carryover10, result11, ciphertextcarry1,carryover9,32, bk);
add(sum11, carryover11, sum7, result20,carryover10,32, bk);
add(sum12, carryover12, sum8, result19,carryover11,32, bk);
add(sum13, carryover13, sum9, result18,carryover12,32, bk);
add(sum14, carryover14, sum10, result17,carryover13,32, bk);
add(sum15, carryover15, result16 , ciphertextcarry1,carryover14,32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
MPI_Barrier(MPI_COMM_WORLD);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params);
for (int i=0; i<32; i++) // result5
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params);
for (int i=0; i<32; i++) // result6
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params);
for (int i=0; i<32; i++) // result7
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params);
for (int i=0; i<32; i++) // result8
export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params);
for (int i=0; i<32; i++) // carry
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, result9);
delete_gate_bootstrapping_ciphertext_array(32, result10);
delete_gate_bootstrapping_ciphertext_array(32, result11);
delete_gate_bootstrapping_ciphertext_array(32, result12);
delete_gate_bootstrapping_ciphertext_array(32, result13);
delete_gate_bootstrapping_ciphertext_array(32, result14);
delete_gate_bootstrapping_ciphertext_array(32, result15);
delete_gate_bootstrapping_ciphertext_array(32, result16);
delete_gate_bootstrapping_ciphertext_array(32, result17);
delete_gate_bootstrapping_ciphertext_array(32, result18);
delete_gate_bootstrapping_ciphertext_array(32, result19);
delete_gate_bootstrapping_ciphertext_array(32, result20);
delete_gate_bootstrapping_ciphertext_array(32, sum1);
delete_gate_bootstrapping_ciphertext_array(32, sum2);
delete_gate_bootstrapping_ciphertext_array(32, sum3);
delete_gate_bootstrapping_ciphertext_array(32, sum4);
delete_gate_bootstrapping_ciphertext_array(32, sum5);
delete_gate_bootstrapping_ciphertext_array(32, sum6);
delete_gate_bootstrapping_ciphertext_array(32, sum7);
delete_gate_bootstrapping_ciphertext_array(32, sum8);
delete_gate_bootstrapping_ciphertext_array(32, sum9);
delete_gate_bootstrapping_ciphertext_array(32, sum10);
delete_gate_bootstrapping_ciphertext_array(32, sum11);
delete_gate_bootstrapping_ciphertext_array(32, sum12);
delete_gate_bootstrapping_ciphertext_array(32, sum13);
delete_gate_bootstrapping_ciphertext_array(32, sum14);
delete_gate_bootstrapping_ciphertext_array(32, sum15);
delete_gate_bootstrapping_ciphertext_array(32, carryover1);
delete_gate_bootstrapping_ciphertext_array(32, carryover2);
delete_gate_bootstrapping_ciphertext_array(32, carryover3);
delete_gate_bootstrapping_ciphertext_array(32, carryover4);
delete_gate_bootstrapping_ciphertext_array(32, carryover5);
delete_gate_bootstrapping_ciphertext_array(32, carryover6);
delete_gate_bootstrapping_ciphertext_array(32, carryover7);
delete_gate_bootstrapping_ciphertext_array(32, carryover8);
delete_gate_bootstrapping_ciphertext_array(32, carryover9);
delete_gate_bootstrapping_ciphertext_array(32, carryover10);
delete_gate_bootstrapping_ciphertext_array(32, carryover11);
delete_gate_bootstrapping_ciphertext_array(32, carryover12);
delete_gate_bootstrapping_ciphertext_array(32, carryover13);
delete_gate_bootstrapping_ciphertext_array(32, carryover14);
delete_gate_bootstrapping_ciphertext_array(32, carryover15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 64){
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// partial product 1: (64-bit a) x limb 1 of b
MPImul64(result1, result2, result3, ciphertext1, ciphertext2, ciphertext9, ciphertextcarry1, 32, bk);
// partial product 2: (64-bit a) x limb 2 of b
MPImul64(result4, result5, result6, ciphertext1, ciphertext2, ciphertext10, ciphertextcarry1, 32, bk);
split(finalresult,finalresult2, finalresult3, result1, result2,result4,result5,result6,ciphertextcarry1,32,bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
MPI_Barrier(MPI_COMM_WORLD);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params);
for (int i=0; i<32; i++) // result3
export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params);
for (int i=0; i<32; i++) // result4
export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params);
//pad blocks 5-8 and the carry slot with the carry ciphertext so the answer
//file always holds nine 32-bit blocks
for (int i=0; i<32; i++) // 5 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_ciphertext_array(32, finalresult);
delete_gate_bootstrapping_ciphertext_array(32, finalresult2);
delete_gate_bootstrapping_ciphertext_array(32, finalresult3);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 32){
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
// 32x32-bit multiply producing a two-limb (64-bit) product
MPImul32(result1,result2,ciphertext1, ciphertext9,ciphertextcarry1, 32, bk);
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);
// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);
MPI_Barrier(MPI_COMM_WORLD);
printf("writing the answer to file...\n");
//export the 32 ciphertexts to a file (for the cloud)
for (int i=0; i<32; i++) // result1
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i=0; i<32; i++) // result2
export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
//pad blocks 3-8 and the carry slot with the carry ciphertext so the answer
//file always holds nine 32-bit blocks
for (int i=0; i<32; i++) // 3 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 4 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 5 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 6 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 7 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // 8 (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i=0; i<32; i++) // carry (padding)
export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
MPI_Barrier(MPI_COMM_WORLD);
// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
|
centr.h | namespace TSnap {
/////////////////////////////////////////////////
// Node centrality measures (See: http://en.wikipedia.org/wiki/Centrality)
/// Returns Degree centrality of a given node NId.
/// Degree centrality of a node is defined as its degree/(N-1), where N is the number of nodes in the network.
double GetDegreeCentr(const PUNGraph& Graph, const int& NId);
/// Returns Group Degree centrality of a given group of nodes.
/// Degree centrality of a node is defined as its degree/(N-1), where N is the number of nodes in the network.
//double GetGroupDegreeCentr(const PUNGraph& Graph, const PUNGraph& Group);
double GetGroupDegreeCentr(const PUNGraph& Graph, const TIntH& GroupNodes);
/// Returns Group Closeness centrality of a given group of nodes.
/// Generalizes closeness centrality from a single node to a group of nodes.
//double GetGroupDegreeCentr(const PUNGraph& Graph, const PUNGraph& Group);
double GetGroupClosenessCentr(const PUNGraph& Graph, const TIntH& GroupNodes);
/// Returns a group of k nodes that greedily maximizes group centrality.
TIntH MaxCPGreedyBetter(const PUNGraph& Graph, const int k);
/// Returns a group of k nodes that greedily maximizes group centrality (variant 1).
TIntH MaxCPGreedyBetter1(const PUNGraph& Graph, const int k);
/// Returns a group of k nodes that greedily maximizes group centrality (variant 2).
TIntH MaxCPGreedyBetter2(const PUNGraph& Graph, const int k);
/// Returns a group of k nodes that greedily maximizes group centrality (variant 3).
TIntH MaxCPGreedyBetter3(const PUNGraph& Graph, const int k);
/// Event importance
TIntFltH EventImportance(const PNGraph& Graph, const int k);
/// Intersect
int Intersect(TUNGraph::TNodeI Node, TIntH NNodes);
/// Intersect
int Intersect(TUNGraph::TNodeI Node, TStr NNodes);
/// Intersect
int Intersect(TUNGraph::TNodeI Node, int *NNodes, int NNodes_br);
/// Intersect (TStr variant)
int Intersect1(TUNGraph::TNodeI Node, TStr NNodes);
//Load nodes list
TIntH LoadNodeList(TStr InFNmNodes);
/// Returns Farness centrality of a given node NId.
/// Farness centrality of a node is the average shortest path length to all other nodes that reside in the same connected component as the given node.
template <class PGraph> double GetFarnessCentr(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);
template <class PGraph> double GetFarnessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);
/// Returns weighted Farness centrality of a given node \c NId.
/// Farness centrality of a node is the average shortest path length to all other nodes that reside in the same connected component as the given node.
double GetWeightedFarnessCentr(const PNEANet Graph, const int& NId, const TFltV& Attr, const bool& Normalized=true, const bool& IsDir=false);
/// Returns Closeness centrality of a given node NId.
/// Closeness centrality of a node is defined as 1/FarnessCentrality.
template <class PGraph> double GetClosenessCentr(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);
template <class PGraph> double GetClosenessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);
/// Returns weighted Closeness centrality of a given node \c NId.
/// Closeness centrality of a node is defined as 1/FarnessCentrality.
double GetWeightedClosenessCentr(const PNEANet Graph, const int& NId, const TFltV& Attr, const bool& Normalized=true, const bool& IsDir=false);
/// Returns node Eccentricity, the largest shortest-path distance from the node NId to any other node in the Graph.
/// @param IsDir false: ignore edge directions and consider edges as undirected (in case they are directed).
template <class PGraph> int GetNodeEcc(const PGraph& Graph, const int& NId, const bool& IsDir=false);
/// Computes (approximate) Node Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NIdBtwH, const double& NodeFrac=1.0, const bool& IsDir=false);
/// Computes (approximate) weighted Node Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
void GetWeightedBetweennessCentr(const PNEANet Graph, TIntFltH& NIdBtwH, const TFltV& Attr, const double& NodeFrac=1.0, const bool& IsDir=false);
/// Computes (approximate) Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntPrFltH& EdgeBtwH, const double& NodeFrac=1.0, const bool& IsDir=false);
/// Computes (approximate) weighted Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
void GetWeightedBetweennessCentr(const PNEANet Graph, TIntPrFltH& EdgeBtwH, const TFltV& Attr, const double& NodeFrac=1.0, const bool& IsDir=false);
/// Computes (approximate) Node and Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NIdBtwH, TIntPrFltH& EdgeBtwH, const double& NodeFrac=1.0, const bool& IsDir=false);
/// Computes (approximate) weighted Node and Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
void GetWeightedBetweennessCentr(const PNEANet Graph, TIntFltH& NIdBtwH, TIntPrFltH& EdgeBtwH, const TFltV& Attr, const double& NodeFrac=1.0, const bool& IsDir=false);
/// Computes (approximate) Betweenness Centrality of all nodes and all edges of the network.
/// To obtain exact betweenness values one needs to solve a single-source shortest-path problem for every node.
/// To speed up the algorithm we solve the shortest-path problem only for the BtwNIdV subset of nodes. This gives centrality values that are about Graph->GetNodes()/BtwNIdV.Len() times lower than the exact betweenness centrality values.
/// See "A Faster Algorithm for Betweenness Centrality", Ulrik Brandes, Journal of Mathematical Sociology, 2001, and
/// "Centrality Estimation in Large Networks", Ulrik Brandes and Christian Pich, 2006 for more details.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, const TIntV& BtwNIdV, TIntFltH& NodeBtwH, const bool& DoNodeCent, TIntPrFltH& EdgeBtwH, const bool& DoEdgeCent, const bool& IsDir);
/// Computes (approximate) weighted Betweenness Centrality of all nodes and all edges of the network.
void GetWeightedBetweennessCentr(const PNEANet Graph, const TIntV& BtwNIdV, TIntFltH& NodeBtwH, const bool& DoNodeCent, TIntPrFltH& EdgeBtwH, const bool& DoEdgeCent, const TFltV& Attr, const bool& IsDir);
/// Computes Eigenvector Centrality of all nodes in the network.
/// Eigenvector Centrality of a node N is defined recursively as the average of centrality values of N's neighbors in the network.
void GetEigenVectorCentr(const PUNGraph& Graph, TIntFltH& NIdEigenH, const double& Eps=1e-4, const int& MaxIter=100);
/// PageRank
/// For more info see: http://en.wikipedia.org/wiki/PageRank
template<class PGraph> void GetPageRank(const PGraph& Graph, TIntFltH& PRankH, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
template<class PGraph> void GetPageRank_v1(const PGraph& Graph, TIntFltH& PRankH, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#ifdef USE_OPENMP
template<class PGraph> void GetPageRankMP(const PGraph& Graph, TIntFltH& PRankH, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#endif
/// Weighted PageRank (TODO: Use template)
int GetWeightedPageRank(const PNEANet Graph, TIntFltH& PRankH, const TStr& Attr, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#ifdef USE_OPENMP
int GetWeightedPageRankMP(const PNEANet Graph, TIntFltH& PRankH, const TStr& Attr, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#endif
/// HITS: Hubs and Authorities
/// For more info see: http://en.wikipedia.org/wiki/HITS_algorithm
template<class PGraph> void GetHits(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter=20);
#ifdef USE_OPENMP
template<class PGraph> void GetHitsMP(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter=20);
#endif
/// Dijkstra Algorithm
/// For more info see: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
int GetWeightedShortestPath(const PNEANet Graph, const int& SrcNId, TIntFltH& NIdDistH, const TFltV& Attr);
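// Example usage (a minimal sketch; assumes a PUNGraph built elsewhere,
// e.g. with TSnap::GenRndGnm from gen.h):
//   PUNGraph G = TSnap::GenRndGnm<PUNGraph>(100, 500, false);
//   double Cl = TSnap::GetClosenessCentr(G, G->BegNI().GetId());
//   TIntFltH PRankH;
//   TSnap::GetPageRank(G, PRankH);                // C=0.85, Eps=1e-4 defaults
//   TIntFltH NIdBtwH;
//   TSnap::GetBetweennessCentr(G, NIdBtwH, 1.0);  // NodeFrac=1.0: exact values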
/////////////////////////////////////////////////
// Implementation
template <class PGraph>
double GetFarnessCentr(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) {
TIntH NDistH(Graph->GetNodes());
TSnap::GetShortPath<PGraph>(Graph, NId, NDistH, IsDir, TInt::Mx);
double sum = 0;
for (TIntH::TIter I = NDistH.BegI(); I < NDistH.EndI(); I++) {
sum += I->Dat();
}
if (NDistH.Len() > 1) {
double centr = sum/double(NDistH.Len()-1);
if (Normalized) {
centr *= (Graph->GetNodes() - 1)/double(NDistH.Len()-1);
}
return centr;
}
else { return 0.0; }
}
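// Note on Normalized: sum/(r-1) is the average distance over the r nodes
// reachable from NId; the extra factor (N-1)/(r-1) penalizes nodes in
// small components, so the reciprocal taken in GetClosenessCentr matches
// the Wasserman-Faust corrected closeness ((r-1)/(N-1)) * ((r-1)/sum).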
template <class PGraph>
double GetFarnessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) {
TIntH NDistH(Graph->GetNodes());
TSnap::GetShortPath<PGraph>(Graph, NId, NDistH, IsDir, TInt::Mx);
double sum = 0;
for (TIntH::TIter I = NDistH.BegI(); I < NDistH.EndI(); I++) {
sum += I->Dat();
}
if (NDistH.Len() > 1) {
double centr = sum/double(NDistH.Len()-1);
if (Normalized) {
centr *= (Graph->GetNodes() - 1)/double(NDistH.Len()-1);
}
return centr;
}
else { return 0.0; }
}
template <class PGraph>
double GetClosenessCentr(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) {
const double Farness = GetFarnessCentr<PGraph> (Graph, NId, Normalized, IsDir);
if (Farness != 0.0) { return 1.0/Farness; }
else { return 0.0; }
}
template <class PGraph>
double GetClosenessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) {
const double Farness = GetFarnessCentrMP<PGraph> (Graph, NId, Normalized, IsDir);
if (Farness != 0.0) { return 1.0/Farness; }
else { return 0.0; }
}
template <class PGraph>
int GetNodeEcc(const PGraph& Graph, const int& NId, const bool& IsDir) {
int NodeEcc;
int Dist;
TBreathFS<PGraph> BFS(Graph);
// get shortest paths to all the nodes
BFS.DoBfs(NId, true, ! IsDir, -1, TInt::Mx);
NodeEcc = 0;
// find the largest value
for (int i = 0; i < BFS.NIdDistH.Len(); i++) {
Dist = BFS.NIdDistH[i];
if (Dist > NodeEcc) {
NodeEcc = Dist;
}
}
return NodeEcc;
}
// Page Rank -- there are two different implementations (uncomment the desired 2 lines):
// Berkhin -- (the correct way) see Algorithm 1 of P. Berkhin, A Survey on PageRank Computing, Internet Mathematics, 2005
// iGraph -- iGraph implementation (which treats leaked PageRank in a funny way)
// This implementation is an unoptimized version, it accesses nodes via a hash table.
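// Per-iteration update implemented below (Berkhin variant):
//   PR'(v) = C * sum over in-neighbors u of v of PR(u)/outdeg(u)
// followed by uniform re-insertion of the rank lost at dangling nodes:
//   PR(v) = PR'(v) + (1 - sum_w PR'(w)) / N
// The loop stops once the L1 change sum_v |PR(v) - PR_prev(v)| < Eps.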
template<class PGraph>
void GetPageRank_v1(const PGraph& Graph, TIntFltH& PRankH, const double& C, const double& Eps, const int& MaxIter) {
const int NNodes = Graph->GetNodes();
//const double OneOver = 1.0/double(NNodes);
PRankH.Gen(NNodes);
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
PRankH.AddDat(NI.GetId(), 1.0/NNodes);
//IAssert(NI.GetId() == PRankH.GetKey(PRankH.Len()-1));
}
TFltV TmpV(NNodes);
for (int iter = 0; iter < MaxIter; iter++) {
int j = 0;
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++, j++) {
TmpV[j] = 0;
for (int e = 0; e < NI.GetInDeg(); e++) {
const int InNId = NI.GetInNId(e);
const int OutDeg = Graph->GetNI(InNId).GetOutDeg();
if (OutDeg > 0) {
TmpV[j] += PRankH.GetDat(InNId) / OutDeg; }
}
TmpV[j] = C*TmpV[j]; // Berkhin (the correct way of doing it)
//TmpV[j] = C*TmpV[j] + (1.0-C)*OneOver; // iGraph
}
double diff=0, sum=0, NewVal;
for (int i = 0; i < TmpV.Len(); i++) { sum += TmpV[i]; }
const double Leaked = (1.0-sum) / double(NNodes);
for (int i = 0; i < PRankH.Len(); i++) { // re-insert leaked PageRank
NewVal = TmpV[i] + Leaked; // Berkhin
//NewVal = TmpV[i] / sum; // iGraph
diff += fabs(NewVal-PRankH[i]);
PRankH[i] = NewVal;
}
if (diff < Eps) { break; }
}
}
// Page Rank -- there are two different implementations (uncomment the desired 2 lines):
// Berkhin -- (the correct way) see Algorithm 1 of P. Berkhin, A Survey on PageRank Computing, Internet Mathematics, 2005
// iGraph -- iGraph implementation (which treats leaked PageRank in a funny way)
// This implementation is an optimized version, it builds a vector and accesses nodes via the vector.
template<class PGraph>
void GetPageRank(const PGraph& Graph, TIntFltH& PRankH, const double& C, const double& Eps, const int& MaxIter) {
const int NNodes = Graph->GetNodes();
TVec<typename PGraph::TObj::TNodeI> NV;
PRankH.Gen(NNodes);
int MxId = -1;
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
NV.Add(NI);
PRankH.AddDat(NI.GetId(), 1.0/NNodes);
int Id = NI.GetId();
if (Id > MxId) {
MxId = Id;
}
}
TFltV PRankV(MxId+1);
TIntV OutDegV(MxId+1);
for (int j = 0; j < NNodes; j++) {
typename PGraph::TObj::TNodeI NI = NV[j];
int Id = NI.GetId();
PRankV[Id] = 1.0/NNodes;
OutDegV[Id] = NI.GetOutDeg();
}
TFltV TmpV(NNodes);
for (int iter = 0; iter < MaxIter; iter++) {
for (int j = 0; j < NNodes; j++) {
typename PGraph::TObj::TNodeI NI = NV[j];
TFlt Tmp = 0;
for (int e = 0; e < NI.GetInDeg(); e++) {
const int InNId = NI.GetInNId(e);
const int OutDeg = OutDegV[InNId];
if (OutDeg > 0) {
Tmp += PRankV[InNId] / OutDeg;
}
}
TmpV[j] = C*Tmp; // Berkhin (the correct way of doing it)
}
double sum = 0;
for (int i = 0; i < TmpV.Len(); i++) { sum += TmpV[i]; }
const double Leaked = (1.0-sum) / double(NNodes);
double diff = 0;
for (int i = 0; i < NNodes; i++) {
typename PGraph::TObj::TNodeI NI = NV[i];
double NewVal = TmpV[i] + Leaked; // Berkhin
int Id = NI.GetId();
diff += fabs(NewVal-PRankV[Id]);
PRankV[Id] = NewVal;
}
if (diff < Eps) { break; }
}
for (int i = 0; i < NNodes; i++) {
typename PGraph::TObj::TNodeI NI = NV[i];
PRankH[i] = PRankV[NI.GetId()];
}
}
#ifdef USE_OPENMP
// Page Rank -- there are two different implementations (uncomment the desired 2 lines):
// Berkhin -- (the correct way) see Algorithm 1 of P. Berkhin, A Survey on PageRank Computing, Internet Mathematics, 2005
// iGraph -- iGraph implementation (which treats leaked PageRank in a funny way)
// This is a parallel, optimized version.
template<class PGraph>
void GetPageRankMP(const PGraph& Graph, TIntFltH& PRankH, const double& C, const double& Eps, const int& MaxIter) {
const int NNodes = Graph->GetNodes();
TVec<typename PGraph::TObj::TNodeI> NV;
PRankH.Gen(NNodes);
int MxId = -1;
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
NV.Add(NI);
PRankH.AddDat(NI.GetId(), 1.0/NNodes);
int Id = NI.GetId();
if (Id > MxId) {
MxId = Id;
}
}
TFltV PRankV(MxId+1);
TIntV OutDegV(MxId+1);
#pragma omp parallel for schedule(dynamic,10000)
for (int j = 0; j < NNodes; j++) {
typename PGraph::TObj::TNodeI NI = NV[j];
int Id = NI.GetId();
PRankV[Id] = 1.0/NNodes;
OutDegV[Id] = NI.GetOutDeg();
}
TFltV TmpV(NNodes);
for (int iter = 0; iter < MaxIter; iter++) {
#pragma omp parallel for schedule(dynamic,10000)
for (int j = 0; j < NNodes; j++) {
typename PGraph::TObj::TNodeI NI = NV[j];
TFlt Tmp = 0;
for (int e = 0; e < NI.GetInDeg(); e++) {
const int InNId = NI.GetInNId(e);
const int OutDeg = OutDegV[InNId];
if (OutDeg > 0) {
Tmp += PRankV[InNId] / OutDeg;
}
}
TmpV[j] = C*Tmp; // Berkhin (the correct way of doing it)
}
double sum = 0;
#pragma omp parallel for reduction(+:sum) schedule(dynamic,10000)
for (int i = 0; i < TmpV.Len(); i++) { sum += TmpV[i]; }
const double Leaked = (1.0-sum) / double(NNodes);
double diff = 0;
#pragma omp parallel for reduction(+:diff) schedule(dynamic,10000)
for (int i = 0; i < NNodes; i++) {
double NewVal = TmpV[i] + Leaked; // Berkhin
int Id = NV[i].GetId();
diff += fabs(NewVal-PRankV[Id]);
PRankV[Id] = NewVal;
}
if (diff < Eps) { break; }
}
#pragma omp parallel for schedule(dynamic,10000)
for (int i = 0; i < NNodes; i++) {
typename PGraph::TObj::TNodeI NI = NV[i];
PRankH[i] = PRankV[NI.GetId()];
}
}
#endif // USE_OPENMP
// Betweenness Centrality
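// The implementation below follows Brandes' accumulation scheme: for each
// source s a BFS records sigma(v) (number of shortest s-v paths), d(v)
// (distance) and P(v) (predecessors on shortest paths); nodes are then
// popped in reverse BFS order and dependencies accumulate as
//   delta(v) += (sigma(v)/sigma(w)) * (1 + delta(w))  for each v in P(w).
// Node scores add delta(w)/2 (each path is discovered from both of its
// endpoints over the full source loop) and edge scores add the same
// per-predecessor contribution c.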
template<class PGraph>
void GetBetweennessCentr(const PGraph& Graph, const TIntV& BtwNIdV, TIntFltH& NodeBtwH, const bool& DoNodeCent, TIntPrFltH& EdgeBtwH, const bool& DoEdgeCent, const bool& IsDir) {
if (DoNodeCent) { NodeBtwH.Clr(); }
if (DoEdgeCent) { EdgeBtwH.Clr(); }
const int nodes = Graph->GetNodes();
TIntS S(nodes);
TIntQ Q(nodes);
TIntIntVH P(nodes); // one vector for every node
TIntFltH delta(nodes);
TIntH sigma(nodes), d(nodes);
// init
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
if (DoNodeCent) {
NodeBtwH.AddDat(NI.GetId(), 0); }
if (DoEdgeCent) {
for (int e = 0; e < NI.GetOutDeg(); e++) {
if (Graph->HasFlag(gfDirected) && IsDir) {
// add all outgoing edges for directed graphs
EdgeBtwH.AddDat(TIntPr(NI.GetId(), NI.GetOutNId(e)), 0);
} else {
// add each edge only once in undirected graphs
if (NI.GetId() < NI.GetOutNId(e)) {
EdgeBtwH.AddDat(TIntPr(NI.GetId(), NI.GetOutNId(e)), 0);
}
}
}
// add incoming edges in directed graphs that were not added yet
if (Graph->HasFlag(gfDirected) && !IsDir) {
for (int e = 0; e < NI.GetInDeg(); e++) {
if (NI.GetId() < NI.GetInNId(e) &&
!Graph->IsEdge(NI.GetId(), NI.GetInNId(e))) {
EdgeBtwH.AddDat(TIntPr(NI.GetId(), NI.GetInNId(e)), 0);
}
}
}
}
sigma.AddDat(NI.GetId(), 0);
d.AddDat(NI.GetId(), -1);
P.AddDat(NI.GetId(), TIntV());
delta.AddDat(NI.GetId(), 0);
}
// calculate betweenness
for (int k=0; k < BtwNIdV.Len(); k++) {
const typename PGraph::TObj::TNodeI NI = Graph->GetNI(BtwNIdV[k]);
// reset
for (int i = 0; i < sigma.Len(); i++) {
sigma[i]=0; d[i]=-1; delta[i]=0; P[i].Clr(false);
}
S.Clr(false);
Q.Clr(false);
sigma.AddDat(NI.GetId(), 1);
d.AddDat(NI.GetId(), 0);
Q.Push(NI.GetId());
while (! Q.Empty()) {
const int v = Q.Top(); Q.Pop();
const typename PGraph::TObj::TNodeI NI2 = Graph->GetNI(v);
S.Push(v);
const int VDat = d.GetDat(v);
// iterate over all outgoing edges
for (int e = 0; e < NI2.GetOutDeg(); e++) {
const int w = NI2.GetOutNId(e);
if (d.GetDat(w) < 0) { // find w for the first time
Q.Push(w);
d.AddDat(w, VDat+1);
}
//shortest path to w via v ?
if (d.GetDat(w) == VDat+1) {
sigma.AddDat(w) += sigma.GetDat(v);
P.GetDat(w).Add(v);
}
}
// if ignoring direction in directed networks, iterate over incoming edges
if (Graph->HasFlag(gfDirected) && !IsDir) {
for (int e = 0; e < NI2.GetInDeg(); e++) {
const int w = NI2.GetInNId(e);
// skip neighbors that are also outgoing
if (Graph->IsEdge(NI2.GetId(), w)) {
continue;
}
if (d.GetDat(w) < 0) { // find w for the first time
Q.Push(w);
d.AddDat(w, VDat+1);
}
//shortest path to w via v ?
if (d.GetDat(w) == VDat+1) {
sigma.AddDat(w) += sigma.GetDat(v);
P.GetDat(w).Add(v);
}
}
}
}
while (! S.Empty()) {
const int w = S.Top();
const double SigmaW = sigma.GetDat(w);
const double DeltaW = delta.GetDat(w);
const TIntV NIdV = P.GetDat(w);
S.Pop();
for (int i = 0; i < NIdV.Len(); i++) {
const int NId = NIdV[i];
const double c = (sigma.GetDat(NId)*1.0/SigmaW) * (1+DeltaW);
delta.AddDat(NId) += c;
if (DoEdgeCent) {
if (Graph->HasFlag(gfDirected) && IsDir) {
EdgeBtwH.AddDat(TIntPr(NId, w)) += c;
} else {
EdgeBtwH.AddDat(TIntPr(TMath::Mn(NId, w), TMath::Mx(NId, w))) += c;
}
}
}
if (DoNodeCent && w != NI.GetId()) {
NodeBtwH.AddDat(w) += delta.GetDat(w)/2.0; }
}
}
}
template<class PGraph>
void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NodeBtwH, const double& NodeFrac, const bool& IsDir) {
TIntPrFltH EdgeBtwH;
TIntV NIdV; Graph->GetNIdV(NIdV);
if (NodeFrac < 1.0) { // calculate betweenness centrality for a subset of nodes
NIdV.Shuffle(TInt::Rnd);
for (int i = int((1.0-NodeFrac)*NIdV.Len()); i > 0; i--) {
NIdV.DelLast(); }
}
GetBetweennessCentr<PGraph> (Graph, NIdV, NodeBtwH, true, EdgeBtwH, false, IsDir);
}
template<class PGraph>
void GetBetweennessCentr(const PGraph& Graph, TIntPrFltH& EdgeBtwH, const double& NodeFrac, const bool& IsDir) {
TIntFltH NodeBtwH;
TIntV NIdV; Graph->GetNIdV(NIdV);
if (NodeFrac < 1.0) { // calculate betweenness centrality for a subset of nodes
NIdV.Shuffle(TInt::Rnd);
for (int i = int((1.0-NodeFrac)*NIdV.Len()); i > 0; i--) {
NIdV.DelLast(); }
}
GetBetweennessCentr<PGraph> (Graph, NIdV, NodeBtwH, false, EdgeBtwH, true, IsDir);
}
template<class PGraph>
void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NodeBtwH, TIntPrFltH& EdgeBtwH, const double& NodeFrac, const bool& IsDir) {
TIntV NIdV; Graph->GetNIdV(NIdV);
if (NodeFrac < 1.0) { // calculate betweenness centrality for a subset of nodes
NIdV.Shuffle(TInt::Rnd);
for (int i = int((1.0-NodeFrac)*NIdV.Len()); i > 0; i--) {
NIdV.DelLast(); }
}
GetBetweennessCentr<PGraph> (Graph, NIdV, NodeBtwH, true, EdgeBtwH, true, IsDir);
}
template<class PGraph>
void GetHits(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter) {
const int NNodes = Graph->GetNodes();
NIdHubH.Gen(NNodes);
NIdAuthH.Gen(NNodes);
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
NIdHubH.AddDat(NI.GetId(), 1.0);
NIdAuthH.AddDat(NI.GetId(), 1.0);
}
double Norm=0;
for (int iter = 0; iter < MaxIter; iter++) {
// update authority scores
Norm = 0;
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
double& Auth = NIdAuthH.GetDat(NI.GetId()).Val;
Auth = 0;
for (int e = 0; e < NI.GetInDeg(); e++) {
Auth += NIdHubH.GetDat(NI.GetInNId(e)); }
Norm += Auth*Auth;
}
Norm = sqrt(Norm);
for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; }
// update hub scores
Norm = 0;
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
double& Hub = NIdHubH.GetDat(NI.GetId()).Val;
Hub = 0;
for (int e = 0; e < NI.GetOutDeg(); e++) {
Hub += NIdAuthH.GetDat(NI.GetOutNId(e)); }
Norm += Hub*Hub;
}
Norm = sqrt(Norm);
for (int i = 0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; }
}
// make sure Hub and Authority scores normalize to L2 norm 1
Norm = 0.0;
for (int i = 0; i < NIdHubH.Len(); i++) { Norm += TMath::Sqr(NIdHubH[i]); }
Norm = sqrt(Norm);
for (int i = 0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; }
Norm = 0.0;
for (int i = 0; i < NIdAuthH.Len(); i++) { Norm += TMath::Sqr(NIdAuthH[i]); }
Norm = sqrt(Norm);
for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; }
}
#ifdef USE_OPENMP
template<class PGraph>
void GetHitsMP(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter) {
const int NNodes = Graph->GetNodes();
TIntV NV;
NIdHubH.Gen(NNodes);
NIdAuthH.Gen(NNodes);
for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) {
NV.Add(NI.GetId());
NIdHubH.AddDat(NI.GetId(), 1.0);
NIdAuthH.AddDat(NI.GetId(), 1.0);
}
double Norm=0;
for (int iter = 0; iter < MaxIter; iter++) {
// update authority scores
Norm = 0;
#pragma omp parallel for reduction(+:Norm) schedule(dynamic,1000)
for (int i = 0; i < NNodes; i++) {
typename PGraph::TObj::TNodeI NI = Graph->GetNI(NV[i]);
double& Auth = NIdAuthH.GetDat(NI.GetId()).Val;
Auth = 0;
for (int e = 0; e < NI.GetInDeg(); e++) {
Auth += NIdHubH.GetDat(NI.GetInNId(e)); }
Norm = Norm + Auth*Auth;
}
Norm = sqrt(Norm);
for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; }
// update hub scores
Norm = 0;
#pragma omp parallel for reduction(+:Norm) schedule(dynamic,1000)
for (int i = 0; i < NNodes; i++) {
typename PGraph::TObj::TNodeI NI = Graph->GetNI(NV[i]);
double& Hub = NIdHubH.GetDat(NI.GetId()).Val;
Hub = 0;
for (int e = 0; e < NI.GetOutDeg(); e++) {
Hub += NIdAuthH.GetDat(NI.GetOutNId(e)); }
Norm = Norm + Hub*Hub;
}
Norm = sqrt(Norm);
for (int i = 0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; }
}
// make sure Hub and Authority scores normalize to L2 norm 1
Norm = 0.0;
for (int i = 0; i < NIdHubH.Len(); i++) { Norm += TMath::Sqr(NIdHubH[i]); }
Norm = sqrt(Norm);
for (int i = 0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; }
Norm = 0.0;
for (int i = 0; i < NIdAuthH.Len(); i++) { Norm += TMath::Sqr(NIdAuthH[i]); }
Norm = sqrt(Norm);
for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; }
}
#endif
/// Gets sequence of PageRank tables from given \c GraphSeq into \c TableSeq.
template <class PGraph>
void MapPageRank(const TVec<PGraph>& GraphSeq, TVec<PTable>& TableSeq,
TTableContext* Context,
const double& C, const double& Eps, const int& MaxIter) {
int NumGraphs = GraphSeq.Len();
TableSeq.Reserve(NumGraphs, NumGraphs);
// This loop is parallelizable.
for (TInt i = 0; i < NumGraphs; i++) {
TIntFltH PRankH;
GetPageRank(GraphSeq[i], PRankH, C, Eps, MaxIter);
TableSeq[i] = TTable::TableFromHashMap(PRankH, "NodeId", "PageRank", Context, false);
}
}
/// Gets sequence of Hits tables from given \c GraphSeq into \c TableSeq.
template <class PGraph>
void MapHits(const TVec<PGraph>& GraphSeq, TVec<PTable>& TableSeq,
TTableContext* Context,
const int& MaxIter) {
int NumGraphs = GraphSeq.Len();
TableSeq.Reserve(NumGraphs, NumGraphs);
// This loop is parallelizable.
for (TInt i = 0; i < NumGraphs; i++) {
TIntFltH HubH;
TIntFltH AuthH;
GetHits(GraphSeq[i], HubH, AuthH, MaxIter);
PTable HubT = TTable::TableFromHashMap(HubH, "NodeId", "Hub", Context, false);
PTable AuthT = TTable::TableFromHashMap(AuthH, "NodeId", "Authority", Context, false);
PTable HitsT = HubT->Join("NodeId", AuthT, "NodeId");
HitsT->Rename("1.NodeId", "NodeId");
HitsT->Rename("1.Hub", "Hub");
HitsT->Rename("2.Authority", "Authority");
TStrV V = TStrV(3, 0);
V.Add("NodeId");
V.Add("Hub");
V.Add("Authority");
HitsT->ProjectInPlace(V);
TableSeq[i] = HitsT;
}
}
}; // namespace TSnap
|
GB_binop__bshift_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_uint16
// A.*B function (eWiseMult): GB_AemultB__bshift_uint16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_uint16
// C+=b function (dense accum): GB_Cdense_accumb__bshift_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_uint16
// C=scalar+B GB_bind1st__bshift_uint16
// C=scalar+B' GB_bind1st_tran__bshift_uint16
// C=A+scalar GB_bind2nd__bshift_uint16
// C=A'+scalar GB_bind2nd_tran__bshift_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_bitshift_uint16 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bshift_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__bshift_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bshift_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bshift_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = GB_bitshift_uint16 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bshift_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = Ax [p] ;
Cx [p] = GB_bitshift_uint16 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint16 (x, aij) ; \
}
GrB_Info GB_bind1st_tran__bshift_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint16 (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__bshift_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
sieve.c | /*
* Adapted from: http://w...content-available-to-author-only...s.org/sieve-of-eratosthenes
*
* Program tested on PARCODE
 *
 * Sequential time:
 * real 0m4.412s
 * user 0m4.318s
 * sys 0m0.080s
 *
 * Parallel time:
 * real 0m3.659s
 * user 0m6.781s
 * sys 0m0.080s
 *
 * Parallel time with scheduling:
* real 0m2.487s
* user 0m9.562s
* sys 0m0.084s
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
int sieveOfEratosthenes(int n)
{
// Create a boolean array "prime[0..n]" and initialize
// all entries as true. prime[i] will end up false if
// i is not a prime, and true otherwise.
int primes = 0;
bool *prime = (bool *)malloc((n + 1) * sizeof(bool));
int sqrt_n = sqrt(n);
memset(prime, true, (n + 1) * sizeof(bool));
//#pragma omp parallel for
#pragma omp parallel for schedule(dynamic)
for (int p = 2; p <= sqrt_n; p++)
{
// If prime[p] is not changed, then it is a prime
if (prime[p] == true)
{
// Update all multiples of p
for (int i = p * 2; i <= n; i += p)
prime[i] = false;
}
}
// count prime numbers
#pragma omp parallel for reduction(+:primes)
for (int p = 2; p <= n; p++)
if (prime[p])
primes++;
free(prime);
return (primes);
}
int main()
{
int n = 100000000;
printf("%d\n", sieveOfEratosthenes(n));
return 0;
}
|
test2.c | int main() {
int x;
#pragma omp parallel
{
0;
if (1) {
x = 0;
2;
#pragma omp barrier
x;
3;
} else {
4;
while (5) {
6;
#pragma omp barrier
7;
x = 10;
#pragma omp barrier
}
8;
}
9;
#pragma omp barrier
10;
}
}
|
distribute_PEs.c | #define N 1000
#define NB_CLUSTERS 4
#define NB_PES 16
#define MIN(x, y) ((x) < (y) ? x : y)
#include <stdio.h>
/* Initialize an array between 2 given lines */
void init_array(int a[N][N], int begin, int end) {
for (int i = begin; i < end; i++)
for (int j = 0; j < N; j++)
a[i][j] = 2*i + 3*j;
}
int main() {
int a[N][N];
int slice = N/(NB_CLUSTERS*NB_PES);
/* Launch enough OpenMP threads to control the whole fabric:
Assume that the runtime allows enough threads with nested
parallelism */
#pragma omp parallel for num_threads(NB_CLUSTERS)
for (int cluster = 0; cluster < NB_CLUSTERS; cluster++)
#pragma omp parallel for num_threads(NB_PES)
for (int pe = 0; pe < NB_PES; pe++) {
/* So now the iterations should be distributed with 1
iteration/thread, on NB_CLUSTERS*NB_PES threads.
Distribute the initialization on all the fabric: */
int global_pe = cluster*NB_PES + pe;
int begin = slice*global_pe;
int end = MIN(N, slice*(global_pe + 1));
#pragma smecy map(STHORM, cluster, pe)
#pragma smecy arg(a, out, /[begin:end-1][])
init_array(a, begin, end);
}
printf("a[27][42] = %d\n", a[27][42]);
return 0;
}
|
openmp3.c | #include <math.h>
#include <omp.h>
double
inner_sum(double *li, double *lj, int n)
{
double s = 0;
for (int i = 0; i < n; i++) {
s += li[i] * lj[i];
}
return s;
}
void
cholesky(double *A, double *L, int n)
{
for (int j = 0; j < n; j++) {
double s = inner_sum(&L[j * n], &L[j * n], j);
L[j * n + j] = sqrt(A[j * n + j] - s);
#pragma omp parallel for schedule(static, 8)
for (int i = j + 1; i < n; i++) {
double s = inner_sum(&L[j * n], &L[i * n], j);
L[i * n + j] = (1.0 / L[j * n + j] * (A[i * n + j] - s));
}
}
}
|
draw.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD RRRR AAA W W %
% D D R R A A W W %
% D D RRRR AAAAA W W W %
% D D R R A A WW WW %
% DDDD R R A A W W %
% %
% %
% MagickCore Image Drawing Methods %
% %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon
% rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion",
% Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent
% (www.appligent.com) contributed the dash pattern, linecap stroking
% algorithm, and minor rendering improvements.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define BezierQuantum 200
#define PrimitiveExtentPad 2048
#define MaxBezierCoordinates 67108864
#define ThrowPointExpectedException(image,token) \
{ \
(void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \
"NonconformingDrawingPrimitiveDefinition","`%s'",token); \
status=MagickFalse; \
break; \
}
/*
Typedef declarations.
*/
typedef struct _EdgeInfo
{
SegmentInfo
bounds;
double
scanline;
PointInfo
*points;
size_t
number_points;
ssize_t
direction;
MagickBooleanType
ghostline;
size_t
highwater;
} EdgeInfo;
typedef struct _ElementInfo
{
double
cx,
cy,
major,
minor,
angle;
} ElementInfo;
typedef struct _MVGInfo
{
PrimitiveInfo
**primitive_info;
size_t
*extent;
ssize_t
offset;
PointInfo
point;
ExceptionInfo
*exception;
} MVGInfo;
typedef struct _PolygonInfo
{
EdgeInfo
*edges;
size_t
number_edges;
} PolygonInfo;
typedef enum
{
MoveToCode,
OpenCode,
GhostlineCode,
LineToCode,
EndCode
} PathInfoCode;
typedef struct _PathInfo
{
PointInfo
point;
PathInfoCode
code;
} PathInfo;
/*
Forward declarations.
*/
static Image
*DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
ExceptionInfo *);
static MagickBooleanType
DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *),
RenderMVGContent(Image *,const DrawInfo *,const size_t),
TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
const double,const MagickBooleanType,const MagickBooleanType),
TraceBezier(MVGInfo *,const size_t),
TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);
static PrimitiveInfo
*TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);
static size_t
TracePath(Image *,MVGInfo *,const char *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireDrawInfo() returns a DrawInfo structure properly initialized.
%
% The format of the AcquireDrawInfo method is:
%
% DrawInfo *AcquireDrawInfo(void)
%
*/
MagickExport DrawInfo *AcquireDrawInfo(void)
{
DrawInfo
*draw_info;
draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info));
GetDrawInfo((ImageInfo *) NULL,draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneDrawInfo() makes a copy of the given draw_info structure. If NULL
% is specified, a new DrawInfo structure is created initialized to default
% values.
%
% The format of the CloneDrawInfo method is:
%
% DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info,
const DrawInfo *draw_info)
{
DrawInfo
*clone_info;
clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info));
GetDrawInfo(image_info,clone_info);
if (draw_info == (DrawInfo *) NULL)
return(clone_info);
if (draw_info->id != (char *) NULL)
(void) CloneString(&clone_info->id,draw_info->id);
if (draw_info->primitive != (char *) NULL)
(void) CloneString(&clone_info->primitive,draw_info->primitive);
if (draw_info->geometry != (char *) NULL)
(void) CloneString(&clone_info->geometry,draw_info->geometry);
clone_info->compliance=draw_info->compliance;
clone_info->viewbox=draw_info->viewbox;
clone_info->affine=draw_info->affine;
clone_info->gravity=draw_info->gravity;
clone_info->fill=draw_info->fill;
clone_info->stroke=draw_info->stroke;
clone_info->stroke_width=draw_info->stroke_width;
if (draw_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue,
&draw_info->fill_pattern->exception);
else
if (draw_info->tile != (Image *) NULL)
clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue,
&draw_info->tile->exception);
clone_info->tile=NewImageList(); /* tile is deprecated */
if (draw_info->stroke_pattern != (Image *) NULL)
clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0,
MagickTrue,&draw_info->stroke_pattern->exception);
clone_info->stroke_antialias=draw_info->stroke_antialias;
clone_info->text_antialias=draw_info->text_antialias;
clone_info->fill_rule=draw_info->fill_rule;
clone_info->linecap=draw_info->linecap;
clone_info->linejoin=draw_info->linejoin;
clone_info->miterlimit=draw_info->miterlimit;
clone_info->dash_offset=draw_info->dash_offset;
clone_info->decorate=draw_info->decorate;
clone_info->compose=draw_info->compose;
if (draw_info->text != (char *) NULL)
(void) CloneString(&clone_info->text,draw_info->text);
if (draw_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,draw_info->font);
if (draw_info->metrics != (char *) NULL)
(void) CloneString(&clone_info->metrics,draw_info->metrics);
if (draw_info->family != (char *) NULL)
(void) CloneString(&clone_info->family,draw_info->family);
clone_info->style=draw_info->style;
clone_info->stretch=draw_info->stretch;
clone_info->weight=draw_info->weight;
if (draw_info->encoding != (char *) NULL)
(void) CloneString(&clone_info->encoding,draw_info->encoding);
clone_info->pointsize=draw_info->pointsize;
clone_info->kerning=draw_info->kerning;
clone_info->interline_spacing=draw_info->interline_spacing;
clone_info->interword_spacing=draw_info->interword_spacing;
clone_info->direction=draw_info->direction;
if (draw_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,draw_info->density);
clone_info->align=draw_info->align;
clone_info->undercolor=draw_info->undercolor;
clone_info->border_color=draw_info->border_color;
if (draw_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
{
register ssize_t
x;
for (x=0; fabs(draw_info->dash_pattern[x]) >= MagickEpsilon; x++) ;
clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*clone_info->dash_pattern));
if (clone_info->dash_pattern == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memset(clone_info->dash_pattern,0,(size_t) (2*x+2)*
sizeof(*clone_info->dash_pattern));
(void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t)
(x+1)*sizeof(*clone_info->dash_pattern));
}
clone_info->gradient=draw_info->gradient;
if (draw_info->gradient.stops != (StopInfo *) NULL)
{
size_t
number_stops;
number_stops=clone_info->gradient.number_stops;
clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t)
number_stops,sizeof(*clone_info->gradient.stops));
if (clone_info->gradient.stops == (StopInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,
"UnableToAllocateDashPattern");
(void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops,
(size_t) number_stops*sizeof(*clone_info->gradient.stops));
}
clone_info->bounds=draw_info->bounds;
clone_info->fill_opacity=draw_info->fill_opacity;
clone_info->stroke_opacity=draw_info->stroke_opacity;
clone_info->element_reference=draw_info->element_reference;
clone_info->clip_path=draw_info->clip_path;
clone_info->clip_units=draw_info->clip_units;
if (draw_info->clip_mask != (char *) NULL)
(void) CloneString(&clone_info->clip_mask,draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0,
MagickTrue,&draw_info->clipping_mask->exception);
if (draw_info->composite_mask != (Image *) NULL)
clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0,
MagickTrue,&draw_info->composite_mask->exception);
clone_info->render=draw_info->render;
clone_info->debug=IsEventLogging();
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P a t h T o P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPathToPolygon() converts a path to the more efficient sorted
% rendering form.
%
% The format of the ConvertPathToPolygon method is:
%
% PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
%
% A description of each parameter follows:
%
% o Method ConvertPathToPolygon returns the path in a more efficient sorted
% rendering form of type PolygonInfo.
%
% o path_info: Specifies a pointer to a PathInfo structure.
%
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int DrawCompareEdges(const void *p_edge,const void *q_edge)
{
#define DrawCompareEdge(p,q) \
{ \
if (((p)-(q)) < 0.0) \
return(-1); \
if (((p)-(q)) > 0.0) \
return(1); \
}
register const PointInfo
*p,
*q;
/*
Edge sorting for right-handed coordinate system.
*/
p=((const EdgeInfo *) p_edge)->points;
q=((const EdgeInfo *) q_edge)->points;
DrawCompareEdge(p[0].y,q[0].y);
DrawCompareEdge(p[0].x,q[0].x);
DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)*
(q[1].x-q[0].x));
DrawCompareEdge(p[1].y,q[1].y);
DrawCompareEdge(p[1].x,q[1].x);
return(0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static void LogPolygonInfo(const PolygonInfo *polygon_info)
{
register EdgeInfo
*p;
register ssize_t
i,
j;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge");
p=polygon_info->edges;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:",
(double) i);
(void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s",
p->direction != MagickFalse ? "down" : "up");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s",
p->ghostline != MagickFalse ? "transparent" : "opaque");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1,
p->bounds.x2,p->bounds.y2);
for (j=0; j < (ssize_t) p->number_points; j++)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g",
p->points[j].x,p->points[j].y);
p++;
}
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge");
}
static void ReversePoints(PointInfo *points,const size_t number_points)
{
PointInfo
point;
register ssize_t
i;
for (i=0; i < (ssize_t) (number_points >> 1); i++)
{
point=points[i];
points[i]=points[number_points-(i+1)];
points[number_points-(i+1)]=point;
}
}
static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info)
{
long
direction,
next_direction;
PointInfo
point,
*points;
PolygonInfo
*polygon_info;
SegmentInfo
bounds;
register ssize_t
i,
n;
MagickBooleanType
ghostline;
size_t
edge,
number_edges,
number_points;
/*
Convert a path to the more efficient sorted rendering form.
*/
polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info));
if (polygon_info == (PolygonInfo *) NULL)
return((PolygonInfo *) NULL);
number_edges=16;
polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
(void) memset(polygon_info->edges,0,number_edges*
sizeof(*polygon_info->edges));
direction=0;
edge=0;
ghostline=MagickFalse;
n=0;
number_points=0;
points=(PointInfo *) NULL;
(void) memset(&point,0,sizeof(point));
(void) memset(&bounds,0,sizeof(bounds));
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=0.0;
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) direction;
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->number_edges=0;
for (i=0; path_info[i].code != EndCode; i++)
{
if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) ||
(path_info[i].code == GhostlineCode))
{
/*
Move to.
*/
if ((points != (PointInfo *) NULL) && (n >= 2))
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
points=(PointInfo *) NULL;
ghostline=MagickFalse;
edge++;
}
if (points == (PointInfo *) NULL)
{
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse;
point=path_info[i].point;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
direction=0;
n=1;
continue;
}
/*
Line to.
*/
next_direction=((path_info[i].point.y > point.y) ||
((fabs(path_info[i].point.y-point.y) < MagickEpsilon) &&
(path_info[i].point.x > point.x))) ? 1 : -1;
if ((points != (PointInfo *) NULL) && (direction != 0) &&
(direction != next_direction))
{
/*
New edge.
*/
point=points[n-1];
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
number_points=16;
points=(PointInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
n=1;
ghostline=MagickFalse;
points[0]=point;
bounds.x1=point.x;
bounds.x2=point.x;
edge++;
}
direction=next_direction;
if (points == (PointInfo *) NULL)
continue;
if (n == (ssize_t) number_points)
{
number_points<<=1;
points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points,
sizeof(*points));
if (points == (PointInfo *) NULL)
return((PolygonInfo *) NULL);
}
point=path_info[i].point;
points[n]=point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.x > bounds.x2)
bounds.x2=point.x;
n++;
}
if (points != (PointInfo *) NULL)
{
if (n < 2)
points=(PointInfo *) RelinquishMagickMemory(points);
else
{
if (edge == number_edges)
{
number_edges<<=1;
polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory(
polygon_info->edges,(size_t) number_edges,
sizeof(*polygon_info->edges));
if (polygon_info->edges == (EdgeInfo *) NULL)
return((PolygonInfo *) NULL);
}
polygon_info->edges[edge].number_points=(size_t) n;
polygon_info->edges[edge].scanline=(-1.0);
polygon_info->edges[edge].highwater=0;
polygon_info->edges[edge].ghostline=ghostline;
polygon_info->edges[edge].direction=(ssize_t) (direction > 0);
if (direction < 0)
ReversePoints(points,(size_t) n);
polygon_info->edges[edge].points=points;
polygon_info->edges[edge].bounds=bounds;
polygon_info->edges[edge].bounds.y1=points[0].y;
polygon_info->edges[edge].bounds.y2=points[n-1].y;
ghostline=MagickFalse;
edge++;
}
}
polygon_info->number_edges=edge;
qsort(polygon_info->edges,(size_t) polygon_info->number_edges,
sizeof(*polygon_info->edges),DrawCompareEdges);
if (IsEventLogging() != MagickFalse)
LogPolygonInfo(polygon_info);
return(polygon_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n v e r t P r i m i t i v e T o P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector
% path structure.
%
% The format of the ConvertPrimitiveToPath method is:
%
% PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o Method ConvertPrimitiveToPath returns a vector path structure of type
% PathInfo.
%
% o draw_info: a structure of type DrawInfo.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static void LogPathInfo(const PathInfo *path_info)
{
register const PathInfo
*p;
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path");
for (p=path_info; p->code != EndCode; p++)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ?
"moveto ghostline" : p->code == OpenCode ? "moveto open" :
p->code == MoveToCode ? "moveto" : p->code == LineToCode ? "lineto" :
"?");
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path");
}
static PathInfo *ConvertPrimitiveToPath(
const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info)
{
MagickBooleanType
closed_subpath;
PathInfo
*path_info;
PathInfoCode
code;
PointInfo
p,
q;
register ssize_t
i,
n;
ssize_t
coordinates,
start;
magick_unreferenced(draw_info);
/*
Converts a PrimitiveInfo structure into a vector path structure.
*/
switch (primitive_info->primitive)
{
case PointPrimitive:
case ColorPrimitive:
case MattePrimitive:
case TextPrimitive:
case ImagePrimitive:
return((PathInfo *) NULL);
default:
break;
}
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL),
sizeof(*path_info));
if (path_info == (PathInfo *) NULL)
return((PathInfo *) NULL);
coordinates=0;
closed_subpath=MagickFalse;
n=0;
p.x=(-1.0);
p.y=(-1.0);
q.x=(-1.0);
q.y=(-1.0);
start=0;
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
code=LineToCode;
if (coordinates <= 0)
{
/*
New subpath.
*/
coordinates=(ssize_t) primitive_info[i].coordinates;
p=primitive_info[i].point;
start=n;
code=MoveToCode;
closed_subpath=primitive_info[i].closed_subpath;
}
coordinates--;
if ((code == MoveToCode) || (coordinates <= 0) ||
(fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) ||
(fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon))
{
/*
Eliminate duplicate points.
*/
path_info[n].code=code;
path_info[n].point=primitive_info[i].point;
q=primitive_info[i].point;
n++;
}
if (coordinates > 0)
continue; /* next point in current subpath */
if (closed_subpath != MagickFalse)
{
closed_subpath=MagickFalse;
continue;
}
/*
Mark the p point as open if the subpath is not closed.
*/
path_info[start].code=OpenCode;
path_info[n].code=GhostlineCode;
path_info[n].point=primitive_info[i].point;
n++;
path_info[n].code=LineToCode;
path_info[n].point=p;
n++;
}
path_info[n].code=EndCode;
path_info[n].point.x=0.0;
path_info[n].point.y=0.0;
if (IsEventLogging() != MagickFalse)
LogPathInfo(path_info);
path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1),
sizeof(*path_info));
return(path_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyDrawInfo() deallocates memory associated with a DrawInfo structure.
%
% The format of the DestroyDrawInfo method is:
%
% DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
*/
MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info)
{
assert(draw_info != (DrawInfo *) NULL);
if (draw_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info->signature == MagickCoreSignature);
if (draw_info->id != (char *) NULL)
draw_info->id=DestroyString(draw_info->id);
if (draw_info->primitive != (char *) NULL)
draw_info->primitive=DestroyString(draw_info->primitive);
if (draw_info->text != (char *) NULL)
draw_info->text=DestroyString(draw_info->text);
if (draw_info->geometry != (char *) NULL)
draw_info->geometry=DestroyString(draw_info->geometry);
if (draw_info->tile != (Image *) NULL)
draw_info->tile=DestroyImage(draw_info->tile);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern);
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
if (draw_info->metrics != (char *) NULL)
draw_info->metrics=DestroyString(draw_info->metrics);
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
if (draw_info->encoding != (char *) NULL)
draw_info->encoding=DestroyString(draw_info->encoding);
if (draw_info->density != (char *) NULL)
draw_info->density=DestroyString(draw_info->density);
if (draw_info->server_name != (char *) NULL)
draw_info->server_name=(char *)
RelinquishMagickMemory(draw_info->server_name);
if (draw_info->dash_pattern != (double *) NULL)
draw_info->dash_pattern=(double *) RelinquishMagickMemory(
draw_info->dash_pattern);
if (draw_info->gradient.stops != (StopInfo *) NULL)
draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory(
draw_info->gradient.stops);
if (draw_info->clip_mask != (char *) NULL)
draw_info->clip_mask=DestroyString(draw_info->clip_mask);
if (draw_info->clipping_mask != (Image *) NULL)
draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask);
if (draw_info->composite_mask != (Image *) NULL)
draw_info->composite_mask=DestroyImage(draw_info->composite_mask);
draw_info->signature=(~MagickCoreSignature);
draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info);
return(draw_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y E d g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyEdge() destroys the specified polygon edge.
%
% The format of the DestroyEdge method is:
%
% size_t DestroyEdge(PolygonInfo *polygon_info,const size_t edge)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
% o edge: the polygon edge number to destroy.
%
*/
static size_t DestroyEdge(PolygonInfo *polygon_info,
const size_t edge)
{
assert(edge < polygon_info->number_edges);
polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
polygon_info->edges[edge].points);
polygon_info->number_edges--;
if (edge < polygon_info->number_edges)
(void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
(size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
return(polygon_info->number_edges);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P o l y g o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
% The format of the DestroyPolygonInfo method is:
%
% PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
register ssize_t
i;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
polygon_info->edges[i].points=(PointInfo *)
RelinquishMagickMemory(polygon_info->edges[i].points);
polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w A f f i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawAffineImage() composites the source over the destination image as
% dictated by the affine transform.
%
% The format of the DrawAffineImage method is:
%
% MagickBooleanType DrawAffineImage(Image *image,const Image *source,
% const AffineMatrix *affine)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o source: the source image.
%
% o affine: the affine transform.
%
*/
static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine,
const double y,const SegmentInfo *edge)
{
double
intercept,
z;
register double
x;
SegmentInfo
inverse_edge;
/*
Determine left and right edges.
*/
inverse_edge.x1=edge->x1;
inverse_edge.y1=edge->y1;
inverse_edge.x2=edge->x2;
inverse_edge.y2=edge->y2;
z=affine->ry*y+affine->tx;
if (affine->sx >= MagickEpsilon)
{
intercept=(-z/affine->sx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->sx < -MagickEpsilon)
{
intercept=(-z+(double) image->columns)/affine->sx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->sx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns))
{
inverse_edge.x2=edge->x1;
return(inverse_edge);
}
/*
Determine top and bottom edges.
*/
z=affine->sy*y+affine->ty;
if (affine->rx >= MagickEpsilon)
{
intercept=(-z/affine->rx);
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if (affine->rx < -MagickEpsilon)
{
intercept=(-z+(double) image->rows)/affine->rx;
x=intercept;
if (x > inverse_edge.x1)
inverse_edge.x1=x;
intercept=(-z/affine->rx);
x=intercept;
if (x < inverse_edge.x2)
inverse_edge.x2=x;
}
else
if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows))
{
inverse_edge.x2=edge->x2;
return(inverse_edge);
}
return(inverse_edge);
}
static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
double
determinant;
determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx*
affine->ry);
inverse_affine.sx=determinant*affine->sy;
inverse_affine.rx=determinant*(-affine->rx);
inverse_affine.ry=determinant*(-affine->ry);
inverse_affine.sy=determinant*affine->sx;
inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty*
inverse_affine.ry;
inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty*
inverse_affine.sy;
return(inverse_affine);
}
MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickPixelPacket
zero;
PointInfo
extent[4],
min,
max,
point;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetMagickPixelPacket(image,&zero);
exception=(&image->exception);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
MagickPixelPacket
composite,
pixel;
PointInfo
point;
register IndexPacket
*magick_restrict indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (PixelPacket *) NULL)
continue;
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolateMagickPixelPacket(source,source_view,
UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
composite.opacity,&composite);
SetPixelPacket(image,&composite,q,indexes+x_offset);
x_offset++;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w B o u n d i n g R e c t a n g l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawBoundingRectangles() draws the bounding rectangles on the image. This
% is only useful for developers debugging the rendering algorithm.
%
% The format of the DrawBoundingRectangles method is:
%
% MagickBooleanType DrawBoundingRectangles(Image *image,
% const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/
static inline double SaneStrokeWidth(const Image *image,
const DrawInfo *draw_info)
{
return(MagickMin((double) draw_info->stroke_width,
(2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}
static MagickBooleanType DrawBoundingRectangles(Image *image,
const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
double
mid;
DrawInfo
*clone_info;
MagickStatusType
status;
PointInfo
end,
resolution,
start;
PrimitiveInfo
primitive_info[6];
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
coordinates;
(void) memset(primitive_info,0,sizeof(primitive_info));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
resolution.x=96.0;
resolution.y=96.0;
if (clone_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(clone_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == MagickFalse)
resolution.y=resolution.x;
}
mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
SaneStrokeWidth(image,clone_info)/2.0;
bounds.x1=0.0;
bounds.y1=0.0;
bounds.x2=0.0;
bounds.y2=0.0;
if (polygon_info != (PolygonInfo *) NULL)
{
bounds=polygon_info->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
bounds.x1=polygon_info->edges[i].bounds.x1;
if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
bounds.y1=polygon_info->edges[i].bounds.y1;
if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
bounds.x2=polygon_info->edges[i].bounds.x2;
if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
bounds.y2=polygon_info->edges[i].bounds.y2;
}
bounds.x1-=mid;
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
image->columns ? (double) image->columns-1 : bounds.x1;
bounds.y1-=mid;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
image->rows ? (double) image->rows-1 : bounds.y1;
bounds.x2+=mid;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
image->columns ? (double) image->columns-1 : bounds.x2;
bounds.y2+=mid;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
image->rows ? (double) image->rows-1 : bounds.y2;
for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
{
if (polygon_info->edges[i].direction != 0)
status=QueryColorDatabase("#f00",&clone_info->stroke,
&image->exception);
else
status=QueryColorDatabase("#0f0",&clone_info->stroke,
&image->exception);
if (status == MagickFalse)
break;
start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info);
if (status == MagickFalse)
break;
}
if (i < (ssize_t) polygon_info->number_edges)
{
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
}
status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
if (status == MagickFalse)
{
clone_info=DestroyDrawInfo(clone_info);
return(MagickFalse);
}
start.x=(double) (bounds.x1-mid);
start.y=(double) (bounds.y1-mid);
end.x=(double) (bounds.x2+mid);
end.y=(double) (bounds.y2+mid);
primitive_info[0].primitive=RectanglePrimitive;
status&=TraceRectangle(primitive_info,start,end);
primitive_info[0].method=ReplaceMethod;
coordinates=(ssize_t) primitive_info[0].coordinates;
primitive_info[coordinates].primitive=UndefinedPrimitive;
status=DrawPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClipPath() draws the clip path on the image mask.
%
% The format of the DrawClipPath method is:
%
% MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
% const char *id)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
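%  For example, assuming the clip path MVG was previously registered as an
%  image artifact under the hypothetical id "wheel":
%
%    status=DrawClipPath(image,draw_info,"wheel");
%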
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
const DrawInfo *draw_info,const char *id)
{
const char
*clip_path;
Image
*clipping_mask;
MagickBooleanType
status;
clip_path=GetImageArtifact(image,id);
if (clip_path == (const char *) NULL)
return(MagickFalse);
clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,clip_path,
&image->exception);
if (clipping_mask == (Image *) NULL)
return(MagickFalse);
status=SetImageClipMask(image,clipping_mask);
clipping_mask=DestroyImage(clipping_mask);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C l i p p i n g M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawClippingMask() draws the clip path and returns it as an image clipping
% mask.
%
% The format of the DrawClippingMask method is:
%
% Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *clip_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the clip path id.
%
% o clip_path: the clip path.
%
% o exception: return any errors or warnings in this structure.
%
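%  The returned image is owned by the caller and must eventually be freed
%  with DestroyImage(); NULL is returned if the mask cannot be rendered.
%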
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *clip_path,ExceptionInfo *exception)
{
DrawInfo
*clone_info;
Image
*clip_mask;
MagickStatusType
status;
/*
Draw a clip path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
clip_mask=AcquireImage((const ImageInfo *) NULL);
status=SetImageExtent(clip_mask,image->columns,image->rows);
if (status == MagickFalse)
return(DestroyImage(clip_mask));
status=SetImageClipMask(image,(Image *) NULL);
status=QueryColorCompliance("#0000",AllCompliance,
&clip_mask->background_color,exception);
clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
status=SetImageBackgroundColor(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,clip_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
if (clone_info->clip_mask != (char *) NULL)
clone_info->clip_mask=DestroyString(clone_info->clip_mask);
(void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->opacity=OpaqueOpacity;
clone_info->clip_path=MagickTrue;
status=RenderMVGContent(clip_mask,clone_info,0);
clone_info=DestroyDrawInfo(clone_info);
status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
if (draw_info->compliance != SVGCompliance)
status&=NegateImage(clip_mask,MagickFalse);
if (status == MagickFalse)
clip_mask=DestroyImage(clip_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
return(clip_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w C o m p o s i t e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawCompositeMask() draws the mask path and returns it as an image mask.
%
% The format of the DrawCompositeMask method is:
%
% Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
% const char *id,const char *mask_path,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o id: the mask path id.
%
% o mask_path: the mask path.
%
% o exception: return any errors or warnings in this structure.
%
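%  As with DrawClippingMask(), the caller owns the returned image and must
%  destroy it with DestroyImage(); NULL is returned on failure.
%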
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
const char *id,const char *mask_path,ExceptionInfo *exception)
{
Image
*composite_mask;
DrawInfo
*clone_info;
MagickStatusType
status;
/*
Draw a mask path.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
composite_mask=AcquireImage((const ImageInfo *) NULL);
status=SetImageExtent(composite_mask,image->columns,image->rows);
if (status == MagickFalse)
return(DestroyImage(composite_mask));
status=SetImageMask(image,(Image *) NULL);
status=QueryColorCompliance("#0000",AllCompliance,
&composite_mask->background_color,exception);
composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
(void) SetImageBackgroundColor(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
id);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->primitive,mask_path);
status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
exception);
status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
exception);
clone_info->stroke_width=0.0;
clone_info->opacity=OpaqueOpacity;
status=RenderMVGContent(composite_mask,clone_info,0);
clone_info=DestroyDrawInfo(clone_info);
status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
status&=NegateImage(composite_mask,MagickFalse);
if (status == MagickFalse)
composite_mask=DestroyImage(composite_mask);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
return(composite_mask);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w D a s h P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
% image while respecting the dash offset and dash pattern attributes.
%
% The format of the DrawDashPolygon method is:
%
% MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
% const PrimitiveInfo *primitive_info,Image *image)
%
% A description of each parameter follows:
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
% o image: the image.
%
%
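%  The dash pattern is taken from draw_info->dash_pattern, an array of
%  alternating dash and gap lengths terminated by a 0.0 entry; even-indexed
%  entries are drawn, odd-indexed entries are skipped.
%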
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info,Image *image)
{
double
length,
maximum_length,
offset,
scale,
total_length;
DrawInfo
*clone_info;
MagickStatusType
status;
PrimitiveInfo
*dash_polygon;
register double
dx,
dy;
register ssize_t
i;
size_t
number_vertices;
ssize_t
j,
n;
assert(draw_info != (const DrawInfo *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
number_vertices=(size_t) i;
dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(2UL*number_vertices+32UL),sizeof(*dash_polygon));
if (dash_polygon == (PrimitiveInfo *) NULL)
return(MagickFalse);
(void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
sizeof(*dash_polygon));
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->miterlimit=0;
dash_polygon[0]=primitive_info[0];
scale=ExpandAffine(&draw_info->affine);
length=scale*draw_info->dash_pattern[0];
offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
scale*draw_info->dash_offset : 0.0;
j=1;
for (n=0; offset > 0.0; j=0)
{
if (draw_info->dash_pattern[n] <= 0.0)
break;
length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
if (offset > length)
{
offset-=length;
n++;
length=scale*draw_info->dash_pattern[n];
continue;
}
if (offset < length)
{
length-=offset;
offset=0.0;
break;
}
offset=0.0;
n++;
}
status=MagickTrue;
maximum_length=0.0;
total_length=0.0;
for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
{
dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
maximum_length=hypot(dx,dy);
if (maximum_length > MaxBezierCoordinates)
break;
if (fabs(length) < MagickEpsilon)
{
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
for (total_length=0.0; (length >= 0.0) && (maximum_length >= (total_length+length)); )
{
total_length+=length;
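      /*
        Even-indexed dash-pattern entries are dashes, odd-indexed entries
        are gaps: at the end of a gap, record the start of the next dash;
        at the end of a dash, append its end point and stroke the segment.
      */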
if ((n & 0x01) != 0)
{
dash_polygon[0]=primitive_info[0];
dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
j=1;
}
else
{
if ((j+1) > (ssize_t) number_vertices)
break;
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
total_length*PerceptibleReciprocal(maximum_length));
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon);
if (status == MagickFalse)
break;
}
if (fabs(draw_info->dash_pattern[n]) >= MagickEpsilon)
n++;
if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
n=0;
length=scale*draw_info->dash_pattern[n];
}
length-=(maximum_length-total_length);
if ((n & 0x01) != 0)
continue;
dash_polygon[j]=primitive_info[i];
dash_polygon[j].coordinates=1;
j++;
}
if ((status != MagickFalse) && (total_length < maximum_length) &&
((n & 0x01) == 0) && (j > 1))
{
dash_polygon[j]=primitive_info[i-1];
dash_polygon[j].point.x+=MagickEpsilon;
dash_polygon[j].point.y+=MagickEpsilon;
dash_polygon[j].coordinates=1;
j++;
dash_polygon[0].coordinates=(size_t) j;
dash_polygon[j].primitive=UndefinedPrimitive;
status&=DrawStrokePolygon(image,clone_info,dash_polygon);
}
dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawGradientImage() draws a linear gradient on the image.
%
% The format of the DrawGradientImage method is:
%
% MagickBooleanType DrawGradientImage(Image *image,
% const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
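%  The gradient definition (type, stops, spread method, and bounding box) is
%  taken from draw_info->gradient; only pixels within the gradient bounding
%  box are composited.
%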
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
const ssize_t x,const ssize_t y)
{
switch (gradient->type)
{
case UndefinedGradient:
case LinearGradient:
{
double
gamma,
length,
offset,
scale;
PointInfo
p,
q;
const SegmentInfo
*gradient_vector;
gradient_vector=(&gradient->gradient_vector);
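      /*
        Scalar projection of the pixel vector q onto the gradient vector p,
        (p.q)/|p|; the caller normalizes by the gradient vector length to
        map the offset into [0,1].
      */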
p.x=gradient_vector->x2-gradient_vector->x1;
p.y=gradient_vector->y2-gradient_vector->y1;
q.x=(double) x-gradient_vector->x1;
q.y=(double) y-gradient_vector->y1;
length=sqrt(q.x*q.x+q.y*q.y);
gamma=sqrt(p.x*p.x+p.y*p.y)*length;
gamma=PerceptibleReciprocal(gamma);
scale=p.x*q.x+p.y*q.y;
offset=gamma*scale*length;
return(offset);
}
case RadialGradient:
{
PointInfo
v;
if (gradient->spread == RepeatSpread)
{
v.x=(double) x-gradient->center.x;
v.y=(double) y-gradient->center.y;
return(sqrt(v.x*v.x+v.y*v.y));
}
v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
return(sqrt(v.x*v.x+v.y*v.y));
}
}
return(0.0);
}
MagickExport MagickBooleanType DrawGradientImage(Image *image,
const DrawInfo *draw_info)
{
CacheView
*image_view;
const GradientInfo
*gradient;
const SegmentInfo
*gradient_vector;
double
length;
ExceptionInfo
*exception;
MagickBooleanType
status;
MagickPixelPacket
zero;
PointInfo
point;
RectangleInfo
bounding_box;
ssize_t
y;
/*
Draw linear or radial gradient on image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
gradient=(&draw_info->gradient);
gradient_vector=(&gradient->gradient_vector);
point.x=gradient_vector->x2-gradient_vector->x1;
point.y=gradient_vector->y2-gradient_vector->y1;
length=sqrt(point.x*point.x+point.y*point.y);
bounding_box=gradient->bounding_box;
status=MagickTrue;
exception=(&image->exception);
GetMagickPixelPacket(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
{
double
alpha,
offset;
MagickPixelPacket
composite,
pixel;
register IndexPacket
*magick_restrict indexes;
register ssize_t
i,
x;
register PixelPacket
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
pixel=zero;
composite=zero;
offset=GetStopColorOffset(gradient,0,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
{
SetMagickPixelPacket(image,q,indexes+x,&pixel);
switch (gradient->spread)
{
case UndefinedSpread:
case PadSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if ((offset < 0.0) || (i == 0))
composite=gradient->stops[0].color;
else
if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case ReflectSpread:
{
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type != RadialGradient)
offset*=PerceptibleReciprocal(length);
}
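          /*
            Reflect the offset into [0,1]: even periods map forward, odd
            periods map backward.
          */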
if (offset < 0.0)
offset=(-offset);
if ((ssize_t) fmod(offset,2.0) == 0)
offset=fmod(offset,1.0);
else
offset=1.0-fmod(offset,1.0);
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
case RepeatSpread:
{
double
repeat;
MagickBooleanType
antialias;
antialias=MagickFalse;
repeat=0.0;
if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
(y != (ssize_t) ceil(gradient_vector->y1-0.5)))
{
offset=GetStopColorOffset(gradient,x,y);
if (gradient->type == LinearGradient)
{
repeat=fmod(offset,length);
if (repeat < 0.0)
repeat=length-fmod(-repeat,length);
else
repeat=fmod(offset,length);
antialias=(repeat < length) && ((repeat+1.0) > length) ?
MagickTrue : MagickFalse;
offset=PerceptibleReciprocal(length)*repeat;
}
else
{
repeat=fmod(offset,(double) gradient->radius);
if (repeat < 0.0)
repeat=gradient->radius-fmod(-repeat,
(double) gradient->radius);
else
repeat=fmod(offset,(double) gradient->radius);
antialias=repeat+1.0 > gradient->radius ? MagickTrue :
MagickFalse;
offset=repeat/gradient->radius;
}
}
for (i=0; i < (ssize_t) gradient->number_stops; i++)
if (offset < gradient->stops[i].offset)
break;
if (i == 0)
composite=gradient->stops[0].color;
else
if (i == (ssize_t) gradient->number_stops)
composite=gradient->stops[gradient->number_stops-1].color;
else
{
j=i;
i--;
alpha=(offset-gradient->stops[i].offset)/
(gradient->stops[j].offset-gradient->stops[i].offset);
if (antialias != MagickFalse)
{
if (gradient->type == LinearGradient)
alpha=length-repeat;
else
alpha=gradient->radius-repeat;
i=0;
j=(ssize_t) gradient->number_stops-1L;
}
MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
&gradient->stops[j].color,alpha,&composite);
}
break;
}
}
MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
pixel.opacity,&pixel);
SetPixelPacket(image,&pixel,q,indexes+x);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawImage() draws a graphic primitive on your image. The primitive
% may be represented as a string or filename. Precede the filename with an
% "at" sign (@) and the contents of the file are drawn on the image. You
% can affect how text is drawn by setting one or more members of the draw
% info structure.
%
% The format of the DrawImage method is:
%
% MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
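%  For example, a minimal sketch (the MVG string and the image_info variable
%  are illustrative):
%
%    draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
%    (void) CloneString(&draw_info->primitive,"circle 150,150 150,50");
%    status=DrawImage(image,draw_info);
%    draw_info=DestroyDrawInfo(draw_info);
%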
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
const size_t pad)
{
double
extent;
size_t
quantum;
/*
    Check if there is enough storage for drawing primitives.
*/
extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
quantum=sizeof(**mvg_info->primitive_info);
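  /*
    Guard against overflow: the requested extent, in bytes, must stay below
    both SSIZE_MAX and the configured maximum memory request.
  */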
if (((extent*quantum) < (double) SSIZE_MAX) &&
((extent*quantum) < (double) GetMaxMemoryRequest()))
{
if (extent <= (double) *mvg_info->extent)
return(MagickTrue);
*mvg_info->primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(
*mvg_info->primitive_info,(size_t) extent,quantum);
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
{
register ssize_t
i;
*mvg_info->extent=(size_t) extent;
for (i=mvg_info->offset+1; i < (ssize_t) extent; i++)
(*mvg_info->primitive_info)[i].primitive=UndefinedPrimitive;
return(MagickTrue);
}
}
/*
Reallocation failed, allocate a primitive to facilitate unwinding.
*/
if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
*mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
*mvg_info->primitive_info);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
*mvg_info->primitive_info=(PrimitiveInfo *) AcquireCriticalMemory(
PrimitiveExtentPad*quantum);
(void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
*mvg_info->extent=1;
return(MagickFalse);
}
MagickExport int MVGMacroCompare(const void *target,const void *source)
{
const char
*p,
*q;
p=(const char *) target;
q=(const char *) source;
return(strcmp(p,q));
}
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
char
*macro,
*token;
const char
*q;
size_t
extent;
SplayTreeInfo
*macros;
/*
Scan graphic primitives for definitions and classes.
*/
if (primitive == (const char *) NULL)
return((SplayTreeInfo *) NULL);
macros=NewSplayTree(MVGMacroCompare,RelinquishMagickMemory,
RelinquishMagickMemory);
macro=AcquireString(primitive);
token=AcquireString(primitive);
extent=strlen(token)+MagickPathExtent;
for (q=primitive; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare("push",token) == 0)
{
register const char
*end,
*start;
(void) GetNextToken(q,&q,extent,token);
if (*q == '"')
{
char
name[MagickPathExtent];
const char
*p;
ssize_t
n;
/*
Named macro (e.g. push graphic-context "wheel").
*/
(void) GetNextToken(q,&q,extent,token);
start=q;
end=q;
(void) CopyMagickString(name,token,MagickPathExtent);
n=1;
for (p=q; *p != '\0'; )
{
if (GetNextToken(p,&p,extent,token) < 1)
break;
if (*token == '\0')
break;
if (LocaleCompare(token,"pop") == 0)
{
end=p-strlen(token)-1;
n--;
}
if (LocaleCompare(token,"push") == 0)
n++;
if ((n == 0) && (end > start))
{
/*
Extract macro.
*/
(void) GetNextToken(p,&p,extent,token);
(void) CopyMagickString(macro,start,(size_t) (end-start));
(void) AddValueToSplayTree(macros,ConstantString(name),
ConstantString(macro));
break;
}
}
}
}
}
token=DestroyString(token);
macro=DestroyString(macro);
return(macros);
}
static inline MagickBooleanType IsPoint(const char *point)
{
char
*p;
double
value;
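  /*
    The token is a point if StringToDouble() consumed characters or produced
    a nonzero value.
  */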
value=StringToDouble(point,&p);
return((fabs(value) < MagickEpsilon) && (p == point) ? MagickFalse :
MagickTrue);
}
static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info,
const PointInfo point)
{
primitive_info->coordinates=1;
primitive_info->closed_subpath=MagickFalse;
primitive_info->point=point;
return(MagickTrue);
}
static MagickBooleanType RenderMVGContent(Image *image,
const DrawInfo *draw_info,const size_t depth)
{
#define RenderImageTag "Render/Image"
AffineMatrix
affine,
current;
char
key[2*MaxTextExtent],
keyword[MaxTextExtent],
geometry[MaxTextExtent],
name[MaxTextExtent],
*next_token,
pattern[MaxTextExtent],
*primitive,
*token;
const char
*q;
double
angle,
coordinates,
cursor,
factor,
primitive_extent;
DrawInfo
*clone_info,
**graphic_context;
MagickBooleanType
proceed;
MagickStatusType
status;
MVGInfo
mvg_info;
PointInfo
point;
PixelPacket
start_color;
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register const char
*p;
register ssize_t
i,
x;
SegmentInfo
bounds;
size_t
extent,
number_points;
SplayTreeInfo
*macros;
ssize_t
defsDepth,
j,
k,
n,
symbolDepth;
TypeMetric
metrics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
if (depth > MagickMaxRecursionDepth)
ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply",
image->filename);
if ((draw_info->primitive == (char *) NULL) ||
(*draw_info->primitive == '\0'))
return(MagickFalse);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
return(MagickFalse);
if (image->matte == MagickFalse)
{
status=SetImageAlphaChannel(image,OpaqueAlphaChannel);
if (status == MagickFalse)
return(MagickFalse);
}
primitive=(char *) NULL;
if ((*draw_info->primitive == '@') && (strlen(draw_info->primitive) > 1) &&
(*(draw_info->primitive+1) != '-') && (depth == 0))
primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception);
else
primitive=AcquireString(draw_info->primitive);
if (primitive == (char *) NULL)
return(MagickFalse);
primitive_extent=(double) strlen(primitive);
(void) SetImageArtifact(image,"mvg:vector-graphics",primitive);
n=0;
/*
Allocate primitive info memory.
*/
graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
primitive=DestroyString(primitive);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
number_points=PrimitiveExtentPad;
primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points,
sizeof(*primitive_info));
if (primitive_info == (PrimitiveInfo *) NULL)
{
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(primitive_info,0,(size_t) number_points*
sizeof(*primitive_info));
(void) memset(&mvg_info,0,sizeof(mvg_info));
mvg_info.primitive_info=(&primitive_info);
mvg_info.extent=(&number_points);
mvg_info.exception=(&image->exception);
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info);
graphic_context[n]->viewbox=image->page;
if ((image->page.width == 0) || (image->page.height == 0))
{
graphic_context[n]->viewbox.width=image->columns;
graphic_context[n]->viewbox.height=image->rows;
}
token=AcquireString(primitive);
extent=strlen(token)+MaxTextExtent;
cursor=0.0;
defsDepth=0;
symbolDepth=0;
macros=GetMVGMacros(primitive);
status=QueryColorDatabase("#000000",&start_color,&image->exception);
for (q=primitive; *q != '\0'; )
{
/*
Interpret graphic primitive.
*/
if (GetNextToken(q,&q,MaxTextExtent,keyword) < 1)
break;
if (*keyword == '\0')
break;
if (*keyword == '#')
{
/*
Comment.
*/
while ((*q != '\n') && (*q != '\0'))
q++;
continue;
}
p=q-strlen(keyword)-1;
primitive_type=UndefinedPrimitive;
current=graphic_context[n]->affine;
GetAffineMatrix(&affine);
*token='\0';
switch (*keyword)
{
case ';':
break;
case 'a':
case 'A':
{
if (LocaleCompare("affine",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.rx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ry=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("arc",keyword) == 0)
{
primitive_type=ArcPrimitive;
break;
}
status=MagickFalse;
break;
}
case 'b':
case 'B':
{
if (LocaleCompare("bezier",keyword) == 0)
{
primitive_type=BezierPrimitive;
break;
}
if (LocaleCompare("border-color",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->border_color,
&image->exception);
break;
}
status=MagickFalse;
break;
}
case 'c':
case 'C':
{
if (LocaleCompare("class",keyword) == 0)
{
const char
*mvg_class;
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
if (LocaleCompare(token,graphic_context[n]->id) == 0)
break;
mvg_class=(const char *) GetValueFromSplayTree(macros,token);
if (mvg_class != (const char *) NULL)
{
char
*elements;
ssize_t
offset;
/*
Inject class elements in stream.
*/
offset=(ssize_t) (p-primitive);
elements=AcquireString(primitive);
elements[offset]='\0';
(void) ConcatenateString(&elements,mvg_class);
(void) ConcatenateString(&elements,"\n");
(void) ConcatenateString(&elements,q);
primitive=DestroyString(primitive);
primitive=elements;
q=primitive+offset;
}
break;
}
if (LocaleCompare("clip-path",keyword) == 0)
{
const char
*clip_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
if (*token == '\0')
{
status=MagickFalse;
break;
}
(void) CloneString(&graphic_context[n]->clip_mask,token);
clip_path=(const char *) GetValueFromSplayTree(macros,token);
if (clip_path != (const char *) NULL)
{
if (graphic_context[n]->clipping_mask != (Image *) NULL)
graphic_context[n]->clipping_mask=
DestroyImage(graphic_context[n]->clipping_mask);
graphic_context[n]->clipping_mask=DrawClippingMask(image,
graphic_context[n],token,clip_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,
graphic_context[n]->clip_mask,clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
}
break;
}
if (LocaleCompare("clip-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("clip-units",keyword) == 0)
{
ssize_t
clip_units;
(void) GetNextToken(q,&q,extent,token);
clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse,
token);
if (clip_units == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->clip_units=(ClipPathUnits) clip_units;
if (clip_units == ObjectBoundingBox)
{
                  GetAffineMatrix(&current);
affine.sx=draw_info->bounds.x2;
affine.sy=draw_info->bounds.y2;
affine.tx=draw_info->bounds.x1;
affine.ty=draw_info->bounds.y1;
break;
}
break;
}
if (LocaleCompare("circle",keyword) == 0)
{
primitive_type=CirclePrimitive;
break;
}
if (LocaleCompare("color",keyword) == 0)
{
primitive_type=ColorPrimitive;
break;
}
if (LocaleCompare("compliance",keyword) == 0)
{
/*
MVG compliance associates a clipping mask with an image; SVG
compliance associates a clipping mask with a graphics context.
*/
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->compliance=(ComplianceType) ParseCommandOption(
MagickComplianceOptions,MagickFalse,token);
break;
}
status=MagickFalse;
break;
}
case 'd':
case 'D':
{
if (LocaleCompare("decorate",keyword) == 0)
{
ssize_t
decorate;
(void) GetNextToken(q,&q,extent,token);
decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse,
token);
if (decorate == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->decorate=(DecorationType) decorate;
break;
}
if (LocaleCompare("density",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->density,token);
break;
}
if (LocaleCompare("direction",keyword) == 0)
{
ssize_t
direction;
(void) GetNextToken(q,&q,extent,token);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
token);
if (direction == -1)
status=MagickFalse;
else
graphic_context[n]->direction=(DirectionType) direction;
break;
}
status=MagickFalse;
break;
}
case 'e':
case 'E':
{
if (LocaleCompare("ellipse",keyword) == 0)
{
primitive_type=EllipsePrimitive;
break;
}
if (LocaleCompare("encoding",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->encoding,token);
break;
}
status=MagickFalse;
break;
}
case 'f':
case 'F':
{
if (LocaleCompare("fill",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->fill_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->fill,
&image->exception);
if (graphic_context[n]->fill_opacity != OpaqueOpacity)
graphic_context[n]->fill.opacity=ClampToQuantum(
graphic_context[n]->fill_opacity);
}
break;
}
if (LocaleCompare("fill-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->fill_opacity*=(1.0-opacity);
else
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
if (graphic_context[n]->fill.opacity != TransparentOpacity)
graphic_context[n]->fill.opacity=(Quantum)
graphic_context[n]->fill_opacity;
else
graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("fill-rule",keyword) == 0)
{
ssize_t
fill_rule;
(void) GetNextToken(q,&q,extent,token);
fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse,
token);
if (fill_rule == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->fill_rule=(FillRule) fill_rule;
break;
}
if (LocaleCompare("font",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->font,token);
if (LocaleCompare("none",token) == 0)
graphic_context[n]->font=(char *) RelinquishMagickMemory(
graphic_context[n]->font);
break;
}
if (LocaleCompare("font-family",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->family,token);
break;
}
if (LocaleCompare("font-size",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->pointsize=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("font-stretch",keyword) == 0)
{
ssize_t
stretch;
(void) GetNextToken(q,&q,extent,token);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token);
if (stretch == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->stretch=(StretchType) stretch;
break;
}
if (LocaleCompare("font-style",keyword) == 0)
{
ssize_t
style;
(void) GetNextToken(q,&q,extent,token);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,token);
if (style == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->style=(StyleType) style;
break;
}
if (LocaleCompare("font-weight",keyword) == 0)
{
ssize_t
weight;
(void) GetNextToken(q,&q,extent,token);
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(token);
graphic_context[n]->weight=(size_t) weight;
break;
}
status=MagickFalse;
break;
}
case 'g':
case 'G':
{
if (LocaleCompare("gradient-units",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("gravity",keyword) == 0)
{
ssize_t
gravity;
(void) GetNextToken(q,&q,extent,token);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token);
if (gravity == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->gravity=(GravityType) gravity;
break;
}
status=MagickFalse;
break;
}
case 'i':
case 'I':
{
if (LocaleCompare("image",keyword) == 0)
{
ssize_t
compose;
primitive_type=ImagePrimitive;
(void) GetNextToken(q,&q,extent,token);
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token);
if (compose == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->compose=(CompositeOperator) compose;
break;
}
if (LocaleCompare("interline-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interline_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("interword-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'k':
case 'K':
{
if (LocaleCompare("kerning",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->kerning=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'l':
case 'L':
{
if (LocaleCompare("letter-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (IsPoint(token) == MagickFalse)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
clone_info->text=AcquireString(" ");
status&=GetTypeMetrics(image,clone_info,&metrics);
graphic_context[n]->kerning=metrics.width*
StringToDouble(token,&next_token);
clone_info=DestroyDrawInfo(clone_info);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("line",keyword) == 0)
{
primitive_type=LinePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'm':
case 'M':
{
if (LocaleCompare("mask",keyword) == 0)
{
const char
*mask_path;
/*
Take a node from within the MVG document, and duplicate it here.
*/
(void) GetNextToken(q,&q,extent,token);
mask_path=(const char *) GetValueFromSplayTree(macros,token);
if (mask_path != (const char *) NULL)
{
if (graphic_context[n]->composite_mask != (Image *) NULL)
graphic_context[n]->composite_mask=
DestroyImage(graphic_context[n]->composite_mask);
graphic_context[n]->composite_mask=DrawCompositeMask(image,
graphic_context[n],token,mask_path,&image->exception);
if (graphic_context[n]->compliance != SVGCompliance)
status=SetImageMask(image,graphic_context[n]->composite_mask);
}
break;
}
if (LocaleCompare("matte",keyword) == 0)
{
primitive_type=MattePrimitive;
break;
}
status=MagickFalse;
break;
}
case 'o':
case 'O':
{
if (LocaleCompare("offset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
{
graphic_context[n]->fill_opacity*=(1.0-opacity);
graphic_context[n]->stroke_opacity*=(1.0-opacity);
}
else
{
graphic_context[n]->fill_opacity=(QuantumRange-
graphic_context[n]->fill_opacity)*(1.0-opacity);
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
}
break;
}
status=MagickFalse;
break;
}
case 'p':
case 'P':
{
if (LocaleCompare("path",keyword) == 0)
{
primitive_type=PathPrimitive;
break;
}
if (LocaleCompare("point",keyword) == 0)
{
primitive_type=PointPrimitive;
break;
}
if (LocaleCompare("polyline",keyword) == 0)
{
primitive_type=PolylinePrimitive;
break;
}
if (LocaleCompare("polygon",keyword) == 0)
{
primitive_type=PolygonPrimitive;
break;
}
if (LocaleCompare("pop",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
break;
if (LocaleCompare("clip-path",token) == 0)
break;
if (LocaleCompare("defs",token) == 0)
{
defsDepth--;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
break;
if (LocaleCompare("graphic-context",token) == 0)
{
if (n <= 0)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),DrawError,
"UnbalancedGraphicContextPushPop","`%s'",token);
status=MagickFalse;
n=0;
break;
}
if ((graphic_context[n]->clip_mask != (char *) NULL) &&
(graphic_context[n]->compliance != SVGCompliance))
if (LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0)
status=SetImageClipMask(image,(Image *) NULL);
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
n--;
break;
}
if (LocaleCompare("mask",token) == 0)
break;
if (LocaleCompare("pattern",token) == 0)
break;
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth--;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
if (LocaleCompare("push",keyword) == 0)
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare("class",token) == 0)
{
/*
Class context.
*/
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"class") != 0)
continue;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("clip-path",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"clip-path") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("defs",token) == 0)
{
defsDepth++;
graphic_context[n]->render=defsDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
if (LocaleCompare("gradient",token) == 0)
{
char
key[2*MaxTextExtent],
name[MaxTextExtent],
type[MaxTextExtent];
SegmentInfo
segment;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(type,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
segment.x1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y1=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.x2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
segment.y2=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (LocaleCompare(type,"radial") == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
}
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"gradient") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
bounds.x1=graphic_context[n]->affine.sx*segment.x1+
graphic_context[n]->affine.ry*segment.y1+
graphic_context[n]->affine.tx;
bounds.y1=graphic_context[n]->affine.rx*segment.x1+
graphic_context[n]->affine.sy*segment.y1+
graphic_context[n]->affine.ty;
bounds.x2=graphic_context[n]->affine.sx*segment.x2+
graphic_context[n]->affine.ry*segment.y2+
graphic_context[n]->affine.tx;
bounds.y2=graphic_context[n]->affine.rx*segment.x2+
graphic_context[n]->affine.sy*segment.y2+
graphic_context[n]->affine.ty;
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-type",name);
(void) SetImageArtifact(image,key,type);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%gx%g%+.15g%+.15g",
MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0),
MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0),
bounds.x1,bounds.y1);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("graphic-context",token) == 0)
{
n++;
graphic_context=(DrawInfo **) ResizeQuantumMemory(
graphic_context,(size_t) (n+1),sizeof(*graphic_context));
if (graphic_context == (DrawInfo **) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
break;
}
graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,
graphic_context[n-1]);
if (*q == '"')
{
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&graphic_context[n]->id,token);
}
break;
}
if (LocaleCompare("mask",token) == 0)
{
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("pattern",token) == 0)
{
RectangleInfo
bounds;
(void) GetNextToken(q,&q,extent,token);
(void) CopyMagickString(name,token,MaxTextExtent);
(void) GetNextToken(q,&q,extent,token);
bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.width=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
bounds.height=(size_t) floor(StringToDouble(token,&next_token)+
0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
for (p=q; *q != '\0'; )
{
if (GetNextToken(q,&q,extent,token) < 1)
break;
if (LocaleCompare(token,"pop") != 0)
continue;
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (LocaleCompare(token,"pattern") != 0)
continue;
break;
}
if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p))
{
status=MagickFalse;
break;
}
(void) CopyMagickString(token,p,(size_t) (q-p-4+1));
(void) FormatLocaleString(key,MaxTextExtent,"%s",name);
(void) SetImageArtifact(image,key,token);
(void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name);
(void) FormatLocaleString(geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double)
bounds.height,(double) bounds.x,(double) bounds.y);
(void) SetImageArtifact(image,key,geometry);
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("symbol",token) == 0)
{
symbolDepth++;
graphic_context[n]->render=symbolDepth > 0 ? MagickFalse :
MagickTrue;
break;
}
status=MagickFalse;
break;
}
status=MagickFalse;
break;
}
case 'r':
case 'R':
{
if (LocaleCompare("rectangle",keyword) == 0)
{
primitive_type=RectanglePrimitive;
break;
}
if (LocaleCompare("rotate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0)));
affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0)));
affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0))));
affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0)));
break;
}
if (LocaleCompare("roundRectangle",keyword) == 0)
{
primitive_type=RoundRectanglePrimitive;
break;
}
status=MagickFalse;
break;
}
case 's':
case 'S':
{
if (LocaleCompare("scale",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.sx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.sy=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("skewX",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.ry=sin(DegreesToRadians(angle));
break;
}
if (LocaleCompare("skewY",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
affine.rx=(-tan(DegreesToRadians(angle)/2.0));
break;
}
if (LocaleCompare("stop-color",keyword) == 0)
{
GradientType
type;
PixelPacket
stop_color;
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&stop_color,&image->exception);
type=LinearGradient;
if (draw_info->gradient.type == RadialGradient)
type=RadialGradient;
(void) GradientImage(image,type,PadSpread,&start_color,&stop_color);
start_color=stop_color;
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
(void) FormatLocaleString(pattern,MaxTextExtent,"%s",token);
if (GetImageArtifact(image,pattern) != (const char *) NULL)
(void) DrawPatternPath(image,draw_info,token,
&graphic_context[n]->stroke_pattern);
else
{
status&=QueryColorDatabase(token,&graphic_context[n]->stroke,
&image->exception);
if (graphic_context[n]->stroke_opacity != OpaqueOpacity)
graphic_context[n]->stroke.opacity=ClampToQuantum(
graphic_context[n]->stroke_opacity);
}
break;
}
if (LocaleCompare("stroke-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("stroke-dasharray",keyword) == 0)
{
if (graphic_context[n]->dash_pattern != (double *) NULL)
graphic_context[n]->dash_pattern=(double *)
RelinquishMagickMemory(graphic_context[n]->dash_pattern);
if (IsPoint(q) != MagickFalse)
{
const char
*p;
p=q;
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
for (x=0; IsPoint(token) != MagickFalse; x++)
{
(void) GetNextToken(p,&p,extent,token);
if (*token == ',')
(void) GetNextToken(p,&p,extent,token);
}
graphic_context[n]->dash_pattern=(double *)
AcquireQuantumMemory((size_t) (2*x+2),
sizeof(*graphic_context[n]->dash_pattern));
if (graphic_context[n]->dash_pattern == (double *) NULL)
{
(void) ThrowMagickException(&image->exception,
GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","`%s'",image->filename);
status=MagickFalse;
break;
}
(void) memset(graphic_context[n]->dash_pattern,0,(size_t)
(2*x+2)*sizeof(*graphic_context[n]->dash_pattern));
for (j=0; j < x; j++)
{
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_pattern[j]=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->dash_pattern[j] < 0.0)
status=MagickFalse;
}
if ((x & 0x01) != 0)
for ( ; j < (2*x); j++)
graphic_context[n]->dash_pattern[j]=
graphic_context[n]->dash_pattern[j-x];
graphic_context[n]->dash_pattern[j]=0.0;
break;
}
(void) GetNextToken(q,&q,extent,token);
break;
}
if (LocaleCompare("stroke-dashoffset",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->dash_offset=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
if (LocaleCompare("stroke-linecap",keyword) == 0)
{
ssize_t
linecap;
(void) GetNextToken(q,&q,extent,token);
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token);
if (linecap == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linecap=(LineCap) linecap;
break;
}
if (LocaleCompare("stroke-linejoin",keyword) == 0)
{
ssize_t
linejoin;
(void) GetNextToken(q,&q,extent,token);
linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse,
token);
if (linejoin == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->linejoin=(LineJoin) linejoin;
break;
}
if (LocaleCompare("stroke-miterlimit",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->miterlimit=StringToUnsignedLong(token);
break;
}
if (LocaleCompare("stroke-opacity",keyword) == 0)
{
double
opacity;
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0;
opacity=MagickMin(MagickMax(factor*
StringToDouble(token,&next_token),0.0),1.0);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (graphic_context[n]->compliance == SVGCompliance)
graphic_context[n]->stroke_opacity*=(1.0-opacity);
else
graphic_context[n]->stroke_opacity=(QuantumRange-
graphic_context[n]->stroke_opacity)*(1.0-opacity);
if (graphic_context[n]->stroke.opacity != TransparentOpacity)
graphic_context[n]->stroke.opacity=(Quantum)
graphic_context[n]->stroke_opacity;
else
graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange*
opacity);
break;
}
if (LocaleCompare("stroke-width",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
if (graphic_context[n]->clip_path != MagickFalse)
break;
graphic_context[n]->stroke_width=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 't':
case 'T':
{
if (LocaleCompare("text",keyword) == 0)
{
primitive_type=TextPrimitive;
cursor=0.0;
break;
}
if (LocaleCompare("text-align",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-anchor",keyword) == 0)
{
ssize_t
align;
(void) GetNextToken(q,&q,extent,token);
align=ParseCommandOption(MagickAlignOptions,MagickFalse,token);
if (align == -1)
{
status=MagickFalse;
break;
}
graphic_context[n]->align=(AlignType) align;
break;
}
if (LocaleCompare("text-antialias",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->text_antialias=StringToLong(token) != 0 ?
MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("text-undercolor",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
status&=QueryColorDatabase(token,&graphic_context[n]->undercolor,
&image->exception);
break;
}
if (LocaleCompare("translate",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
affine.tx=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
affine.ty=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'u':
case 'U':
{
if (LocaleCompare("use",keyword) == 0)
{
const char
*use;
/*
Get a macro from the MVG document, and "use" it here.
*/
(void) GetNextToken(q,&q,extent,token);
use=(const char *) GetValueFromSplayTree(macros,token);
if (use != (const char *) NULL)
{
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
(void) CloneString(&clone_info->primitive,use);
status=RenderMVGContent(image,clone_info,depth+1);
clone_info=DestroyDrawInfo(clone_info);
}
break;
}
break;
}
case 'v':
case 'V':
{
if (LocaleCompare("viewbox",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token,
&next_token)-0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble(
token,&next_token)+0.5);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
case 'w':
case 'W':
{
if (LocaleCompare("word-spacing",keyword) == 0)
{
(void) GetNextToken(q,&q,extent,token);
graphic_context[n]->interword_spacing=StringToDouble(token,
&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
break;
}
status=MagickFalse;
break;
}
default:
{
status=MagickFalse;
break;
}
}
if (status == MagickFalse)
break;
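  /*
    Fold this statement's affine into the inherited transform.  With the
    column-vector convention

      | x' |   | sx  ry  tx | | x |
      | y' | = | rx  sy  ty | | y |
      | 1  |   |  0   0   1 | | 1 |

    the update below is the matrix product current*affine, so the statement's
    affine is applied first and the inherited transform second.
  */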
if ((fabs(affine.sx-1.0) >= MagickEpsilon) ||
(fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) ||
(fabs(affine.sy-1.0) >= MagickEpsilon) ||
(fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon))
{
graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx;
graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx;
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy;
graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy;
graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+
current.tx;
graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+
current.ty;
}
if (primitive_type == UndefinedPrimitive)
{
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int)
(q-p-1),p);
continue;
}
/*
Parse the primitive attributes.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
i=0;
mvg_info.offset=i;
j=0;
primitive_info[0].point.x=0.0;
primitive_info[0].point.y=0.0;
primitive_info[0].coordinates=0;
primitive_info[0].method=FloodfillMethod;
primitive_info[0].closed_subpath=MagickFalse;
for (x=0; *q != '\0'; x++)
{
/*
Define points.
*/
if (IsPoint(q) == MagickFalse)
break;
(void) GetNextToken(q,&q,extent,token);
point.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,&q,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
point.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(q,(const char **) NULL,extent,token);
if (*token == ',')
(void) GetNextToken(q,&q,extent,token);
primitive_info[i].primitive=primitive_type;
primitive_info[i].point=point;
primitive_info[i].coordinates=0;
primitive_info[i].method=FloodfillMethod;
primitive_info[i].closed_subpath=MagickFalse;
i++;
mvg_info.offset=i;
if (i < (ssize_t) number_points)
continue;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
if (status == MagickFalse)
break;
primitive_info[j].primitive=primitive_type;
primitive_info[j].coordinates=(size_t) x;
primitive_info[j].method=FloodfillMethod;
primitive_info[j].closed_subpath=MagickFalse;
  /*
    Compute the primitive's bounding box; the circle/ellipse cases below
    derive a circumscribing radius from it.
  */
bounds.x1=primitive_info[j].point.x;
bounds.y1=primitive_info[j].point.y;
bounds.x2=primitive_info[j].point.x;
bounds.y2=primitive_info[j].point.y;
for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++)
{
point=primitive_info[j+k].point;
if (point.x < bounds.x1)
bounds.x1=point.x;
if (point.y < bounds.y1)
bounds.y1=point.y;
if (point.x > bounds.x2)
bounds.x2=point.x;
if (point.y > bounds.y2)
bounds.y2=point.y;
}
/*
Speculate how many points our primitive might consume.
*/
coordinates=(double) primitive_info[j].coordinates;
switch (primitive_type)
{
case RectanglePrimitive:
{
coordinates*=5.0;
break;
}
case RoundRectanglePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot((double) alpha,(double) beta);
coordinates*=5.0;
coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0*
BezierQuantum+360.0;
break;
}
case BezierPrimitive:
{
coordinates=(double) (BezierQuantum*primitive_info[j].coordinates);
if (primitive_info[j].coordinates > (107*BezierQuantum))
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
DrawError,"TooManyBezierCoordinates","`%s'",token);
status=MagickFalse;
break;
}
break;
}
case PathPrimitive:
{
char
*s,
*t;
(void) GetNextToken(q,&q,extent,token);
coordinates=1.0;
t=token;
for (s=token; *s != '\0'; s=t)
{
double
value;
value=StringToDouble(s,&t);
(void) value;
if (s == t)
{
t++;
continue;
}
coordinates++;
}
for (s=token; *s != '\0'; s++)
if (strspn(s,"AaCcQqSsTt") != 0)
coordinates+=(20.0*BezierQuantum)+360.0;
break;
}
case CirclePrimitive:
case ArcPrimitive:
case EllipsePrimitive:
{
double
alpha,
beta,
radius;
alpha=bounds.x2-bounds.x1;
beta=bounds.y2-bounds.y1;
radius=hypot(alpha,beta);
coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0;
break;
}
default:
break;
}
if (status == MagickFalse)
break;
if (((size_t) (i+coordinates)) >= number_points)
{
/*
Resize based on speculative points required by primitive.
*/
number_points+=coordinates+1;
if (number_points < (size_t) coordinates)
{
(void) ThrowMagickException(&image->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
break;
}
mvg_info.offset=i;
status&=CheckPrimitiveExtent(&mvg_info,number_points);
}
status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad);
if (status == MagickFalse)
break;
mvg_info.offset=j;
switch (primitive_type)
{
case PointPrimitive:
default:
{
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
status&=TracePoint(primitive_info+j,primitive_info[j].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case LinePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceLine(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RectanglePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceRectangle(primitive_info+j,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case RoundRectanglePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+2].point.x < 0.0) ||
(primitive_info[j+2].point.y < 0.0))
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0)
{
status=MagickFalse;
break;
}
status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case ArcPrimitive:
{
if (primitive_info[j].coordinates != 3)
{
primitive_type=UndefinedPrimitive;
break;
}
status&=TraceArc(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case EllipsePrimitive:
{
if (primitive_info[j].coordinates != 3)
{
status=MagickFalse;
break;
}
if ((primitive_info[j+1].point.x < 0.0) ||
(primitive_info[j+1].point.y < 0.0))
{
status=MagickFalse;
break;
}
status&=TraceEllipse(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point,primitive_info[j+2].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case CirclePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
status&=TraceCircle(&mvg_info,primitive_info[j].point,
primitive_info[j+1].point);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PolylinePrimitive:
{
if (primitive_info[j].coordinates < 1)
{
status=MagickFalse;
break;
}
break;
}
case PolygonPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
primitive_info[i]=primitive_info[j];
primitive_info[i].coordinates=0;
primitive_info[j].coordinates++;
primitive_info[j].closed_subpath=MagickTrue;
i++;
break;
}
case BezierPrimitive:
{
if (primitive_info[j].coordinates < 3)
{
status=MagickFalse;
break;
}
status&=TraceBezier(&mvg_info,primitive_info[j].coordinates);
i=(ssize_t) (j+primitive_info[j].coordinates);
break;
}
case PathPrimitive:
{
coordinates=(double) TracePath(image,&mvg_info,token);
if (coordinates == 0)
{
status=MagickFalse;
break;
}
i=(ssize_t) (j+coordinates);
break;
}
case ColorPrimitive:
case MattePrimitive:
{
ssize_t
method;
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
method=ParseCommandOption(MagickMethodOptions,MagickFalse,token);
if (method == -1)
{
status=MagickFalse;
break;
}
primitive_info[j].method=(PaintMethod) method;
break;
}
case TextPrimitive:
{
char
geometry[MagickPathExtent];
if (primitive_info[j].coordinates != 1)
{
status=MagickFalse;
break;
}
if (*token != ',')
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
/*
Compute text cursor offset.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]);
if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) &&
(fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon))
{
mvg_info.point=primitive_info->point;
primitive_info->point.x+=cursor;
}
else
{
mvg_info.point=primitive_info->point;
cursor=0.0;
}
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
clone_info->render=MagickFalse;
clone_info->text=AcquireString(token);
status&=GetTypeMetrics(image,clone_info,&metrics);
clone_info=DestroyDrawInfo(clone_info);
cursor+=metrics.width;
if (graphic_context[n]->compliance != SVGCompliance)
cursor=0.0;
break;
}
case ImagePrimitive:
{
if (primitive_info[j].coordinates != 2)
{
status=MagickFalse;
break;
}
(void) GetNextToken(q,&q,extent,token);
(void) CloneString(&primitive_info[j].text,token);
break;
}
}
mvg_info.offset=i;
if (primitive_info == (PrimitiveInfo *) NULL)
break;
if ((image->debug != MagickFalse) && (q > p))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),
p);
if (status == MagickFalse)
break;
primitive_info[i].primitive=UndefinedPrimitive;
if (i == 0)
continue;
/*
Transform points.
*/
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+
graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx;
primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+
graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty;
point=primitive_info[i].point;
if (point.x < graphic_context[n]->bounds.x1)
graphic_context[n]->bounds.x1=point.x;
if (point.y < graphic_context[n]->bounds.y1)
graphic_context[n]->bounds.y1=point.y;
if (point.x > graphic_context[n]->bounds.x2)
graphic_context[n]->bounds.x2=point.x;
if (point.y > graphic_context[n]->bounds.y2)
graphic_context[n]->bounds.y2=point.y;
if (primitive_info[i].primitive == ImagePrimitive)
break;
if (i >= (ssize_t) number_points)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
}
if (graphic_context[n]->render != MagickFalse)
{
if ((n != 0) && (graphic_context[n]->compliance != SVGCompliance) &&
(graphic_context[n]->clip_mask != (char *) NULL) &&
(LocaleCompare(graphic_context[n]->clip_mask,
graphic_context[n-1]->clip_mask) != 0))
{
const char
*clip_path;
clip_path=(const char *) GetValueFromSplayTree(macros,
graphic_context[n]->clip_mask);
if (clip_path != (const char *) NULL)
(void) SetImageArtifact(image,graphic_context[n]->clip_mask,
clip_path);
status&=DrawClipPath(image,graphic_context[n],
graphic_context[n]->clip_mask);
}
status&=DrawPrimitive(image,graphic_context[n],primitive_info);
}
proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType)
primitive_extent);
if (proceed == MagickFalse)
break;
if (status == 0)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image");
/*
Relinquish resources.
*/
macros=DestroySplayTree(macros);
token=DestroyString(token);
if (primitive_info != (PrimitiveInfo *) NULL)
{
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
if ((primitive_info[i].primitive == TextPrimitive) ||
(primitive_info[i].primitive == ImagePrimitive))
if (primitive_info[i].text != (char *) NULL)
primitive_info[i].text=DestroyString(primitive_info[i].text);
primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
}
primitive=DestroyString(primitive);
for ( ; n >= 0; n--)
graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
if (status == MagickFalse)
ThrowBinaryImageException(DrawError,
"NonconformingDrawingPrimitiveDefinition",keyword);
return(status != 0 ? MagickTrue : MagickFalse);
}
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
return(RenderMVGContent(image,draw_info,0));
}
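/*
  A minimal usage sketch for DrawImage() (illustrative only; the MVG string
  is hypothetical):

    DrawInfo
      *draw_info;

    draw_info=CloneDrawInfo((ImageInfo *) NULL,(DrawInfo *) NULL);
    (void) CloneString(&draw_info->primitive,"fill red circle 50,50 50,80");
    (void) DrawImage(image,draw_info);
    draw_info=DestroyDrawInfo(draw_info);

  On failure DrawImage() returns MagickFalse and records the reason in
  image->exception.
*/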
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P a t t e r n P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() renders a named pattern's MVG path into the pattern
%  image.
%
% The format of the DrawPatternPath method is:
%
% MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
% const char *name,Image **pattern)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o name: the pattern name.
%
%    o pattern: the pattern image, created and rendered by this method.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
const DrawInfo *draw_info,const char *name,Image **pattern)
{
char
property[MaxTextExtent];
const char
*geometry,
*path,
*type;
DrawInfo
*clone_info;
ImageInfo
*image_info;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (const DrawInfo *) NULL);
assert(name != (const char *) NULL);
(void) FormatLocaleString(property,MaxTextExtent,"%s",name);
path=GetImageArtifact(image,property);
if (path == (const char *) NULL)
return(MagickFalse);
(void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
geometry=GetImageArtifact(image,property);
if (geometry == (const char *) NULL)
return(MagickFalse);
if ((*pattern) != (Image *) NULL)
*pattern=DestroyImage(*pattern);
image_info=AcquireImageInfo();
image_info->size=AcquireString(geometry);
*pattern=AcquireImage(image_info);
image_info=DestroyImageInfo(image_info);
(void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
&image->exception);
(void) SetImageBackgroundColor(*pattern);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"begin pattern-path %s %s",name,geometry);
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill_pattern=NewImageList();
clone_info->stroke_pattern=NewImageList();
(void) FormatLocaleString(property,MaxTextExtent,"%s-type",name);
type=GetImageArtifact(image,property);
if (type != (const char *) NULL)
clone_info->gradient.type=(GradientType) ParseCommandOption(
MagickGradientOptions,MagickFalse,type);
(void) CloneString(&clone_info->primitive,path);
status=RenderMVGContent(*pattern,clone_info,0);
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
return(status);
}
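/*
  An illustrative sketch of how DrawPatternPath() is typically fed (names
  and values are hypothetical): the caller stashes the pattern's MVG path
  and tile geometry as image artifacts, then asks for the rendered tile.

    Image
      *pattern = (Image *) NULL;

    (void) SetImageArtifact(image,"hearts","fill red circle 10,10 10,15");
    (void) SetImageArtifact(image,"hearts-geometry","20x20");
    (void) DrawPatternPath(image,draw_info,"hearts",&pattern);

  An optional "hearts-type" artifact, when present, selects a gradient type
  via MagickGradientOptions.
*/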
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w P o l y g o n P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPolygonPrimitive() draws a polygon on the image.
%
% The format of the DrawPolygonPrimitive method is:
%
% MagickBooleanType DrawPolygonPrimitive(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info)
{
register ssize_t
i;
assert(polygon_info != (PolygonInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (polygon_info[i] != (PolygonInfo *) NULL)
polygon_info[i]=DestroyPolygonInfo(polygon_info[i]);
polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info);
return(polygon_info);
}
static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info,
const PrimitiveInfo *primitive_info)
{
PathInfo
*magick_restrict path_info;
PolygonInfo
**polygon_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads,
sizeof(*polygon_info));
if (polygon_info == (PolygonInfo **) NULL)
return((PolygonInfo **) NULL);
(void) memset(polygon_info,0,number_threads*sizeof(*polygon_info));
path_info=ConvertPrimitiveToPath(draw_info,primitive_info);
if (path_info == (PathInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
for (i=0; i < (ssize_t) number_threads; i++)
{
polygon_info[i]=ConvertPathToPolygon(path_info);
if (polygon_info[i] == (PolygonInfo *) NULL)
return(DestroyPolygonThreadSet(polygon_info));
}
path_info=(PathInfo *) RelinquishMagickMemory(path_info);
return(polygon_info);
}
static double GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
const ssize_t y,double *stroke_opacity)
{
double
alpha,
beta,
distance,
subpath_opacity;
PointInfo
delta;
register EdgeInfo
*p;
register const PointInfo
*q;
register ssize_t
i;
ssize_t
j,
winding_number;
/*
Compute fill & stroke opacity for this (x,y) point.
*/
*stroke_opacity=0.0;
subpath_opacity=0.0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= (p->bounds.y1-mid-0.5))
break;
if ((double) y > (p->bounds.y2+mid+0.5))
{
(void) DestroyEdge(polygon_info,(size_t) j);
continue;
}
if (((double) x <= (p->bounds.x1-mid-0.5)) ||
((double) x > (p->bounds.x2+mid+0.5)))
continue;
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) p->number_points; i++)
{
if ((double) y <= (p->points[i-1].y-mid-0.5))
break;
if ((double) y > (p->points[i].y+mid+0.5))
continue;
if (p->scanline != (double) y)
{
p->scanline=(double) y;
p->highwater=(size_t) i;
}
/*
Compute distance between a point and an edge.
*/
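        /*
          beta below is the (unnormalized) projection of (x,y) onto the
          segment from q to q+1: beta <= 0 means q is the nearest point,
          beta >= |delta|^2 means q+1 is, and otherwise the foot of the
          perpendicular lies inside the segment, where the squared distance
          is cross(delta,(x,y)-q)^2/|delta|^2.
        */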
q=p->points+i-1;
delta.x=(q+1)->x-q->x;
delta.y=(q+1)->y-q->y;
beta=delta.x*(x-q->x)+delta.y*(y-q->y);
if (beta <= 0.0)
{
delta.x=(double) x-q->x;
delta.y=(double) y-q->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=delta.x*delta.x+delta.y*delta.y;
if (beta >= alpha)
{
delta.x=(double) x-(q+1)->x;
delta.y=(double) y-(q+1)->y;
distance=delta.x*delta.x+delta.y*delta.y;
}
else
{
alpha=PerceptibleReciprocal(alpha);
beta=delta.x*(y-q->y)-delta.y*(x-q->x);
distance=alpha*beta*beta;
}
}
/*
Compute stroke & subpath opacity.
*/
beta=0.0;
if (p->ghostline == MagickFalse)
{
alpha=mid+0.5;
if ((*stroke_opacity < 1.0) &&
(distance <= ((alpha+0.25)*(alpha+0.25))))
{
alpha=mid-0.5;
if (distance <= ((alpha+0.25)*(alpha+0.25)))
*stroke_opacity=1.0;
else
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt((double) distance);
alpha=beta-mid-0.5;
if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
*stroke_opacity=(alpha-0.25)*(alpha-0.25);
}
}
}
if ((fill == MagickFalse) || (distance > 1.0) || (subpath_opacity >= 1.0))
continue;
if (distance <= 0.0)
{
subpath_opacity=1.0;
continue;
}
if (distance > 1.0)
continue;
if (fabs(beta) < MagickEpsilon)
{
beta=1.0;
if (fabs(distance-1.0) >= MagickEpsilon)
beta=sqrt(distance);
}
alpha=beta-1.0;
if (subpath_opacity < (alpha*alpha))
subpath_opacity=alpha*alpha;
}
}
/*
Compute fill opacity.
*/
if (fill == MagickFalse)
return(0.0);
if (subpath_opacity >= 1.0)
return(1.0);
/*
Determine winding number.
*/
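  /*
    Sum signed crossings of a horizontal ray through (x,y): an edge wholly to
    the point's left counts directly, while the cross-product test below
    (delta x ((x,y)-q) <= 0, with delta the edge vector) decides whether a
    straddling edge passes on the point's left.  The nonzero rule fills where
    the sum is nonzero; the even-odd rule where it is odd.
  */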
winding_number=0;
p=polygon_info->edges;
for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
{
if ((double) y <= p->bounds.y1)
break;
if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
continue;
if ((double) x > p->bounds.x2)
{
winding_number+=p->direction ? 1 : -1;
continue;
}
i=(ssize_t) MagickMax((double) p->highwater,1.0);
for ( ; i < (ssize_t) (p->number_points-1); i++)
if ((double) y <= p->points[i].y)
break;
q=p->points+i-1;
if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
winding_number+=p->direction ? 1 : -1;
}
if (fill_rule != NonZeroRule)
{
if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
return(1.0);
}
else
if (MagickAbsoluteValue(winding_number) != 0)
return(1.0);
return(subpath_opacity);
}
static MagickBooleanType DrawPolygonPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
CacheView
*image_view;
double
mid;
ExceptionInfo
*exception;
MagickBooleanType
fill,
status;
PolygonInfo
**magick_restrict polygon_info;
register EdgeInfo
*p;
register ssize_t
i;
SegmentInfo
bounds;
ssize_t
start_y,
stop_y,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(draw_info != (DrawInfo *) NULL);
assert(draw_info->signature == MagickCoreSignature);
assert(primitive_info != (PrimitiveInfo *) NULL);
if (primitive_info->coordinates <= 1)
return(MagickTrue);
/*
Compute bounding box.
*/
polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info);
if (polygon_info == (PolygonInfo **) NULL)
return(MagickFalse);
DisableMSCWarning(4127)
if (0)
{
status=DrawBoundingRectangles(image,draw_info,polygon_info[0]);
if (status == MagickFalse)
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(status);
}
}
RestoreMSCWarning
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon");
fill=(primitive_info->method == FillToBorderMethod) ||
(primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
bounds=polygon_info[0]->edges[0].bounds;
for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
{
p=polygon_info[0]->edges+i;
if (p->bounds.x1 < bounds.x1)
bounds.x1=p->bounds.x1;
if (p->bounds.y1 < bounds.y1)
bounds.y1=p->bounds.y1;
if (p->bounds.x2 > bounds.x2)
bounds.x2=p->bounds.x2;
if (p->bounds.y2 > bounds.y2)
bounds.y2=p->bounds.y2;
}
bounds.x1-=(mid+1.0);
bounds.y1-=(mid+1.0);
bounds.x2+=(mid+1.0);
bounds.y2+=(mid+1.0);
if ((bounds.x1 >= (double) image->columns) ||
(bounds.y1 >= (double) image->rows) ||
(bounds.x2 <= 0.0) || (bounds.y2 <= 0.0))
{
polygon_info=DestroyPolygonThreadSet(polygon_info);
return(MagickTrue); /* virtual polygon */
}
bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x1;
bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y1;
bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ?
(double) image->columns-1.0 : bounds.x2;
bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ?
(double) image->rows-1.0 : bounds.y2;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
if ((primitive_info->coordinates == 1) ||
(polygon_info[0]->number_edges == 0))
{
/*
Draw point.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
MagickBooleanType
sync;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
x=start_x;
q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for ( ; x <= stop_x; x++)
{
if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
(y == (ssize_t) ceil(primitive_info->point.y-0.5)))
(void) GetFillColor(draw_info,x-start_x,y-start_y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-polygon");
return(status);
}
/*
Draw polygon or line.
*/
start_y=(ssize_t) ceil(bounds.y1-0.5);
stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
for (y=start_y; y <= stop_y; y++)
{
const int
id = GetOpenMPThreadId();
double
fill_opacity,
stroke_opacity;
PixelPacket
fill_color,
stroke_color;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
start_x,
stop_x;
if (status == MagickFalse)
continue;
start_x=(ssize_t) ceil(bounds.x1-0.5);
stop_x=(ssize_t) floor(bounds.x2+0.5);
q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
1),1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=start_x; x <= stop_x; x++)
{
/*
Fill and/or stroke.
*/
fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill,
draw_info->fill_rule,x,y,&stroke_opacity);
if (draw_info->stroke_antialias == MagickFalse)
{
fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0;
stroke_opacity=stroke_opacity > 0.25 ? 1.0 : 0.0;
}
(void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color);
fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange-
fill_color.opacity));
MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q,
(MagickRealType) q->opacity,q);
(void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color);
stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange-
stroke_color.opacity));
MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q,
(MagickRealType) q->opacity,q);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
polygon_info=DestroyPolygonThreadSet(polygon_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D r a w P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
% The format of the DrawPrimitive method is:
%
% MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
% PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
*/
static inline double ConstrainCoordinate(double x)
{
if (x < (double) -(SSIZE_MAX-512))
return((double) -(SSIZE_MAX-512));
if (x > (double) (SSIZE_MAX-512))
return((double) (SSIZE_MAX-512));
return(x);
}
static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
const char
*methods[] =
{
"point",
"replace",
"floodfill",
"filltoborder",
"reset",
"?"
};
PointInfo
p,
q,
point;
register ssize_t
i,
x;
ssize_t
coordinates,
y;
x=(ssize_t) ceil(primitive_info->point.x-0.5);
y=(ssize_t) ceil(primitive_info->point.y-0.5);
switch (primitive_info->primitive)
{
case PointPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case ColorPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case MattePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"MattePrimitive %.20g,%.20g %s",(double) x,(double) y,
methods[primitive_info->method]);
return;
}
case TextPrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"TextPrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
case ImagePrimitive:
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
"ImagePrimitive %.20g,%.20g",(double) x,(double) y);
return;
}
default:
break;
}
coordinates=0;
p=primitive_info[0].point;
q.x=(-1.0);
q.y=(-1.0);
for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
{
point=primitive_info[i].point;
if (coordinates <= 0)
{
coordinates=(ssize_t) primitive_info[i].coordinates;
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin open (%.20g)",(double) coordinates);
p=point;
}
point=primitive_info[i].point;
if ((fabs(q.x-point.x) >= MagickEpsilon) ||
(fabs(q.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
q=point;
coordinates--;
if (coordinates > 0)
continue;
if ((fabs(p.x-point.x) >= MagickEpsilon) ||
(fabs(p.y-point.y) >= MagickEpsilon))
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
(double) coordinates);
else
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
(double) coordinates);
}
}
MagickExport MagickBooleanType DrawPrimitive(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickStatusType
status;
register ssize_t
i,
x;
ssize_t
y;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-primitive");
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
draw_info->affine.tx,draw_info->affine.ty);
}
exception=(&image->exception);
status=MagickTrue;
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsPixelGray(&draw_info->fill) == MagickFalse) ||
(IsPixelGray(&draw_info->stroke) == MagickFalse)))
status=SetImageColorspace(image,sRGBColorspace);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageClipMask(image,draw_info->clipping_mask);
status&=SetImageMask(image,draw_info->composite_mask);
}
x=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.x-0.5));
y=(ssize_t) ceil(ConstrainCoordinate(primitive_info->point.y-0.5));
image_view=AcquireAuthenticCacheView(image,exception);
switch (primitive_info->primitive)
{
case ColorPrimitive:
{
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelPacket
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelPacket
target;
status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) == MagickFalse)
{
q++;
continue;
}
(void) GetFillColor(draw_info,x,y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
MagickPixelPacket
target;
(void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(MagickRealType) draw_info->border_color.red;
target.green=(MagickRealType) draw_info->border_color.green;
target.blue=(MagickRealType) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x,
y,primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) GetFillColor(draw_info,x,y,q);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case MattePrimitive:
{
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
switch (primitive_info->method)
{
case PointMethod:
default:
{
PixelPacket
pixel;
PixelPacket
*q;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case ReplaceMethod:
{
MagickBooleanType
sync;
PixelPacket
pixel,
target;
status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsColorSimilar(image,q,&target) == MagickFalse)
{
q++;
continue;
}
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
case FloodfillMethod:
case FillToBorderMethod:
{
MagickPixelPacket
target;
(void) GetOneVirtualMagickPixel(image,x,y,&target,exception);
if (primitive_info->method == FillToBorderMethod)
{
target.red=(MagickRealType) draw_info->border_color.red;
target.green=(MagickRealType) draw_info->border_color.green;
target.blue=(MagickRealType) draw_info->border_color.blue;
}
status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x,
y,primitive_info->method == FloodfillMethod ? MagickFalse :
MagickTrue);
break;
}
case ResetMethod:
{
MagickBooleanType
sync;
PixelPacket
pixel;
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) GetFillColor(draw_info,x,y,&pixel);
SetPixelOpacity(q,pixel.opacity);
q++;
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
break;
}
break;
}
}
break;
}
case ImagePrimitive:
{
AffineMatrix
affine;
char
composite_geometry[MaxTextExtent];
Image
*composite_image,
*composite_images;
ImageInfo
*clone_info;
RectangleInfo
geometry;
ssize_t
x1,
y1;
if (primitive_info->text == (char *) NULL)
break;
clone_info=AcquireImageInfo();
composite_images=(Image *) NULL;
if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
composite_images=ReadInlineImage(clone_info,primitive_info->text,
&image->exception);
else
if (*primitive_info->text != '\0')
{
(void) CopyMagickString(clone_info->filename,primitive_info->text,
MagickPathExtent);
composite_images=ReadImage(clone_info,exception);
}
clone_info=DestroyImageInfo(clone_info);
if (composite_images == (Image *) NULL)
{
status=0;
break;
}
composite_image=RemoveFirstImageFromList(&composite_images);
composite_images=DestroyImageList(composite_images);
(void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
NULL,(void *) NULL);
x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
{
char
geometry[MaxTextExtent];
/*
Resize image.
*/
(void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!",
primitive_info[1].point.x,primitive_info[1].point.y);
composite_image->filter=image->filter;
(void) TransformImage(&composite_image,(char *) NULL,geometry);
}
if (composite_image->matte == MagickFalse)
(void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel);
if (draw_info->opacity != OpaqueOpacity)
(void) SetImageOpacity(composite_image,draw_info->opacity);
SetGeometry(image,&geometry);
image->gravity=draw_info->gravity;
geometry.x=x;
geometry.y=y;
(void) FormatLocaleString(composite_geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
composite_image->rows,(double) geometry.x,(double) geometry.y);
(void) ParseGravityGeometry(image,composite_geometry,&geometry,
&image->exception);
affine=draw_info->affine;
affine.tx=(double) geometry.x;
affine.ty=(double) geometry.y;
composite_image->interpolate=image->interpolate;
if ((draw_info->compose == OverCompositeOp) ||
(draw_info->compose == SrcOverCompositeOp))
(void) DrawAffineImage(image,composite_image,&affine);
else
(void) CompositeImage(image,draw_info->compose,composite_image,
geometry.x,geometry.y);
composite_image=DestroyImage(composite_image);
break;
}
case PointPrimitive:
{
PixelPacket
fill_color;
PixelPacket
*q;
if ((y < 0) || (y >= (ssize_t) image->rows))
break;
if ((x < 0) || (x >= (ssize_t) image->columns))
break;
q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
if (q == (PixelPacket *) NULL)
break;
(void) GetFillColor(draw_info,x,y,&fill_color);
MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
(MagickRealType) q->opacity,q);
status&=SyncCacheViewAuthenticPixels(image_view,exception);
break;
}
case TextPrimitive:
{
char
geometry[MaxTextExtent];
DrawInfo
*clone_info;
if (primitive_info->text == (char *) NULL)
break;
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
(void) CloneString(&clone_info->text,primitive_info->text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
primitive_info->point.x,primitive_info->point.y);
(void) CloneString(&clone_info->geometry,geometry);
status&=AnnotateImage(image,clone_info);
clone_info=DestroyDrawInfo(clone_info);
break;
}
default:
{
double
mid,
scale;
DrawInfo
*clone_info;
if (IsEventLogging() != MagickFalse)
LogPrimitiveInfo(primitive_info);
scale=ExpandAffine(&draw_info->affine);
if ((draw_info->dash_pattern != (double *) NULL) &&
(fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) &&
(fabs(scale*draw_info->stroke_width) >= MagickEpsilon) &&
(draw_info->stroke.opacity != (Quantum) TransparentOpacity))
{
/*
Draw dash polygon.
*/
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
(void) DrawDashPolygon(draw_info,primitive_info,image);
break;
}
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
if ((mid > 1.0) &&
((draw_info->stroke.opacity != (Quantum) TransparentOpacity) ||
(draw_info->stroke_pattern != (Image *) NULL)))
{
double
x,
y;
MagickBooleanType
closed_path;
/*
Draw strokes while respecting line cap/join attributes.
*/
closed_path=primitive_info[0].closed_subpath;
i=(ssize_t) primitive_info[0].coordinates;
x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x);
y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
closed_path=MagickTrue;
if ((((draw_info->linecap == RoundCap) ||
(closed_path != MagickFalse)) &&
(draw_info->linejoin == RoundJoin)) ||
(primitive_info[i].primitive != UndefinedPrimitive))
{
(void) DrawPolygonPrimitive(image,draw_info,primitive_info);
break;
}
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->stroke_width=0.0;
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
status&=DrawPolygonPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
status&=DrawStrokePolygon(image,draw_info,primitive_info);
break;
}
status&=DrawPolygonPrimitive(image,draw_info,primitive_info);
break;
}
}
image_view=DestroyCacheView(image_view);
if (draw_info->compliance == SVGCompliance)
{
status&=SetImageClipMask(image,(Image *) NULL);
status&=SetImageMask(image,(Image *) NULL);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D r a w S t r o k e P o l y g o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
% the image while respecting the line cap and join attributes.
%
% The format of the DrawStrokePolygon method is:
%
% MagickBooleanType DrawStrokePolygon(Image *image,
% const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
PrimitiveInfo
linecap[5];
register ssize_t
i;
for (i=0; i < 4; i++)
linecap[i]=(*primitive_info);
linecap[0].coordinates=4;
linecap[1].point.x+=2.0*MagickEpsilon;
linecap[2].point.x+=2.0*MagickEpsilon;
linecap[2].point.y+=2.0*MagickEpsilon;
linecap[3].point.y+=2.0*MagickEpsilon;
linecap[4].primitive=UndefinedPrimitive;
return(DrawPolygonPrimitive(image,draw_info,linecap));
}
static MagickBooleanType DrawStrokePolygon(Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
DrawInfo
*clone_info;
MagickBooleanType
closed_path;
MagickStatusType
status;
PrimitiveInfo
*stroke_polygon;
register const PrimitiveInfo
*p,
*q;
/*
Draw stroked polygon.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" begin draw-stroke-polygon");
clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
clone_info->fill=draw_info->stroke;
if (clone_info->fill_pattern != (Image *) NULL)
clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
if (clone_info->stroke_pattern != (Image *) NULL)
clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
MagickTrue,&clone_info->stroke_pattern->exception);
clone_info->stroke.opacity=(Quantum) TransparentOpacity;
clone_info->stroke_width=0.0;
clone_info->fill_rule=NonZeroRule;
status=MagickTrue;
for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates)
{
if (p->coordinates == 1)
continue;
stroke_polygon=TraceStrokePolygon(image,draw_info,p);
if (stroke_polygon == (PrimitiveInfo *) NULL)
{
status=0;
break;
}
status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
if (status == 0)
break;
q=p+p->coordinates-1;
closed_path=p->closed_subpath;
if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
{
status&=DrawRoundLinecap(image,draw_info,p);
status&=DrawRoundLinecap(image,draw_info,q);
}
}
clone_info=DestroyDrawInfo(clone_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(DrawEvent,GetMagickModule(),
" end draw-stroke-polygon");
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A f f i n e M a t r i x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAffineMatrix() returns an AffineMatrix initialized to the identity
% matrix.
%
% The format of the GetAffineMatrix method is:
%
% void GetAffineMatrix(AffineMatrix *affine_matrix)
%
% A description of each parameter follows:
%
% o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(affine_matrix != (AffineMatrix *) NULL);
(void) memset(affine_matrix,0,sizeof(*affine_matrix));
affine_matrix->sx=1.0;
affine_matrix->sy=1.0;
}
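/*
  A short illustrative sketch (the scale factor is hypothetical): start from
  the identity and scale by 2 in x.

    AffineMatrix
      affine;

    GetAffineMatrix(&affine);
    affine.sx=2.0;
*/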
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t D r a w I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetDrawInfo() initializes draw_info to default values from image_info.
%
% The format of the GetDrawInfo method is:
%
% void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
char
*next_token;
const char
*option;
ExceptionInfo
*exception;
ImageInfo
*clone_info;
/*
Initialize draw attributes.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(draw_info != (DrawInfo *) NULL);
(void) memset(draw_info,0,sizeof(*draw_info));
clone_info=CloneImageInfo(image_info);
GetAffineMatrix(&draw_info->affine);
exception=AcquireExceptionInfo();
(void) QueryColorDatabase("#000F",&draw_info->fill,exception);
(void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
draw_info->stroke_antialias=clone_info->antialias;
draw_info->stroke_width=1.0;
draw_info->fill_rule=EvenOddRule;
draw_info->opacity=OpaqueOpacity;
draw_info->fill_opacity=OpaqueOpacity;
draw_info->stroke_opacity=OpaqueOpacity;
draw_info->linecap=ButtCap;
draw_info->linejoin=MiterJoin;
draw_info->miterlimit=10;
draw_info->decorate=NoDecoration;
if (clone_info->font != (char *) NULL)
draw_info->font=AcquireString(clone_info->font);
if (clone_info->density != (char *) NULL)
draw_info->density=AcquireString(clone_info->density);
draw_info->text_antialias=clone_info->antialias;
draw_info->pointsize=12.0;
if (fabs(clone_info->pointsize) >= MagickEpsilon)
draw_info->pointsize=clone_info->pointsize;
draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
draw_info->border_color=clone_info->border_color;
draw_info->compose=OverCompositeOp;
if (clone_info->server_name != (char *) NULL)
draw_info->server_name=AcquireString(clone_info->server_name);
draw_info->render=MagickTrue;
draw_info->clip_path=MagickFalse;
draw_info->debug=IsEventLogging();
option=GetImageOption(clone_info,"direction");
if (option != (const char *) NULL)
draw_info->direction=(DirectionType) ParseCommandOption(
MagickDirectionOptions,MagickFalse,option);
else
draw_info->direction=UndefinedDirection;
option=GetImageOption(clone_info,"encoding");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->encoding,option);
option=GetImageOption(clone_info,"family");
if (option != (const char *) NULL)
(void) CloneString(&draw_info->family,option);
option=GetImageOption(clone_info,"fill");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->fill,exception);
option=GetImageOption(clone_info,"gravity");
if (option != (const char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"interline-spacing");
if (option != (const char *) NULL)
draw_info->interline_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"interword-spacing");
if (option != (const char *) NULL)
draw_info->interword_spacing=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"kerning");
if (option != (const char *) NULL)
draw_info->kerning=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"stroke");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->stroke,exception);
option=GetImageOption(clone_info,"strokewidth");
if (option != (const char *) NULL)
draw_info->stroke_width=StringToDouble(option,&next_token);
option=GetImageOption(clone_info,"style");
if (option != (const char *) NULL)
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,option);
option=GetImageOption(clone_info,"undercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&draw_info->undercolor,exception);
option=GetImageOption(clone_info,"weight");
if (option != (const char *) NULL)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(option);
draw_info->weight=(size_t) weight;
}
exception=DestroyExceptionInfo(exception);
draw_info->signature=MagickCoreSignature;
clone_info=DestroyImageInfo(clone_info);
}
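/*
  GetDrawInfo() fills caller-provided storage; a minimal illustrative sketch
  (image_info is assumed to be a valid ImageInfo):

    DrawInfo
      *draw_info;

    draw_info=(DrawInfo *) AcquireMagickMemory(sizeof(*draw_info));
    GetDrawInfo(image_info,draw_info);
    draw_info=DestroyDrawInfo(draw_info);

  Most callers reach it indirectly through AcquireDrawInfo() or
  CloneDrawInfo(), which allocate and initialize in one step.
*/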
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r m u t a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the binomial coefficient choose(n,k), i.e.
%  n!/(k!*(n-k)!); despite its name, it counts combinations rather than
%  permutations.
%
% The format of the Permutate method is:
%
% void Permutate(ssize_t n,ssize_t k)
%
% A description of each parameter follows:
%
%    o n: the number of items.
%
%    o k: the number of items chosen.
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
double
r;
register ssize_t
i;
r=1.0;
for (i=k+1; i <= n; i++)
r*=i;
for (i=1; i <= (n-k); i++)
r/=i;
return(r);
}
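/*
  For example, Permutate(4,2) accumulates r=3*4=12 and then divides by 1 and
  2, yielding 6, which is choose(4,2).
*/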
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a c e P r i m i t i v e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TracePrimitive is a collection of methods for generating graphic
% primitives such as arcs, ellipses, paths, etc.
%
*/
static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo degrees)
{
PointInfo
center,
radius;
center.x=0.5*(end.x+start.x);
center.y=0.5*(end.y+start.y);
radius.x=fabs(center.x-start.x);
radius.y=fabs(center.y-start.y);
return(TraceEllipse(mvg_info,center,radius,degrees));
}
static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end,const PointInfo arc,const double angle,
const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
double
alpha,
beta,
delta,
factor,
gamma,
theta;
MagickStatusType
status;
PointInfo
center,
points[3],
radii;
register double
cosine,
sine;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
size_t
arc_segments;
ssize_t
offset;
offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
return(TracePoint(primitive_info,end));
radii.x=fabs(arc.x);
radii.y=fabs(arc.y);
if ((radii.x < MagickEpsilon) || (radii.y < MagickEpsilon))
return(TraceLine(primitive_info,start,end));
cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
(radii.y*radii.y);
if (delta < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
if (delta > 1.0)
{
radii.x*=sqrt((double) delta);
radii.y*=sqrt((double) delta);
}
points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
alpha=points[1].x-points[0].x;
beta=points[1].y-points[0].y;
if (fabs(alpha*alpha+beta*beta) < MagickEpsilon)
return(TraceLine(primitive_info,start,end));
factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
if (factor <= 0.0)
factor=0.0;
else
{
factor=sqrt((double) factor);
if (sweep == large_arc)
factor=(-factor);
}
center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
alpha=atan2(points[0].y-center.y,points[0].x-center.x);
theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
if ((theta < 0.0) && (sweep != MagickFalse))
theta+=2.0*MagickPI;
else
if ((theta > 0.0) && (sweep == MagickFalse))
theta-=2.0*MagickPI;
arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
MagickEpsilon))));
p=primitive_info;
status=MagickTrue;
for (i=0; i < (ssize_t) arc_segments; i++)
{
beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
sin(fmod((double) beta,DegreesToRadians(360.0)));
points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
(double) i*theta/arc_segments),DegreesToRadians(360.0))));
points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
theta/arc_segments),DegreesToRadians(360.0))));
points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
(i+1)*theta/arc_segments),DegreesToRadians(360.0))));
p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
(p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
points[0].y);
(p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
points[0].y);
(p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
points[1].y);
(p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
points[1].y);
(p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
points[2].y);
(p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
points[2].y);
if (i == (ssize_t) (arc_segments-1))
(p+3)->point=end;
status&=TraceBezier(mvg_info,4);
if (status == 0)
break;
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
p+=p->coordinates;
}
if (status == 0)
return(MagickFalse);
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
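/*
  TraceArcPath() follows the usual endpoint-to-center conversion for
  elliptical arcs: map the endpoints onto the ellipse's unit circle, solve
  for the center, then split the swept angle into segments of at most a
  quarter turn.  Each segment becomes one cubic Bezier whose control-point
  distance gamma=(8/3)*sin(beta/2)^2/sin(beta) is the standard approximation
  for a circular arc of angle beta.
*/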
static MagickBooleanType TraceBezier(MVGInfo *mvg_info,
const size_t number_coordinates)
{
double
alpha,
*coefficients,
weight;
PointInfo
end,
point,
*points;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i,
j;
size_t
control_points,
quantum;
/*
Allocate coefficients.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
quantum=number_coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
for (j=i+1; j < (ssize_t) number_coordinates; j++)
{
alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
if (alpha > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (alpha > (double) quantum)
quantum=(size_t) alpha;
}
}
coefficients=(double *) AcquireQuantumMemory(number_coordinates,
sizeof(*coefficients));
quantum=MagickMin(quantum/number_coordinates,BezierQuantum);
points=(PointInfo *) AcquireQuantumMemory(quantum,number_coordinates*
sizeof(*points));
if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
{
if (points != (PointInfo *) NULL)
points=(PointInfo *) RelinquishMagickMemory(points);
if (coefficients != (double *) NULL)
coefficients=(double *) RelinquishMagickMemory(coefficients);
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
control_points=quantum*number_coordinates;
if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
/*
Compute Bézier points.
*/
end=primitive_info[number_coordinates-1].point;
for (i=0; i < (ssize_t) number_coordinates; i++)
coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
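/*
  Permutate() supplies the Bernstein (binomial) coefficients C(n-1,i);
  the loop below evaluates B(t)=sum_j C(n-1,j)*t^j*(1-t)^(n-1-j)*P[j]
  incrementally, starting alpha at (1-t)^(n-1) and multiplying by
  t/(1-t) for each successive term.
*/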
weight=0.0;
for (i=0; i < (ssize_t) control_points; i++)
{
p=primitive_info;
point.x=0.0;
point.y=0.0;
alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
for (j=0; j < (ssize_t) number_coordinates; j++)
{
point.x+=alpha*coefficients[j]*p->point.x;
point.y+=alpha*coefficients[j]*p->point.y;
alpha*=weight/(1.0-weight);
p++;
}
points[i]=point;
weight+=1.0/control_points;
}
/*
Bézier curves are rendered as short segmented polylines.
*/
p=primitive_info;
for (i=0; i < (ssize_t) control_points; i++)
{
if (TracePoint(p,points[i]) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
}
if (TracePoint(p,end) == MagickFalse)
{
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickFalse);
}
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
points=(PointInfo *) RelinquishMagickMemory(points);
coefficients=(double *) RelinquishMagickMemory(coefficients);
return(MagickTrue);
}
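/*
  A circle primitive is given by its center (start) and a point on its
  perimeter (end): the radius is their distance, and the outline is traced
  as a full 0..360 degree ellipse with equal radii.
*/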
static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start,
const PointInfo end)
{
double
alpha,
beta,
radius;
PointInfo
offset,
degrees;
alpha=end.x-start.x;
beta=end.y-start.y;
radius=hypot((double) alpha,(double) beta);
offset.x=(double) radius;
offset.y=(double) radius;
degrees.x=0.0;
degrees.y=360.0;
return(TraceEllipse(mvg_info,start,offset,degrees));
}
static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center,
const PointInfo radii,const PointInfo arc)
{
double
coordinates,
delta,
step,
x,
y;
PointInfo
angle,
point;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
/*
Ellipses are rendered as short segmented polylines.
*/
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
primitive_info->coordinates=0;
if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon))
return(MagickTrue);
delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y));
step=MagickPI/8.0;
if ((delta >= 0.0) && (delta < (MagickPI/8.0)))
step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0);
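/*
  delta is about 2/max(radius); for large ellipses the default MagickPI/8
  step is refined to roughly 1/max(radius) radians so that successive
  vertices land about one pixel apart along the longer axis.
*/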
angle.x=DegreesToRadians(arc.x);
y=arc.y;
while (y < arc.x)
y+=360.0;
angle.y=DegreesToRadians(y);
coordinates=ceil((angle.y-angle.x)/step+1.0);
if (coordinates > (double) SSIZE_MAX)
{
(void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'","");
return(MagickFalse);
}
if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse)
return(MagickFalse);
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
for (p=primitive_info; angle.x < angle.y; angle.x+=step)
{
point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
}
point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x;
point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickFalse;
x=fabs(primitive_info[0].point.x-
primitive_info[primitive_info->coordinates-1].point.x);
y=fabs(primitive_info[0].point.y-
primitive_info[primitive_info->coordinates-1].point.y);
if ((x < MagickEpsilon) && (y < MagickEpsilon))
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
if (TracePoint(primitive_info,start) == MagickFalse)
return(MagickFalse);
if ((fabs(start.x-end.x) < MagickEpsilon) &&
(fabs(start.y-end.y) < MagickEpsilon))
{
primitive_info->primitive=PointPrimitive;
primitive_info->coordinates=1;
return(MagickTrue);
}
if (TracePoint(primitive_info+1,end) == MagickFalse)
return(MagickFalse);
(primitive_info+1)->primitive=primitive_info->primitive;
primitive_info->coordinates=2;
primitive_info->closed_subpath=MagickFalse;
return(MagickTrue);
}
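/*
  TracePath parses an SVG-style path string: M/m move-to, L/l line-to,
  H/h horizontal line, V/v vertical line, C/c cubic Bézier, S/s smooth
  cubic, Q/q quadratic Bézier, T/t smooth quadratic, A/a elliptical arc,
  Z/z close subpath.  Uppercase commands take absolute coordinates,
  lowercase ones relative coordinates.  For example, the path
  "M 10 10 L 90 10 Q 90 90 10 90 Z" produces one closed subpath built
  from a line segment and a quadratic curve.
*/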
static size_t TracePath(Image *image,MVGInfo *mvg_info,const char *path)
{
char
*next_token,
token[MaxTextExtent];
const char
*p;
double
x,
y;
int
attribute,
last_attribute;
MagickStatusType
status;
PointInfo
end = {0.0, 0.0},
points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} },
point = {0.0, 0.0},
start = {0.0, 0.0};
PrimitiveInfo
*primitive_info;
PrimitiveType
primitive_type;
register PrimitiveInfo
*q;
register ssize_t
i;
size_t
number_coordinates,
z_count;
ssize_t
subpath_offset;
subpath_offset=mvg_info->offset;
primitive_info=(*mvg_info->primitive_info)+mvg_info->offset;
status=MagickTrue;
attribute=0;
number_coordinates=0;
z_count=0;
primitive_type=primitive_info->primitive;
q=primitive_info;
for (p=path; *p != '\0'; )
{
if (status == MagickFalse)
break;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == '\0')
break;
last_attribute=attribute;
attribute=(int) (*p++);
switch (attribute)
{
case 'a':
case 'A':
{
double
angle = 0.0;
MagickBooleanType
large_arc = MagickFalse,
sweep = MagickFalse;
PointInfo
arc = {0.0, 0.0};
/*
Elliptical arc.
*/
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
arc.x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
arc.y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
angle=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse;
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
end.x=(double) (attribute == (int) 'A' ? x : point.x+x);
end.y=(double) (attribute == (int) 'A' ? y : point.y+y);
status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'c':
case 'C':
{
/*
Cubic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 4; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
end.x=(double) (attribute == (int) 'C' ? x : point.x+x);
end.y=(double) (attribute == (int) 'C' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'H':
case 'h':
{
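/*
  Horizontal line to.
*/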
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.x=(double) (attribute == (int) 'H' ? x: point.x+x);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'l':
case 'L':
{
/*
Line to.
*/
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.x=(double) (attribute == (int) 'L' ? x : point.x+x);
point.y=(double) (attribute == (int) 'L' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'M':
case 'm':
{
/*
Move to.
*/
if (mvg_info->offset != subpath_offset)
{
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
}
i=0;
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.x=(double) (attribute == (int) 'M' ? x : point.x+x);
point.y=(double) (attribute == (int) 'M' ? y : point.y+y);
if (i == 0)
start=point;
i++;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'q':
case 'Q':
{
/*
Quadratic Bézier curve.
*/
do
{
points[0]=point;
for (i=1; i < 3; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'Q' ? x : point.x+x);
end.y=(double) (attribute == (int) 'Q' ? y : point.y+y);
points[i]=end;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 's':
case 'S':
{
/*
Smooth cubic Bézier curve.
*/
do
{
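/*
  Reflect the previous segment's second control point through its
  endpoint to obtain this segment's first control point, keeping the
  join tangent-continuous.
*/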
points[0]=points[3];
points[1].x=2.0*points[3].x-points[2].x;
points[1].y=2.0*points[3].y-points[2].y;
for (i=2; i < 4; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
if (*p == ',')
p++;
end.x=(double) (attribute == (int) 'S' ? x : point.x+x);
end.y=(double) (attribute == (int) 'S' ? y : point.y+y);
points[i]=end;
}
if (strchr("CcSs",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 4; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,4) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 't':
case 'T':
{
/*
Smooth quadratic Bézier curve.
*/
do
{
points[0]=points[2];
points[1].x=2.0*points[2].x-points[1].x;
points[1].y=2.0*points[2].y-points[1].y;
for (i=2; i < 3; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
x=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
end.x=(double) (attribute == (int) 'T' ? x : point.x+x);
end.y=(double) (attribute == (int) 'T' ? y : point.y+y);
points[i]=end;
}
if (status == MagickFalse)
break;
if (strchr("QqTt",last_attribute) == (char *) NULL)
{
points[0]=point;
points[1]=point;
}
for (i=0; i < 3; i++)
(q+i)->point=points[i];
if (TraceBezier(mvg_info,3) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
point=end;
last_attribute=attribute;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'v':
case 'V':
{
/*
Vertical line to.
*/
do
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
y=StringToDouble(token,&next_token);
if (token == next_token)
ThrowPointExpectedException(image,token);
point.y=(double) (attribute == (int) 'V' ? y : point.y+y);
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
while (isspace((int) ((unsigned char) *p)) != 0)
p++;
if (*p == ',')
p++;
} while (IsPoint(p) != MagickFalse);
break;
}
case 'z':
case 'Z':
{
/*
Close path.
*/
point=start;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(0);
q=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(q,point) == MagickFalse)
return(0);
mvg_info->offset+=q->coordinates;
q+=q->coordinates;
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
primitive_info->closed_subpath=MagickTrue;
number_coordinates+=primitive_info->coordinates;
primitive_info=q;
subpath_offset=mvg_info->offset;
z_count++;
break;
}
default:
{
ThrowPointExpectedException(image,token);
break;
}
}
}
if (status == MagickFalse)
return(0);
primitive_info=(*mvg_info->primitive_info)+subpath_offset;
primitive_info->coordinates=(size_t) (q-primitive_info);
number_coordinates+=primitive_info->coordinates;
for (i=0; i < (ssize_t) number_coordinates; i++)
{
q--;
q->primitive=primitive_type;
if (z_count > 1)
q->method=FillToBorderMethod;
}
q=primitive_info;
return(number_coordinates);
}
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
const PointInfo start,const PointInfo end)
{
PointInfo
point;
register PrimitiveInfo
*p;
register ssize_t
i;
p=primitive_info;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=start.x;
point.y=end.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,end) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
point.x=end.x;
point.y=start.y;
if (TracePoint(p,point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
if (TracePoint(p,start) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
const PointInfo start,const PointInfo end,PointInfo arc)
{
PointInfo
degrees,
point,
segment;
PrimitiveInfo
*primitive_info;
register PrimitiveInfo
*p;
register ssize_t
i;
ssize_t
offset;
offset=mvg_info->offset;
segment.x=fabs(end.x-start.x);
segment.y=fabs(end.y-start.y);
if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
{
(*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
return(MagickTrue);
}
if (arc.x > (0.5*segment.x))
arc.x=0.5*segment.x;
if (arc.y > (0.5*segment.y))
arc.y=0.5*segment.y;
point.x=start.x+segment.x-arc.x;
point.y=start.y+arc.y;
degrees.x=270.0;
degrees.y=360.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+segment.x-arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=0.0;
degrees.y=90.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+segment.y-arc.y;
degrees.x=90.0;
degrees.y=180.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
point.x=start.x+arc.x;
point.y=start.y+arc.y;
degrees.x=180.0;
degrees.y=270.0;
if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
mvg_info->offset+=p->coordinates;
if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
return(MagickFalse);
p=(*mvg_info->primitive_info)+mvg_info->offset;
if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
return(MagickFalse);
p+=p->coordinates;
mvg_info->offset=offset;
primitive_info=(*mvg_info->primitive_info)+offset;
primitive_info->coordinates=(size_t) (p-primitive_info);
primitive_info->closed_subpath=MagickTrue;
for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
{
p->primitive=primitive_info->primitive;
p--;
}
return(MagickTrue);
}
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
const size_t number_vertices,const double offset)
{
double
distance;
register double
dx,
dy;
register ssize_t
i;
ssize_t
j;
dx=0.0;
dy=0.0;
for (i=1; i < (ssize_t) number_vertices; i++)
{
dx=primitive_info[0].point.x-primitive_info[i].point.x;
dy=primitive_info[0].point.y-primitive_info[i].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
if (i == (ssize_t) number_vertices)
i=(ssize_t) number_vertices-1L;
distance=hypot((double) dx,(double) dy);
primitive_info[0].point.x=(double) (primitive_info[i].point.x+
dx*(distance+offset)/distance);
primitive_info[0].point.y=(double) (primitive_info[i].point.y+
dy*(distance+offset)/distance);
for (j=(ssize_t) number_vertices-2; j >= 0; j--)
{
dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
if ((fabs((double) dx) >= MagickEpsilon) ||
(fabs((double) dy) >= MagickEpsilon))
break;
}
distance=hypot((double) dx,(double) dy);
primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
dx*(distance+offset)/distance);
primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
dy*(distance+offset)/distance);
return(MagickTrue);
}
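/*
  TraceStrokePolygon expands a polyline into its stroke outline: two
  offset paths, path_p and path_q, run parallel to the input at half the
  stroke width on either side and are joined at each vertex according to
  draw_info->linejoin; path_q is then appended in reverse to path_p to
  form a single closed polygon suitable for filling.
*/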
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
#define MaxStrokePad (6*BezierQuantum+360)
#define CheckPathExtent(pad) \
if ((ssize_t) (q+(pad)) >= (ssize_t) max_strokes) \
{ \
if (~max_strokes < (pad)) \
{ \
path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
} \
else \
{ \
max_strokes+=(pad); \
path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes+ \
MaxStrokePad,sizeof(*path_p)); \
path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes+ \
MaxStrokePad,sizeof(*path_q)); \
} \
if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
{ \
if (path_p != (PointInfo *) NULL) \
path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
if (path_q != (PointInfo *) NULL) \
path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
polygon_primitive=(PrimitiveInfo *) \
RelinquishMagickMemory(polygon_primitive); \
return((PrimitiveInfo *) NULL); \
} \
}
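/*
  Note: max_strokes is unsigned, so ~max_strokes equals
  SIZE_MAX-max_strokes and the ~max_strokes < (pad) test above rejects
  any resize whose new extent would overflow.
*/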
typedef struct _LineSegment
{
double
p,
q;
} LineSegment;
double
delta_theta,
dot_product,
mid,
miterlimit;
LineSegment
dx = {0,0},
dy = {0,0},
inverse_slope = {0,0},
slope = {0,0},
theta = {0,0};
MagickBooleanType
closed_path;
PointInfo
box_p[5],
box_q[5],
center,
offset,
*path_p,
*path_q;
PrimitiveInfo
*polygon_primitive,
*stroke_polygon;
register ssize_t
i;
size_t
arc_segments,
max_strokes,
number_vertices;
ssize_t
j,
n,
p,
q;
/*
Allocate paths.
*/
number_vertices=primitive_info->coordinates;
max_strokes=2*number_vertices;
polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
number_vertices+2UL,sizeof(*polygon_primitive));
if (polygon_primitive == (PrimitiveInfo *) NULL)
return((PrimitiveInfo *) NULL);
(void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
sizeof(*polygon_primitive));
closed_path=primitive_info[0].closed_subpath;
if (((draw_info->linejoin == RoundJoin) ||
(draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
{
polygon_primitive[number_vertices]=primitive_info[1];
number_vertices++;
}
polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
/*
Compute the slope for the first line segment, p.
*/
dx.p=0.0;
dy.p=0.0;
for (n=1; n < (ssize_t) number_vertices; n++)
{
dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
break;
}
if (n == (ssize_t) number_vertices)
{
if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
{
/*
Zero length subpath.
*/
stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
sizeof(*stroke_polygon));
stroke_polygon[0]=polygon_primitive[0];
stroke_polygon[0].coordinates=0;
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return(stroke_polygon);
}
n=(ssize_t) number_vertices-1L;
}
path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes+MaxStrokePad,
sizeof(*path_p));
if (path_p == (PointInfo *) NULL)
{
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return((PrimitiveInfo *) NULL);
}
path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes+MaxStrokePad,
sizeof(*path_q));
if (path_q == (PointInfo *) NULL)
{
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
polygon_primitive);
return((PrimitiveInfo *) NULL);
}
slope.p=0.0;
inverse_slope.p=0.0;
if (fabs(dx.p) < MagickEpsilon)
{
if (dx.p >= 0.0)
slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.p) < MagickEpsilon)
{
if (dy.p >= 0.0)
inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.p=dy.p/dx.p;
inverse_slope.p=(-1.0/slope.p);
}
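/*
  mid is half the stroke width scaled into device space; miterlimit is
  kept pre-squared (limit*limit*mid*mid) so the join code below can
  compare squared miter distances without taking a square root.
*/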
mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
(void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
offset.y=(double) (offset.x*inverse_slope.p);
if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
{
box_p[0].x=polygon_primitive[0].point.x-offset.x;
box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
box_p[1].x=polygon_primitive[n].point.x-offset.x;
box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
box_q[0].x=polygon_primitive[0].point.x+offset.x;
box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
box_q[1].x=polygon_primitive[n].point.x+offset.x;
box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
}
else
{
box_p[0].x=polygon_primitive[0].point.x+offset.x;
box_p[0].y=polygon_primitive[0].point.y+offset.y;
box_p[1].x=polygon_primitive[n].point.x+offset.x;
box_p[1].y=polygon_primitive[n].point.y+offset.y;
box_q[0].x=polygon_primitive[0].point.x-offset.x;
box_q[0].y=polygon_primitive[0].point.y-offset.y;
box_q[1].x=polygon_primitive[n].point.x-offset.x;
box_q[1].y=polygon_primitive[n].point.y-offset.y;
}
/*
Create strokes for the line join attribute: bevel, miter, round.
*/
p=0;
q=0;
path_q[p++]=box_q[0];
path_p[q++]=box_p[0];
for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
{
/*
Compute the slope for this line segment, q.
*/
dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
dot_product=dx.q*dx.q+dy.q*dy.q;
if (dot_product < 0.25)
continue;
slope.q=0.0;
inverse_slope.q=0.0;
if (fabs(dx.q) < MagickEpsilon)
{
if (dx.q >= 0.0)
slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
if (fabs(dy.q) < MagickEpsilon)
{
if (dy.q >= 0.0)
inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
else
inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
}
else
{
slope.q=dy.q/dx.q;
inverse_slope.q=(-1.0/slope.q);
}
offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
offset.y=(double) (offset.x*inverse_slope.q);
dot_product=dy.q*offset.x-dx.q*offset.y;
if (dot_product > 0.0)
{
box_p[2].x=polygon_primitive[n].point.x-offset.x;
box_p[2].y=polygon_primitive[n].point.y-offset.y;
box_p[3].x=polygon_primitive[i].point.x-offset.x;
box_p[3].y=polygon_primitive[i].point.y-offset.y;
box_q[2].x=polygon_primitive[n].point.x+offset.x;
box_q[2].y=polygon_primitive[n].point.y+offset.y;
box_q[3].x=polygon_primitive[i].point.x+offset.x;
box_q[3].y=polygon_primitive[i].point.y+offset.y;
}
else
{
box_p[2].x=polygon_primitive[n].point.x+offset.x;
box_p[2].y=polygon_primitive[n].point.y+offset.y;
box_p[3].x=polygon_primitive[i].point.x+offset.x;
box_p[3].y=polygon_primitive[i].point.y+offset.y;
box_q[2].x=polygon_primitive[n].point.x-offset.x;
box_q[2].y=polygon_primitive[n].point.y-offset.y;
box_q[3].x=polygon_primitive[i].point.x-offset.x;
box_q[3].y=polygon_primitive[i].point.y-offset.y;
}
if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
{
box_p[4]=box_p[1];
box_q[4]=box_q[1];
}
else
{
box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
box_p[3].y)/(slope.p-slope.q));
box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
box_q[3].y)/(slope.p-slope.q));
box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
}
CheckPathExtent(6*BezierQuantum+360);
dot_product=dx.q*dy.p-dx.p*dy.q;
if (dot_product <= 0.0)
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_p[p++]=box_p[4];
else
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
if (theta.q < theta.p)
theta.q+=2.0*MagickPI;
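/*
  The 2.0*sqrt(1.0/mid) angular step keeps each chord's sagitta near
  half a pixel regardless of the stroke radius mid.
*/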
arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+6*BezierQuantum+360);
path_q[q].x=box_q[1].x;
path_q[q].y=box_q[1].y;
q++;
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_q[q].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_q[q].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
q++;
}
path_q[q++]=box_q[2];
break;
}
default:
break;
}
else
switch (draw_info->linejoin)
{
case BevelJoin:
{
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
break;
}
case MiterJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
{
path_q[q++]=box_q[4];
path_p[p++]=box_p[4];
}
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
path_p[p++]=box_p[1];
path_p[p++]=box_p[2];
}
break;
}
case RoundJoin:
{
dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
(box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
if (dot_product <= miterlimit)
path_q[q++]=box_q[4];
else
{
path_q[q++]=box_q[1];
path_q[q++]=box_q[2];
}
center=polygon_primitive[n].point;
theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
if (theta.p < theta.q)
theta.p+=2.0*MagickPI;
arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
(2.0*sqrt((double) (1.0/mid)))));
CheckPathExtent(arc_segments+6*BezierQuantum+360);
path_p[p++]=box_p[1];
for (j=1; j < (ssize_t) arc_segments; j++)
{
delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
path_p[p].x=(double) (center.x+mid*cos(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
path_p[p].y=(double) (center.y+mid*sin(fmod((double)
(theta.p+delta_theta),DegreesToRadians(360.0))));
p++;
}
path_p[p++]=box_p[2];
break;
}
default:
break;
}
slope.p=slope.q;
inverse_slope.p=inverse_slope.q;
box_p[0]=box_p[2];
box_p[1]=box_p[3];
box_q[0]=box_q[2];
box_q[1]=box_q[3];
dx.p=dx.q;
dy.p=dy.q;
n=i;
}
path_p[p++]=box_p[1];
path_q[q++]=box_q[1];
/*
Trace stroked polygon.
*/
stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
(p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
if (stroke_polygon != (PrimitiveInfo *) NULL)
{
for (i=0; i < (ssize_t) p; i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_p[i];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
}
for ( ; i < (ssize_t) (p+q+closed_path); i++)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
}
if (closed_path != MagickFalse)
{
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
i++;
}
stroke_polygon[i]=polygon_primitive[0];
stroke_polygon[i].point=stroke_polygon[0].point;
i++;
stroke_polygon[i].primitive=UndefinedPrimitive;
stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
}
path_p=(PointInfo *) RelinquishMagickMemory(path_p);
path_q=(PointInfo *) RelinquishMagickMemory(path_q);
polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
return(stroke_polygon);
}
|
conv5x5s2_pack4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn {
static void conv5x5s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
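// tailstep advances each input row pointer from the end of the consumed
// part of a row to the start of the row two below it (stride 2 vertically):
// the (w - 2*outw) columns left in the current row plus one full row of w
// columns to skip, times 4 floats per pack4 element.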
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f);
out0.fill(_bias0);
for (int q=0; q<inch; q++)
{
float* outptr0 = out0.row(0);
const Mat img0 = bottom_blob.channel(q);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* kptr = (const float*)kernel.channel(p).row(q);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j+3<outw; j+=4)
{
#if __aarch64__
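// Register map for the block below: v20-v23 accumulate the four output
// pixels; v16-v19 and v24-v27 alternate as kernel-tap registers streamed
// from kptr; v0-v7 and v28-v30 hold the eleven pack4 input columns
// r*0..r*10 that four stride-2 outputs of a 5-wide kernel consume.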
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0] \n"// sum0 sum1 sum2 sum3
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"// r04 r05 r06 r07
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%1] \n"// r08 r09 r010
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n"// r14 r15 r16 r17
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%2] \n"// r18 r19 r110
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n"// r24 r25 r26 r27
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%3] \n"// r28 r29 r210
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"// r34 r35 r36 r37
"fmla v20.4s, v24.4s, v0.s[0] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"fmla v23.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v0.s[2] \n"
"fmla v21.4s, v26.4s, v2.s[2] \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"fmla v22.4s, v27.4s, v4.s[3] \n"
"fmla v23.4s, v27.4s, v6.s[3] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%4] \n"// r38 r39 r310
"fmla v20.4s, v16.4s, v1.s[0] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v16.4s, v5.s[0] \n"
"fmla v23.4s, v16.4s, v7.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"fmla v22.4s, v17.4s, v5.s[1] \n"
"fmla v23.4s, v17.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v1.s[2] \n"
"fmla v21.4s, v18.4s, v3.s[2] \n"
"fmla v22.4s, v18.4s, v5.s[2] \n"
"fmla v23.4s, v18.4s, v7.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v19.4s, v5.s[3] \n"
"fmla v23.4s, v19.4s, v7.s[3] \n"
"fmla v20.4s, v24.4s, v2.s[0] \n"
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v24.4s, v6.s[0] \n"
"fmla v23.4s, v24.4s, v28.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"fmla v22.4s, v25.4s, v6.s[1] \n"
"fmla v23.4s, v25.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v2.s[2] \n"
"fmla v21.4s, v26.4s, v4.s[2] \n"
"fmla v22.4s, v26.4s, v6.s[2] \n"
"fmla v23.4s, v26.4s, v28.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v27.4s, v6.s[3] \n"
"fmla v23.4s, v27.4s, v28.s[3] \n"
"fmla v20.4s, v16.4s, v3.s[0] \n"
"fmla v21.4s, v16.4s, v5.s[0] \n"
"fmla v22.4s, v16.4s, v7.s[0] \n"
"fmla v23.4s, v16.4s, v29.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"fmla v22.4s, v17.4s, v7.s[1] \n"
"fmla v23.4s, v17.4s, v29.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v3.s[2] \n"
"fmla v21.4s, v18.4s, v5.s[2] \n"
"fmla v22.4s, v18.4s, v7.s[2] \n"
"fmla v23.4s, v18.4s, v29.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"fmla v22.4s, v19.4s, v7.s[3] \n"
"fmla v23.4s, v19.4s, v29.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43
"fmla v20.4s, v24.4s, v4.s[0] \n"
"fmla v21.4s, v24.4s, v6.s[0] \n"
"fmla v22.4s, v24.4s, v28.s[0] \n"
"fmla v23.4s, v24.4s, v30.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"fmla v22.4s, v25.4s, v28.s[1] \n"
"fmla v23.4s, v25.4s, v30.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v4.s[2] \n"
"fmla v21.4s, v26.4s, v6.s[2] \n"
"fmla v22.4s, v26.4s, v28.s[2] \n"
"fmla v23.4s, v26.4s, v30.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v27.4s, v28.s[3] \n"
"fmla v23.4s, v27.4s, v30.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%5], #64 \n"// r44 r45 r46 r47
"fmla v20.4s, v16.4s, v0.s[0] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v0.s[2] \n"
"fmla v21.4s, v18.4s, v2.s[2] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"fmla v22.4s, v19.4s, v4.s[3] \n"
"fmla v23.4s, v19.4s, v6.s[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v28.4s, v29.4s, v30.4s}, [%5] \n"// r48 r49 r410
"fmla v20.4s, v24.4s, v1.s[0] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v24.4s, v5.s[0] \n"
"fmla v23.4s, v24.4s, v7.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"fmla v22.4s, v25.4s, v5.s[1] \n"
"fmla v23.4s, v25.4s, v7.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v20.4s, v26.4s, v1.s[2] \n"
"fmla v21.4s, v26.4s, v3.s[2] \n"
"fmla v22.4s, v26.4s, v5.s[2] \n"
"fmla v23.4s, v26.4s, v7.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v27.4s, v5.s[3] \n"
"fmla v23.4s, v27.4s, v7.s[3] \n"
"fmla v20.4s, v16.4s, v2.s[0] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v16.4s, v6.s[0] \n"
"fmla v23.4s, v16.4s, v28.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"fmla v22.4s, v17.4s, v6.s[1] \n"
"fmla v23.4s, v17.4s, v28.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v20.4s, v18.4s, v2.s[2] \n"
"fmla v21.4s, v18.4s, v4.s[2] \n"
"fmla v22.4s, v18.4s, v6.s[2] \n"
"fmla v23.4s, v18.4s, v28.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v19.4s, v6.s[3] \n"
"fmla v23.4s, v19.4s, v28.s[3] \n"
"fmla v20.4s, v24.4s, v3.s[0] \n"
"fmla v21.4s, v24.4s, v5.s[0] \n"
"fmla v22.4s, v24.4s, v7.s[0] \n"
"fmla v23.4s, v24.4s, v29.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"fmla v22.4s, v25.4s, v7.s[1] \n"
"fmla v23.4s, v25.4s, v29.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v20.4s, v26.4s, v3.s[2] \n"
"fmla v21.4s, v26.4s, v5.s[2] \n"
"fmla v22.4s, v26.4s, v7.s[2] \n"
"fmla v23.4s, v26.4s, v29.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v27.4s, v7.s[3] \n"
"fmla v23.4s, v27.4s, v29.s[3] \n"
"fmla v20.4s, v16.4s, v4.s[0] \n"
"fmla v21.4s, v16.4s, v6.s[0] \n"
"fmla v22.4s, v16.4s, v28.s[0] \n"
"fmla v23.4s, v16.4s, v30.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v17.4s, v28.s[1] \n"
"fmla v23.4s, v17.4s, v30.s[1] \n"
"fmla v20.4s, v18.4s, v4.s[2] \n"
"fmla v21.4s, v18.4s, v6.s[2] \n"
"fmla v22.4s, v18.4s, v28.s[2] \n"
"fmla v23.4s, v18.4s, v30.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v19.4s, v28.s[3] \n"
"fmla v23.4s, v19.4s, v30.s[3] \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%0], #64 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
);
#else // __aarch64__
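// 32-bit NEON variant of the same scheme: q12-q15 accumulate the four
// outputs, q8-q11 hold the kernel taps streamed from kptr, and q0-q7
// hold the input columns.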
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d24-d31} \n"// sum0 sum1 sum2 sum3
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"// r04 r05 r06 r07
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"// r08 r09
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d14[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d14[1] \n"
"vmla.f32 q15, q9, d2[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d15[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d15[1] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #128] \n"
"vld1.f32 {d4-d5}, [%1 :128] \n"// r010
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d8-d15} \n"// r10 r11 r12 r13
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r14 r15 r16 r17
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #256] \n"
"vld1.f32 {d8-d11}, [%2 :128]! \n"// r18 r19
"vmla.f32 q12, q8, d12[0] \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d12[1] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d13[0] \n"
"vmla.f32 q13, q10, d1[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d13[1] \n"
"vmla.f32 q13, q11, d1[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #128] \n"
"vld1.f32 {d12-d13}, [%2 :128] \n"// r110
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"// r24 r25 r26 r27
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"// r28 r29
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d14[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d14[1] \n"
"vmla.f32 q15, q9, d2[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d15[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d15[1] \n"
"vmla.f32 q15, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #128] \n"
"vld1.f32 {d4-d5}, [%3 :128] \n"// r210
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"// r30 r31 r32 r33
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4!, {d0-d7} \n"// r34 r35 r36 r37
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d10[0] \n"
"vmla.f32 q13, q8, d14[0] \n"
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d10[1] \n"
"vmla.f32 q13, q9, d14[1] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q9, d6[1] \n"
"vmla.f32 q12, q10, d11[0] \n"
"vmla.f32 q13, q10, d15[0] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d11[1] \n"
"vmla.f32 q13, q11, d15[1] \n"
"vmla.f32 q14, q11, d3[1] \n"
"vmla.f32 q15, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"// r38 r39
"vmla.f32 q12, q8, d12[0] \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d12[1] \n"
"vmla.f32 q13, q9, d0[1] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q9, d8[1] \n"
"vmla.f32 q12, q10, d13[0] \n"
"vmla.f32 q13, q10, d1[0] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d13[1] \n"
"vmla.f32 q13, q11, d1[1] \n"
"vmla.f32 q14, q11, d5[1] \n"
"vmla.f32 q15, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d14[0] \n"
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d14[1] \n"
"vmla.f32 q13, q9, d2[1] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q9, d10[1] \n"
"vmla.f32 q12, q10, d15[0] \n"
"vmla.f32 q13, q10, d3[0] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d15[1] \n"
"vmla.f32 q13, q11, d3[1] \n"
"vmla.f32 q14, q11, d7[1] \n"
"vmla.f32 q15, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #128] \n"
"vld1.f32 {d12-d13}, [%4 :128] \n"// r310
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"// r40 r41 r42 r43
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5!, {d8-d15} \n"// r44 r45 r46 r47
"vmla.f32 q12, q8, d0[0] \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q9, d12[1] \n"
"vmla.f32 q12, q10, d1[0] \n"
"vmla.f32 q13, q10, d5[0] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"vmla.f32 q14, q11, d9[1] \n"
"vmla.f32 q15, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d2[0] \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q8, d10[0] \n"
"vmla.f32 q15, q8, d14[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q9, d10[1] \n"
"vmla.f32 q15, q9, d14[1] \n"
"vmla.f32 q12, q10, d3[0] \n"
"vmla.f32 q13, q10, d7[0] \n"
"vmla.f32 q14, q10, d11[0] \n"
"vmla.f32 q15, q10, d15[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"vmla.f32 q14, q11, d11[1] \n"
"vmla.f32 q15, q11, d15[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// r48 r49
"vmla.f32 q12, q8, d4[0] \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q8, d12[0] \n"
"vmla.f32 q15, q8, d0[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q9, d12[1] \n"
"vmla.f32 q15, q9, d0[1] \n"
"vmla.f32 q12, q10, d5[0] \n"
"vmla.f32 q13, q10, d9[0] \n"
"vmla.f32 q14, q10, d13[0] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"vmla.f32 q14, q11, d13[1] \n"
"vmla.f32 q15, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q12, q8, d6[0] \n"
"vmla.f32 q13, q8, d10[0] \n"
"vmla.f32 q14, q8, d14[0] \n"
"vmla.f32 q15, q8, d2[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q9, d14[1] \n"
"vmla.f32 q15, q9, d2[1] \n"
"vmla.f32 q12, q10, d7[0] \n"
"vmla.f32 q13, q10, d11[0] \n"
"vmla.f32 q14, q10, d15[0] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"vmla.f32 q14, q11, d15[1] \n"
"vmla.f32 q15, q11, d3[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"pld [%5, #128] \n"
"vld1.f32 {d4-d5}, [%5 :128] \n"// r410
"vmla.f32 q12, q8, d8[0] \n"
"vmla.f32 q13, q8, d12[0] \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q9, d4[1] \n"
"vmla.f32 q12, q10, d9[0] \n"
"vmla.f32 q13, q10, d13[0] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vmla.f32 q14, q11, d1[1] \n"
"vmla.f32 q15, q11, d5[1] \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"sub %1, %1, #32 \n"
"sub %2, %2, #32 \n"
"sub %3, %3, #32 \n"
"sub %4, %4, #32 \n"
"sub %5, %5, #32 \n"
"vstm %0!, {d24-d31} \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
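// Two-column tail: the loop below produces two output columns per
// iteration. From the pointer arithmetic this is presumably a 5x5 pack4
// kernel: the weight pointer %6 walks 25 blocks of 4x4 floats (64 bytes
// each) per column pair, the last load without post-increment, and is
// rewound by 24 * 16 floats = 1536 bytes at the end.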
for (; j+1<outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v20.4s, v21.4s}, [%0] \n"// sum0 sum1
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"// r00 r01 r02 r03
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmul v22.4s, v16.4s, v0.s[0] \n"
"fmul v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%1] \n"// r04 r05 r06
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"// r10 r11 r12 r13
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%2] \n"// r14 r15 r16
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"// r20 r21 r22 r23
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%3] \n"// r24 r25 r26
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n"// r30 r31 r32 r33
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fmla v22.4s, v24.4s, v0.s[0] \n"
"fmla v23.4s, v24.4s, v2.s[0] \n"
"fmla v20.4s, v25.4s, v0.s[1] \n"
"fmla v21.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v0.s[2] \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"fmla v21.4s, v27.4s, v2.s[3] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%4] \n"// r34 r35 r36
"fmla v22.4s, v16.4s, v1.s[0] \n"
"fmla v23.4s, v16.4s, v3.s[0] \n"
"fmla v20.4s, v17.4s, v1.s[1] \n"
"fmla v21.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v1.s[2] \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v19.4s, v3.s[3] \n"
"fmla v22.4s, v24.4s, v2.s[0] \n"
"fmla v23.4s, v24.4s, v4.s[0] \n"
"fmla v20.4s, v25.4s, v2.s[1] \n"
"fmla v21.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v2.s[2] \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v27.4s, v4.s[3] \n"
"fmla v22.4s, v16.4s, v3.s[0] \n"
"fmla v23.4s, v16.4s, v5.s[0] \n"
"fmla v20.4s, v17.4s, v3.s[1] \n"
"fmla v21.4s, v17.4s, v5.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v3.s[2] \n"
"fmla v23.4s, v18.4s, v5.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"fmla v21.4s, v19.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"// r40 r41 r42 r43
"fmla v22.4s, v24.4s, v4.s[0] \n"
"fmla v23.4s, v24.4s, v6.s[0] \n"
"fmla v20.4s, v25.4s, v4.s[1] \n"
"fmla v21.4s, v25.4s, v6.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v4.s[2] \n"
"fmla v23.4s, v26.4s, v6.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v27.4s, v6.s[3] \n"
"fmla v22.4s, v16.4s, v0.s[0] \n"
"fmla v23.4s, v16.4s, v2.s[0] \n"
"fmla v20.4s, v17.4s, v0.s[1] \n"
"fmla v21.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v0.s[2] \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"fmla v21.4s, v19.4s, v2.s[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v4.4s, v5.4s, v6.4s}, [%5] \n"// r44 r45 r46
"fmla v22.4s, v24.4s, v1.s[0] \n"
"fmla v23.4s, v24.4s, v3.s[0] \n"
"fmla v20.4s, v25.4s, v1.s[1] \n"
"fmla v21.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v22.4s, v26.4s, v1.s[2] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v27.4s, v3.s[3] \n"
"fmla v22.4s, v16.4s, v2.s[0] \n"
"fmla v23.4s, v16.4s, v4.s[0] \n"
"fmla v20.4s, v17.4s, v2.s[1] \n"
"fmla v21.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v22.4s, v18.4s, v2.s[2] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v19.4s, v4.s[3] \n"
"fmla v22.4s, v24.4s, v3.s[0] \n"
"fmla v23.4s, v24.4s, v5.s[0] \n"
"fmla v20.4s, v25.4s, v3.s[1] \n"
"fmla v21.4s, v25.4s, v5.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v22.4s, v26.4s, v3.s[2] \n"
"fmla v23.4s, v26.4s, v5.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v27.4s, v5.s[3] \n"
"fmla v22.4s, v16.4s, v4.s[0] \n"
"fmla v23.4s, v16.4s, v6.s[0] \n"
"fmla v20.4s, v17.4s, v4.s[1] \n"
"fmla v21.4s, v17.4s, v6.s[1] \n"
"fmla v22.4s, v18.4s, v4.s[2] \n"
"fmla v23.4s, v18.4s, v6.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v19.4s, v6.s[3] \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s, v21.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128] \n"// sum0 sum1
"pld [%1, #512] \n"
"vldm %1!, {d0-d7} \n"// r00 r01 r02 r03
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmul.f32 q14, q8, d0[0] \n"
"vmul.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #384] \n"
"vldm %1, {d8-d13} \n"// r04 r05 r06
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"// r10 r11 r12 r13
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #384] \n"
"vldm %2, {d8-d13} \n"// r14 r15 r16
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #512] \n"
"vldm %3!, {d0-d7} \n"// r20 r21 r22 r23
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #384] \n"
"vldm %3, {d8-d13} \n"// r24 r25 r26
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #512] \n"
"vldm %4!, {d0-d7} \n"// r30 r31 r32 r33
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #384] \n"
"vldm %4, {d8-d13} \n"// r34 r35 r36
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"// r40 r41 r42 r43
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d0[0] \n"
"vmla.f32 q15, q8, d4[0] \n"
"vmla.f32 q12, q9, d0[1] \n"
"vmla.f32 q13, q9, d4[1] \n"
"vmla.f32 q14, q10, d1[0] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"vmla.f32 q13, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #384] \n"
"vldm %5, {d8-d13} \n"// r44 r45 r46
"vmla.f32 q14, q8, d2[0] \n"
"vmla.f32 q15, q8, d6[0] \n"
"vmla.f32 q12, q9, d2[1] \n"
"vmla.f32 q13, q9, d6[1] \n"
"vmla.f32 q14, q10, d3[0] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"vmla.f32 q13, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d4[0] \n"
"vmla.f32 q15, q8, d8[0] \n"
"vmla.f32 q12, q9, d4[1] \n"
"vmla.f32 q13, q9, d8[1] \n"
"vmla.f32 q14, q10, d5[0] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"vmla.f32 q13, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q14, q8, d6[0] \n"
"vmla.f32 q15, q8, d10[0] \n"
"vmla.f32 q12, q9, d6[1] \n"
"vmla.f32 q13, q9, d10[1] \n"
"vmla.f32 q14, q10, d7[0] \n"
"vmla.f32 q15, q10, d11[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"vmla.f32 q13, q11, d11[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q14, q8, d8[0] \n"
"vmla.f32 q15, q8, d12[0] \n"
"vmla.f32 q12, q9, d8[1] \n"
"vmla.f32 q13, q9, d12[1] \n"
"vmla.f32 q14, q10, d9[0] \n"
"vmla.f32 q15, q10, d13[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vmla.f32 q13, q11, d13[1] \n"
"vadd.f32 q12, q12, q14 \n"
"vadd.f32 q13, q13, q15 \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vst1.f32 {d24-d27}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
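// Single-column tail: one output column per iteration; the partial
// accumulators (v21/v22/v23 feeding v20 on aarch64, q13/q14/q15 feeding
// q12 on armv7) are reduced with adds just before the store.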
for (; j<outw; j++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"// sum0
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"// r00 r01
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmul v21.4s, v16.4s, v0.s[0] \n"
"fmul v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmul v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%1, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%1] \n"// r02 r03 r04
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n"// r10 r11
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%2, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%2] \n"// r12 r13 r14
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n"// r20 r21
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%3, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%3] \n"// r22 r23 r24
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v0.4s, v1.4s}, [%4], #32 \n"// r30 r31
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fmla v21.4s, v24.4s, v0.s[0] \n"
"fmla v22.4s, v25.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v0.s[2] \n"
"fmla v20.4s, v27.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%4] \n"// r32 r33 r34
"fmla v21.4s, v16.4s, v1.s[0] \n"
"fmla v22.4s, v17.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v1.s[2] \n"
"fmla v20.4s, v19.4s, v1.s[3] \n"
"fmla v21.4s, v24.4s, v2.s[0] \n"
"fmla v22.4s, v25.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v2.s[2] \n"
"fmla v20.4s, v27.4s, v2.s[3] \n"
"fmla v21.4s, v16.4s, v3.s[0] \n"
"fmla v22.4s, v17.4s, v3.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v3.s[2] \n"
"fmla v20.4s, v19.4s, v3.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"// r40 r41
"fmla v21.4s, v24.4s, v4.s[0] \n"
"fmla v22.4s, v25.4s, v4.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v4.s[2] \n"
"fmla v20.4s, v27.4s, v4.s[3] \n"
"fmla v21.4s, v16.4s, v0.s[0] \n"
"fmla v22.4s, v17.4s, v0.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v0.s[2] \n"
"fmla v20.4s, v19.4s, v0.s[3] \n"
"prfm pldl1keep, [%5, #384] \n"
"ld1 {v2.4s, v3.4s, v4.4s}, [%5] \n"// r42 r43 r44
"fmla v21.4s, v24.4s, v1.s[0] \n"
"fmla v22.4s, v25.4s, v1.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6], #64 \n"
"fmla v23.4s, v26.4s, v1.s[2] \n"
"fmla v20.4s, v27.4s, v1.s[3] \n"
"fmla v21.4s, v16.4s, v2.s[0] \n"
"fmla v22.4s, v17.4s, v2.s[1] \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%6], #64 \n"
"fmla v23.4s, v18.4s, v2.s[2] \n"
"fmla v20.4s, v19.4s, v2.s[3] \n"
"fmla v21.4s, v24.4s, v3.s[0] \n"
"fmla v22.4s, v25.4s, v3.s[1] \n"
// "prfm pldl1keep, [%6, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%6] \n"
"fmla v23.4s, v26.4s, v3.s[2] \n"
"fmla v20.4s, v27.4s, v3.s[3] \n"
"fmla v21.4s, v16.4s, v4.s[0] \n"
"fmla v22.4s, v17.4s, v4.s[1] \n"
"fmla v23.4s, v18.4s, v4.s[2] \n"
"fmla v20.4s, v19.4s, v4.s[3] \n"
"fadd v22.4s, v21.4s, v22.4s \n"
"fadd v23.4s, v22.4s, v23.4s \n"
"fadd v20.4s, v20.4s, v23.4s \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"st1 {v20.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
#else // __aarch64__
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d24-d25}, [%0 :128] \n"// sum0
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"// r00 r01
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmul.f32 q13, q8, d0[0] \n"
"vmul.f32 q14, q9, d0[1] \n"
"vmul.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%1, #384] \n"
"vldm %1, {d4-d9} \n"// r02 r03 r04
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"// r10 r11
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%2, #384] \n"
"vldm %2, {d4-d9} \n"// r12 r13 r14
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"// r20 r21
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%3, #384] \n"
"vldm %3, {d4-d9} \n"// r22 r23 r24
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4 :128]! \n"// r30 r31
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%4, #384] \n"
"vldm %4, {d4-d9} \n"// r32 r33 r34
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// r40 r41
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d0[0] \n"
"vmla.f32 q14, q9, d0[1] \n"
"vmla.f32 q15, q10, d1[0] \n"
"vmla.f32 q12, q11, d1[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"pld [%5, #384] \n"
"vldm %5, {d4-d9} \n"// r42 r43 r44
"vmla.f32 q13, q8, d2[0] \n"
"vmla.f32 q14, q9, d2[1] \n"
"vmla.f32 q15, q10, d3[0] \n"
"vmla.f32 q12, q11, d3[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d4[0] \n"
"vmla.f32 q14, q9, d4[1] \n"
"vmla.f32 q15, q10, d5[0] \n"
"vmla.f32 q12, q11, d5[1] \n"
"pld [%6, #512] \n"
"vldm %6!, {d16-d23} \n"
"vmla.f32 q13, q8, d6[0] \n"
"vmla.f32 q14, q9, d6[1] \n"
"vmla.f32 q15, q10, d7[0] \n"
"vmla.f32 q12, q11, d7[1] \n"
// "pld [%6, #512] \n"
"vldm %6, {d16-d23} \n"
"vmla.f32 q13, q8, d8[0] \n"
"vmla.f32 q14, q9, d8[1] \n"
"vmla.f32 q15, q10, d9[0] \n"
"vmla.f32 q12, q11, d9[1] \n"
"vadd.f32 q14, q13, q14 \n"
"vadd.f32 q15, q14, q15 \n"
"vadd.f32 q12, q12, q15 \n"
"sub %6, %6, #1536 \n"// kptr -= 24 * 16;
"vst1.f32 {d24-d25}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(r4), // %5
"=r"(kptr) // %6
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(r4),
"6"(kptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
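// Advance the five input row pointers past the window overhang so they
// point at the start of the next output row; tailstep is presumably
// derived from (input width - output width) in setup code outside this
// excerpt.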
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
}
|
LogSoftMax.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/LogSoftMax.c"
#else
#ifdef _MSC_VER
#define LOG_SOFTMAX_SIZE_TYPE int64_t
#define LOG_SOFTMAX_CAST_TYPE (int64_t)
#else
#define LOG_SOFTMAX_SIZE_TYPE uint64_t
#define LOG_SOFTMAX_CAST_TYPE
#endif
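// MSVC only implements OpenMP 2.0, which requires a signed integral loop
// index, hence the int64_t/uint64_t split above.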
void THNN_(LogSoftMax_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int dim)
{
THArgCheck(dim >= 0 && dim < input->nDimension, 4,
"dim out of range (got %d, but input has %d dims)", dim, input->nDimension);
uint64_t outer_size = 1;
uint64_t dim_size = input->size[dim];
uint64_t inner_size = 1;
  for (uint64_t i = 0; i < (uint64_t)dim; ++i)
    outer_size *= input->size[i];
  for (uint64_t i = dim + 1; i < (uint64_t)input->nDimension; ++i)
    inner_size *= input->size[i];
input = THTensor_(newContiguous)(input);
THTensor_(resizeAs)(output, input);
real *input_data_base = THTensor_(data)(input);
real *output_data_base = THTensor_(data)(output);
uint64_t dim_stride = inner_size;
uint64_t outer_stride = dim_size * dim_stride;
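  // View the contiguous input as (outer_size, dim_size, inner_size);
  // element (o, d, i) then lives at o * outer_stride + d * dim_stride + i.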
LOG_SOFTMAX_SIZE_TYPE i, d;
#pragma omp parallel for private(i, d)
for (i = 0; i < LOG_SOFTMAX_CAST_TYPE (outer_size * inner_size); i++)
{
uint64_t outer_idx = i / inner_size;
uint64_t inner_idx = i % inner_size;
real *input_data = input_data_base + outer_idx * outer_stride + inner_idx;
real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;
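    // Numerically stable log-softmax along dim:
    //   output[d] = input[d] - (max + log(sum_d' exp(input[d'] - max)))
    // Subtracting the running max keeps exp() from overflowing.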
real max_input = -THInf;
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
max_input = THMax(max_input, input_data[d * dim_stride]);
accreal logsum = 0;
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
logsum += exp(input_data[d * dim_stride] - max_input);
logsum = max_input + log(logsum);
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
output_data[d * dim_stride] = input_data[d * dim_stride] - logsum;
}
THTensor_(free)(input);
}
void THNN_(LogSoftMax_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *output,
int dim)
{
THNN_CHECK_SHAPE(output, gradOutput);
THArgCheck(dim >= 0 && dim < output->nDimension, 6,
"dim out of range (got %d, but input has %d dims)", dim, output->nDimension);
uint64_t outer_size = 1;
uint64_t dim_size = output->size[dim];
uint64_t inner_size = 1;
  for (uint64_t i = 0; i < (uint64_t)dim; ++i)
    outer_size *= output->size[i];
  for (uint64_t i = dim + 1; i < (uint64_t)output->nDimension; ++i)
    inner_size *= output->size[i];
gradOutput = THTensor_(newContiguous)(gradOutput);
output = THTensor_(newContiguous)(output);
THTensor_(resizeAs)(gradInput, output);
real *gradInput_data_base = THTensor_(data)(gradInput);
real *output_data_base = THTensor_(data)(output);
real *gradOutput_data_base = THTensor_(data)(gradOutput);
uint64_t dim_stride = inner_size;
uint64_t outer_stride = dim_size * dim_stride;
LOG_SOFTMAX_SIZE_TYPE i, d;
#pragma omp parallel for private(i, d)
for (i = 0; i < LOG_SOFTMAX_CAST_TYPE (outer_size * inner_size); i++)
{
uint64_t outer_idx = i / inner_size;
uint64_t inner_idx = i % inner_size;
real *gradInput_data = gradInput_data_base + outer_idx * outer_stride + inner_idx;
real *output_data = output_data_base + outer_idx * outer_stride + inner_idx;
real *gradOutput_data = gradOutput_data_base + outer_idx * outer_stride + inner_idx;
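    // Backward of y = log_softmax(x):
    //   dL/dx[d] = gradOutput[d] - exp(output[d]) * sum_d' gradOutput[d']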
accreal sum = 0;
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
sum += gradOutput_data[d * dim_stride];
for (d = 0; d < LOG_SOFTMAX_CAST_TYPE dim_size; d++)
gradInput_data[d * dim_stride] = gradOutput_data[d * dim_stride] - exp(output_data[d * dim_stride]) * sum;
}
THTensor_(free)(gradOutput);
THTensor_(free)(output);
}
#endif
|
test_funcs.h | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
bool trans_b,
int m,
int n,
int k,
type2 alpha,
const type* a,
int lda,
const type* b,
int ldb,
type2 beta,
type2* c,
int ldc,
const type2* bias,
bool flag_bias = false,
bool flag_relu = false) {
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
for (int j = 0; j < n; ++j) {
auto sum = static_cast<type2>(0);
for (int l = 0; l < k; ++l) {
type av;
type bv;
if (trans_a) {
av = a[l * lda + i];
} else {
av = a[i * lda + l];
}
if (trans_b) {
bv = b[j * ldb + l];
} else {
bv = b[l * ldb + j];
}
sum += av * bv;
}
type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
if (flag_relu) {
c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
} else {
c[i * ldc + j] = tmp;
}
}
}
}
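// Minimal usage sketch (hypothetical buffers): computes C = 1.0f * A * B
// for a row-major 2x3 by 3x4 product, no bias, no relu:
//
//   float A[2 * 3] = {1, 2, 3, 4, 5, 6};
//   float B[3 * 4] = {1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0};
//   float C[2 * 4] = {0};
//   basic_gemm<float, float>(false, false, 2, 4, 3, 1.f, A, 3, B, 4, 0.f,
//                            C, 4, nullptr);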
template <typename type, typename type2>
static void basic_gemv(int m,
int k,
const type* a,
const type* b,
const type2* bias,
type2* c,
type2 alpha,
type2 beta,
bool trans_a = false,
bool flag_bias = false,
bool flag_relu = false) {
#pragma omp parallel for
for (int i = 0; i < m; ++i) {
auto bias_data = static_cast<type2>(0);
if (flag_bias) {
bias_data = bias[i];
}
auto sum = static_cast<type2>(0);
for (int j = 0; j < k; ++j) {
type av;
if (trans_a) {
av = a[j * m + i];
} else {
av = a[i * k + j];
}
sum += av * b[j];
}
type2 tmp = alpha * sum + beta * c[i] + bias_data;
if (flag_relu) {
c[i] = tmp > (type2)0 ? tmp : (type2)0;
} else {
c[i] = tmp;
}
}
}
/**
* \brief basic direct convolution function
*/
//! for float, dtype1 and dtype2 are float
//! for int8, dtype1 is char, dtype2 is int
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
Dtype2* dout,
int num,
int chout,
int hout,
int wout,
int chin,
int hin,
int win,
const Dtype1* weights,
const Dtype2* bias,
int group,
int kernel_w,
int kernel_h,
int stride_w,
int stride_h,
int dila_w,
int dila_h,
int pad_w,
int pad_h,
bool flag_bias,
bool flag_relu) {
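  // Reference NCHW convolution with grouping: output element
  // (n, g * out_c_group + oc, oh, ow) is stored at
  //   out_idx = ((n * group + g) * out_c_group + oc) * out_h * out_w
  //           + oh * out_w + ow,
  // which matches the flattened index computed in the loops below.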
Dtype2 beta = 0;
auto src_data = din;
auto dst_data_ref = dout;
auto weights_data = weights;
auto with_bias = flag_bias;
auto bias_data = bias;
int in_num = num;
int out_channels = chout;
int out_h = hout;
int out_w = wout;
int in_channel = chin;
int in_h = hin;
int in_w = win;
int out_c_group = out_channels / group;
int in_c_group = in_channel / group;
for (int n = 0; n < in_num; ++n) {
#pragma omp parallel for collapse(4)
for (int g = 0; g < group; ++g) {
for (int oc = 0; oc < out_c_group; ++oc) {
for (int oh = 0; oh < out_h; ++oh) {
for (int ow = 0; ow < out_w; ++ow) {
int out_idx = n * group * out_c_group * out_h * out_w +
g * out_c_group * out_h * out_w + oc * out_h * out_w +
oh * out_w + ow;
Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
dst_data_ref[out_idx] = bias_d; // + dst_data_ref[out_idx] * beta;
for (int ic = 0; ic < in_c_group; ++ic) {
for (int kh = 0; kh < kernel_h; ++kh) {
for (int kw = 0; kw < kernel_w; ++kw) {
int iw = ow * stride_w - pad_w + kw * (dila_w);
int ih = oh * stride_h - pad_h + kh * (dila_h);
if (iw < 0 || iw >= in_w) continue;
if (ih < 0 || ih >= in_h) continue;
int iidx = n * in_channel * in_h * in_w +
g * in_c_group * in_h * in_w + ic * in_h * in_w +
ih * in_w + iw;
int widx =
g * out_c_group * in_c_group * kernel_h * kernel_w +
oc * in_c_group * kernel_h * kernel_w +
ic * kernel_h * kernel_w + kh * kernel_w + kw;
dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx];
}
}
}
if (flag_relu) {
dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
? dst_data_ref[out_idx]
: (Dtype2)0;
}
}
}
}
}
}
}
|
otfft_avxdit16omp.h | // Copyright (c) 2015, OK おじさん(岡久卓也)
// Copyright (c) 2015, OK Ojisan(Takuya OKAHISA)
// Copyright (c) 2017 to the present, DEWETRON GmbH
// OTFFT Implementation Version 9.5
// based on Stockham FFT algorithm
// from OK Ojisan(Takuya OKAHISA), source: http://www.moon.sannet.ne.jp/okahisa/stockham/stockham.html
#pragma once
#include "otfft_misc.h"
namespace OTFFT_NAMESPACE {
namespace OTFFT_AVXDIT16omp { /////////////////////////////////////////////////
using namespace OTFFT;
using namespace OTFFT_MISC;
///////////////////////////////////////////////////////////////////////////////
// Forward Butterfly Operation
///////////////////////////////////////////////////////////////////////////////
template <int n, int s> struct fwdcore
{
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
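    // One radix-16 Stockham DIT pass: N0..Nf are the sixteen output strides.
    // The fifteen twiddles w^p..w^15p are built from W[sp] by a short
    // multiplication chain (w2p = w1p*w1p, w3p = w1p*w2p, ..., wfp = w7p*w8p)
    // instead of fifteen table lookups. The bare "#pragma omp for" assumes an
    // enclosing "omp parallel" region, as in the other omp variants.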
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static)
for (int i = 0; i < N/32; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
const ymm y0 = getpz2(yq_s16p+s*0x0);
const ymm w1p = duppz3(W[sp]);
const ymm y1 = mulpz2(w1p, getpz2(yq_s16p+s*0x1));
const ymm w2p = mulpz2(w1p, w1p);
const ymm y2 = mulpz2(w2p, getpz2(yq_s16p+s*0x2));
const ymm w3p = mulpz2(w1p, w2p);
const ymm y3 = mulpz2(w3p, getpz2(yq_s16p+s*0x3));
const ymm w4p = mulpz2(w2p, w2p);
const ymm y4 = mulpz2(w4p, getpz2(yq_s16p+s*0x4));
const ymm w5p = mulpz2(w2p, w3p);
const ymm y5 = mulpz2(w5p, getpz2(yq_s16p+s*0x5));
const ymm w6p = mulpz2(w3p, w3p);
const ymm y6 = mulpz2(w6p, getpz2(yq_s16p+s*0x6));
const ymm w7p = mulpz2(w3p, w4p);
const ymm y7 = mulpz2(w7p, getpz2(yq_s16p+s*0x7));
const ymm w8p = mulpz2(w4p, w4p);
const ymm y8 = mulpz2(w8p, getpz2(yq_s16p+s*0x8));
const ymm w9p = mulpz2(w4p, w5p);
const ymm y9 = mulpz2(w9p, getpz2(yq_s16p+s*0x9));
const ymm wap = mulpz2(w5p, w5p);
const ymm ya = mulpz2(wap, getpz2(yq_s16p+s*0xa));
const ymm wbp = mulpz2(w5p, w6p);
const ymm yb = mulpz2(wbp, getpz2(yq_s16p+s*0xb));
const ymm wcp = mulpz2(w6p, w6p);
const ymm yc = mulpz2(wcp, getpz2(yq_s16p+s*0xc));
const ymm wdp = mulpz2(w6p, w7p);
const ymm yd = mulpz2(wdp, getpz2(yq_s16p+s*0xd));
const ymm wep = mulpz2(w7p, w7p);
const ymm ye = mulpz2(wep, getpz2(yq_s16p+s*0xe));
const ymm wfp = mulpz2(w7p, w8p);
const ymm yf = mulpz2(wfp, getpz2(yq_s16p+s*0xf));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq_sp+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+N2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+N3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+N4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+N5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+N6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+N7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+Na, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+Nb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+Nc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+Nd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+Ne, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+Nf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
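// Specialization for stride s == 1: two consecutive complex elements share one
// ymm register, so the 16 inputs are loaded as adjacent pairs (ab..op, AB..OP)
// and deinterleaved with catlo/cathi before the radix-16 butterfly.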
template <int N> struct fwdcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static) nowait
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
const ymm w1p = getpz2(W+p);
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
const ymm ab = getpz2(y_16p+0x00);
const ymm cd = getpz2(y_16p+0x02);
const ymm ef = getpz2(y_16p+0x04);
const ymm gh = getpz2(y_16p+0x06);
const ymm ij = getpz2(y_16p+0x08);
const ymm kl = getpz2(y_16p+0x0a);
const ymm mn = getpz2(y_16p+0x0c);
const ymm op = getpz2(y_16p+0x0e);
const ymm AB = getpz2(y_16p+0x10);
const ymm CD = getpz2(y_16p+0x12);
const ymm EF = getpz2(y_16p+0x14);
const ymm GH = getpz2(y_16p+0x16);
const ymm IJ = getpz2(y_16p+0x18);
const ymm KL = getpz2(y_16p+0x1a);
const ymm MN = getpz2(y_16p+0x1c);
const ymm OP = getpz2(y_16p+0x1e);
const ymm y0 = catlo(ab, AB);
const ymm y1 = mulpz2(w1p, cathi(ab, AB));
const ymm y2 = mulpz2(w2p, catlo(cd, CD));
const ymm y3 = mulpz2(w3p, cathi(cd, CD));
const ymm y4 = mulpz2(w4p, catlo(ef, EF));
const ymm y5 = mulpz2(w5p, cathi(ef, EF));
const ymm y6 = mulpz2(w6p, catlo(gh, GH));
const ymm y7 = mulpz2(w7p, cathi(gh, GH));
const ymm y8 = mulpz2(w8p, catlo(ij, IJ));
const ymm y9 = mulpz2(w9p, cathi(ij, IJ));
const ymm ya = mulpz2(wap, catlo(kl, KL));
const ymm yb = mulpz2(wbp, cathi(kl, KL));
const ymm yc = mulpz2(wcp, catlo(mn, MN));
const ymm yd = mulpz2(wdp, cathi(mn, MN));
const ymm ye = mulpz2(wep, catlo(op, OP));
const ymm yf = mulpz2(wfp, cathi(op, OP));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(x_p+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+N2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+N3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+N4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+N5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+N6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+N7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+Na, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+Nb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+Nc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+Nd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+Ne, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+Nf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
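// fwdend: terminal radix-16 stage. 'eo' selects whether the current data lives
// in the work buffer y (eo == true) or already in x, and 'mode' selects the
// output scaling applied through scalepz2/scalepz.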
template <int n, int s, bool eo, int mode> struct fwdend;
//-----------------------------------------------------------------------------
template <int s, bool eo, int mode> struct fwdend<16,s,eo,mode>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
const ymm z0 = scalepz2<N,mode>(getpz2(zq+s*0x0));
const ymm z1 = scalepz2<N,mode>(getpz2(zq+s*0x1));
const ymm z2 = scalepz2<N,mode>(getpz2(zq+s*0x2));
const ymm z3 = scalepz2<N,mode>(getpz2(zq+s*0x3));
const ymm z4 = scalepz2<N,mode>(getpz2(zq+s*0x4));
const ymm z5 = scalepz2<N,mode>(getpz2(zq+s*0x5));
const ymm z6 = scalepz2<N,mode>(getpz2(zq+s*0x6));
const ymm z7 = scalepz2<N,mode>(getpz2(zq+s*0x7));
const ymm z8 = scalepz2<N,mode>(getpz2(zq+s*0x8));
const ymm z9 = scalepz2<N,mode>(getpz2(zq+s*0x9));
const ymm za = scalepz2<N,mode>(getpz2(zq+s*0xa));
const ymm zb = scalepz2<N,mode>(getpz2(zq+s*0xb));
const ymm zc = scalepz2<N,mode>(getpz2(zq+s*0xc));
const ymm zd = scalepz2<N,mode>(getpz2(zq+s*0xd));
const ymm ze = scalepz2<N,mode>(getpz2(zq+s*0xe));
const ymm zf = scalepz2<N,mode>(getpz2(zq+s*0xf));
const ymm a08 = addpz2(z0, z8); const ymm s08 = subpz2(z0, z8);
const ymm a4c = addpz2(z4, zc); const ymm s4c = subpz2(z4, zc);
const ymm a2a = addpz2(z2, za); const ymm s2a = subpz2(z2, za);
const ymm a6e = addpz2(z6, ze); const ymm s6e = subpz2(z6, ze);
const ymm a19 = addpz2(z1, z9); const ymm s19 = subpz2(z1, z9);
const ymm a5d = addpz2(z5, zd); const ymm s5d = subpz2(z5, zd);
const ymm a3b = addpz2(z3, zb); const ymm s3b = subpz2(z3, zb);
const ymm a7f = addpz2(z7, zf); const ymm s7f = subpz2(z7, zf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x4, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xc, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
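// With s == 1 only 16 complex points remain, so a single thread handles them
// (#pragma omp single) using 128-bit xmm operations.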
template <bool eo, int mode> struct fwdend<16,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
complex_vector z = eo ? y : x;
const xmm z0 = scalepz<16,mode>(getpz(z[0x0]));
const xmm z1 = scalepz<16,mode>(getpz(z[0x1]));
const xmm z2 = scalepz<16,mode>(getpz(z[0x2]));
const xmm z3 = scalepz<16,mode>(getpz(z[0x3]));
const xmm z4 = scalepz<16,mode>(getpz(z[0x4]));
const xmm z5 = scalepz<16,mode>(getpz(z[0x5]));
const xmm z6 = scalepz<16,mode>(getpz(z[0x6]));
const xmm z7 = scalepz<16,mode>(getpz(z[0x7]));
const xmm z8 = scalepz<16,mode>(getpz(z[0x8]));
const xmm z9 = scalepz<16,mode>(getpz(z[0x9]));
const xmm za = scalepz<16,mode>(getpz(z[0xa]));
const xmm zb = scalepz<16,mode>(getpz(z[0xb]));
const xmm zc = scalepz<16,mode>(getpz(z[0xc]));
const xmm zd = scalepz<16,mode>(getpz(z[0xd]));
const xmm ze = scalepz<16,mode>(getpz(z[0xe]));
const xmm zf = scalepz<16,mode>(getpz(z[0xf]));
const xmm a08 = addpz(z0, z8); const xmm s08 = subpz(z0, z8);
const xmm a4c = addpz(z4, zc); const xmm s4c = subpz(z4, zc);
const xmm a2a = addpz(z2, za); const xmm s2a = subpz(z2, za);
const xmm a6e = addpz(z6, ze); const xmm s6e = subpz(z6, ze);
const xmm a19 = addpz(z1, z9); const xmm s19 = subpz(z1, z9);
const xmm a5d = addpz(z5, zd); const xmm s5d = subpz(z5, zd);
const xmm a3b = addpz(z3, zb); const xmm s3b = subpz(z3, zb);
const xmm a7f = addpz(z7, zf); const xmm s7f = subpz(z7, zf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x2], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x3], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x4], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x6], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x7], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0xa], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xb], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xc], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xe], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xf], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Forward FFT
///////////////////////////////////////////////////////////////////////////////
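// fwdfft<n,s,eo,mode> recurses as fwdfft<n/16,16*s,!eo,mode> with x and y
// swapped at each level, then applies the fwdcore<n,s> butterfly; the
// n == 16, 8, 4, 2 specializations below terminate the recursion with the
// matching end stages (the radix-8/4 endings come from the OTFFT_AVXDIT8omp
// and OTFFT_AVXDIT4omp namespaces).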
template <int n, int s, bool eo, int mode> struct fwdfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
fwdfft<n/16,16*s,!eo,mode>()(y, x, W);
fwdcore<n,s>()(x, y, W);
}
};
template <int s, bool eo, int mode> struct fwdfft<16,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
fwdend<16,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct fwdfft<8,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::fwdend<8,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct fwdfft<4,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwdend<4,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct fwdfft<2,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::fwdend<2,s,eo,mode>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse Butterfly Operation
///////////////////////////////////////////////////////////////////////////////
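// invcore mirrors fwdcore with conjugated twiddle factors (W[N-sp] below,
// cnjpz2(getpz2(W+p)) in the s == 1 specialization) and stores the butterfly
// outputs in the bin order required by the inverse transform.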
template <int n, int s> struct invcore
{
static const int N = n*s;
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static)
for (int i = 0; i < N/32; i++) {
const int p = i / (s/2);
const int q = i % (s/2) * 2;
const int sp = s*p;
const int s16p = 16*sp;
complex_vector xq_sp = x + q + sp;
complex_vector yq_s16p = y + q + s16p;
const ymm y0 = getpz2(yq_s16p+s*0x0);
const ymm w1p = duppz3(W[N-sp]);
const ymm y1 = mulpz2(w1p, getpz2(yq_s16p+s*0x1));
const ymm w2p = mulpz2(w1p, w1p);
const ymm y2 = mulpz2(w2p, getpz2(yq_s16p+s*0x2));
const ymm w3p = mulpz2(w1p, w2p);
const ymm y3 = mulpz2(w3p, getpz2(yq_s16p+s*0x3));
const ymm w4p = mulpz2(w2p, w2p);
const ymm y4 = mulpz2(w4p, getpz2(yq_s16p+s*0x4));
const ymm w5p = mulpz2(w2p, w3p);
const ymm y5 = mulpz2(w5p, getpz2(yq_s16p+s*0x5));
const ymm w6p = mulpz2(w3p, w3p);
const ymm y6 = mulpz2(w6p, getpz2(yq_s16p+s*0x6));
const ymm w7p = mulpz2(w3p, w4p);
const ymm y7 = mulpz2(w7p, getpz2(yq_s16p+s*0x7));
const ymm w8p = mulpz2(w4p, w4p);
const ymm y8 = mulpz2(w8p, getpz2(yq_s16p+s*0x8));
const ymm w9p = mulpz2(w4p, w5p);
const ymm y9 = mulpz2(w9p, getpz2(yq_s16p+s*0x9));
const ymm wap = mulpz2(w5p, w5p);
const ymm ya = mulpz2(wap, getpz2(yq_s16p+s*0xa));
const ymm wbp = mulpz2(w5p, w6p);
const ymm yb = mulpz2(wbp, getpz2(yq_s16p+s*0xb));
const ymm wcp = mulpz2(w6p, w6p);
const ymm yc = mulpz2(wcp, getpz2(yq_s16p+s*0xc));
const ymm wdp = mulpz2(w6p, w7p);
const ymm yd = mulpz2(wdp, getpz2(yq_s16p+s*0xd));
const ymm wep = mulpz2(w7p, w7p);
const ymm ye = mulpz2(wep, getpz2(yq_s16p+s*0xe));
const ymm wfp = mulpz2(w7p, w8p);
const ymm yf = mulpz2(wfp, getpz2(yq_s16p+s*0xf));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq_sp+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+N2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+N3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+N4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+N5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+N6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+N7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq_sp+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq_sp+N9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq_sp+Na, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq_sp+Nb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq_sp+Nc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq_sp+Nd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq_sp+Ne, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq_sp+Nf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
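// Stride-1 specialization: same pairwise load/deinterleave scheme as
// fwdcore<N,1>, but with conjugated twiddles.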
template <int N> struct invcore<N,1>
{
static const int N0 = 0;
static const int N1 = N/16;
static const int N2 = N1*2;
static const int N3 = N1*3;
static const int N4 = N1*4;
static const int N5 = N1*5;
static const int N6 = N1*6;
static const int N7 = N1*7;
static const int N8 = N1*8;
static const int N9 = N1*9;
static const int Na = N1*10;
static const int Nb = N1*11;
static const int Nc = N1*12;
static const int Nd = N1*13;
static const int Ne = N1*14;
static const int Nf = N1*15;
void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
#pragma omp for schedule(static) nowait
for (int p = 0; p < N1; p += 2) {
complex_vector x_p = x + p;
complex_vector y_16p = y + 16*p;
const ymm w1p = cnjpz2(getpz2(W+p));
const ymm w2p = mulpz2(w1p, w1p);
const ymm w3p = mulpz2(w1p, w2p);
const ymm w4p = mulpz2(w2p, w2p);
const ymm w5p = mulpz2(w2p, w3p);
const ymm w6p = mulpz2(w3p, w3p);
const ymm w7p = mulpz2(w3p, w4p);
const ymm w8p = mulpz2(w4p, w4p);
const ymm w9p = mulpz2(w4p, w5p);
const ymm wap = mulpz2(w5p, w5p);
const ymm wbp = mulpz2(w5p, w6p);
const ymm wcp = mulpz2(w6p, w6p);
const ymm wdp = mulpz2(w6p, w7p);
const ymm wep = mulpz2(w7p, w7p);
const ymm wfp = mulpz2(w7p, w8p);
const ymm ab = getpz2(y_16p+0x00);
const ymm cd = getpz2(y_16p+0x02);
const ymm ef = getpz2(y_16p+0x04);
const ymm gh = getpz2(y_16p+0x06);
const ymm ij = getpz2(y_16p+0x08);
const ymm kl = getpz2(y_16p+0x0a);
const ymm mn = getpz2(y_16p+0x0c);
const ymm op = getpz2(y_16p+0x0e);
const ymm AB = getpz2(y_16p+0x10);
const ymm CD = getpz2(y_16p+0x12);
const ymm EF = getpz2(y_16p+0x14);
const ymm GH = getpz2(y_16p+0x16);
const ymm IJ = getpz2(y_16p+0x18);
const ymm KL = getpz2(y_16p+0x1a);
const ymm MN = getpz2(y_16p+0x1c);
const ymm OP = getpz2(y_16p+0x1e);
const ymm y0 = catlo(ab, AB);
const ymm y1 = mulpz2(w1p, cathi(ab, AB));
const ymm y2 = mulpz2(w2p, catlo(cd, CD));
const ymm y3 = mulpz2(w3p, cathi(cd, CD));
const ymm y4 = mulpz2(w4p, catlo(ef, EF));
const ymm y5 = mulpz2(w5p, cathi(ef, EF));
const ymm y6 = mulpz2(w6p, catlo(gh, GH));
const ymm y7 = mulpz2(w7p, cathi(gh, GH));
const ymm y8 = mulpz2(w8p, catlo(ij, IJ));
const ymm y9 = mulpz2(w9p, cathi(ij, IJ));
const ymm ya = mulpz2(wap, catlo(kl, KL));
const ymm yb = mulpz2(wbp, cathi(kl, KL));
const ymm yc = mulpz2(wcp, catlo(mn, MN));
const ymm yd = mulpz2(wdp, cathi(mn, MN));
const ymm ye = mulpz2(wep, catlo(op, OP));
const ymm yf = mulpz2(wfp, cathi(op, OP));
const ymm a08 = addpz2(y0, y8); const ymm s08 = subpz2(y0, y8);
const ymm a4c = addpz2(y4, yc); const ymm s4c = subpz2(y4, yc);
const ymm a2a = addpz2(y2, ya); const ymm s2a = subpz2(y2, ya);
const ymm a6e = addpz2(y6, ye); const ymm s6e = subpz2(y6, ye);
const ymm a19 = addpz2(y1, y9); const ymm s19 = subpz2(y1, y9);
const ymm a5d = addpz2(y5, yd); const ymm s5d = subpz2(y5, yd);
const ymm a3b = addpz2(y3, yb); const ymm s3b = subpz2(y3, yb);
const ymm a7f = addpz2(y7, yf); const ymm s7f = subpz2(y7, yf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(x_p+N0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+N2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+N3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+N4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+N5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+N6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+N7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(x_p+N8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(x_p+N9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(x_p+Na, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(x_p+Nb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(x_p+Nc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(x_p+Nd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(x_p+Ne, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(x_p+Nf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
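// invend: terminal radix-16 stage of the inverse transform; structurally
// identical to fwdend apart from the inverse output ordering.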
template <int n, int s, bool eo, int mode> struct invend;
//-----------------------------------------------------------------------------
template <int s, bool eo, int mode> struct invend<16,s,eo,mode>
{
static const int N = 16*s;
void operator()(complex_vector x, complex_vector y) const noexcept
{
complex_vector z = eo ? y : x;
#pragma omp for schedule(static)
for (int q = 0; q < s; q += 2) {
complex_vector xq = x + q;
complex_vector zq = z + q;
const ymm z0 = scalepz2<N,mode>(getpz2(zq+s*0x0));
const ymm z1 = scalepz2<N,mode>(getpz2(zq+s*0x1));
const ymm z2 = scalepz2<N,mode>(getpz2(zq+s*0x2));
const ymm z3 = scalepz2<N,mode>(getpz2(zq+s*0x3));
const ymm z4 = scalepz2<N,mode>(getpz2(zq+s*0x4));
const ymm z5 = scalepz2<N,mode>(getpz2(zq+s*0x5));
const ymm z6 = scalepz2<N,mode>(getpz2(zq+s*0x6));
const ymm z7 = scalepz2<N,mode>(getpz2(zq+s*0x7));
const ymm z8 = scalepz2<N,mode>(getpz2(zq+s*0x8));
const ymm z9 = scalepz2<N,mode>(getpz2(zq+s*0x9));
const ymm za = scalepz2<N,mode>(getpz2(zq+s*0xa));
const ymm zb = scalepz2<N,mode>(getpz2(zq+s*0xb));
const ymm zc = scalepz2<N,mode>(getpz2(zq+s*0xc));
const ymm zd = scalepz2<N,mode>(getpz2(zq+s*0xd));
const ymm ze = scalepz2<N,mode>(getpz2(zq+s*0xe));
const ymm zf = scalepz2<N,mode>(getpz2(zq+s*0xf));
const ymm a08 = addpz2(z0, z8); const ymm s08 = subpz2(z0, z8);
const ymm a4c = addpz2(z4, zc); const ymm s4c = subpz2(z4, zc);
const ymm a2a = addpz2(z2, za); const ymm s2a = subpz2(z2, za);
const ymm a6e = addpz2(z6, ze); const ymm s6e = subpz2(z6, ze);
const ymm a19 = addpz2(z1, z9); const ymm s19 = subpz2(z1, z9);
const ymm a5d = addpz2(z5, zd); const ymm s5d = subpz2(z5, zd);
const ymm a3b = addpz2(z3, zb); const ymm s3b = subpz2(z3, zb);
const ymm a7f = addpz2(z7, zf); const ymm s7f = subpz2(z7, zf);
const ymm js4c = jxpz2(s4c);
const ymm js6e = jxpz2(s6e);
const ymm js5d = jxpz2(s5d);
const ymm js7f = jxpz2(s7f);
const ymm a08p1a4c = addpz2(a08, a4c); const ymm s08mjs4c = subpz2(s08, js4c);
const ymm a08m1a4c = subpz2(a08, a4c); const ymm s08pjs4c = addpz2(s08, js4c);
const ymm a2ap1a6e = addpz2(a2a, a6e); const ymm s2amjs6e = subpz2(s2a, js6e);
const ymm a2am1a6e = subpz2(a2a, a6e); const ymm s2apjs6e = addpz2(s2a, js6e);
const ymm a19p1a5d = addpz2(a19, a5d); const ymm s19mjs5d = subpz2(s19, js5d);
const ymm a19m1a5d = subpz2(a19, a5d); const ymm s19pjs5d = addpz2(s19, js5d);
const ymm a3bp1a7f = addpz2(a3b, a7f); const ymm s3bmjs7f = subpz2(s3b, js7f);
const ymm a3bm1a7f = subpz2(a3b, a7f); const ymm s3bpjs7f = addpz2(s3b, js7f);
const ymm w8_s2amjs6e = w8xpz2(s2amjs6e);
const ymm j_a2am1a6e = jxpz2(a2am1a6e);
const ymm v8_s2apjs6e = v8xpz2(s2apjs6e);
const ymm a08p1a4c_p1_a2ap1a6e = addpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_pw_s2amjs6e = addpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_mj_a2am1a6e = subpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_mv_s2apjs6e = subpz2(s08pjs4c, v8_s2apjs6e);
const ymm a08p1a4c_m1_a2ap1a6e = subpz2(a08p1a4c, a2ap1a6e);
const ymm s08mjs4c_mw_s2amjs6e = subpz2(s08mjs4c, w8_s2amjs6e);
const ymm a08m1a4c_pj_a2am1a6e = addpz2(a08m1a4c, j_a2am1a6e);
const ymm s08pjs4c_pv_s2apjs6e = addpz2(s08pjs4c, v8_s2apjs6e);
const ymm w8_s3bmjs7f = w8xpz2(s3bmjs7f);
const ymm j_a3bm1a7f = jxpz2(a3bm1a7f);
const ymm v8_s3bpjs7f = v8xpz2(s3bpjs7f);
const ymm a19p1a5d_p1_a3bp1a7f = addpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_pw_s3bmjs7f = addpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_mj_a3bm1a7f = subpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_mv_s3bpjs7f = subpz2(s19pjs5d, v8_s3bpjs7f);
const ymm a19p1a5d_m1_a3bp1a7f = subpz2(a19p1a5d, a3bp1a7f);
const ymm s19mjs5d_mw_s3bmjs7f = subpz2(s19mjs5d, w8_s3bmjs7f);
const ymm a19m1a5d_pj_a3bm1a7f = addpz2(a19m1a5d, j_a3bm1a7f);
const ymm s19pjs5d_pv_s3bpjs7f = addpz2(s19pjs5d, v8_s3bpjs7f);
const ymm h1_s19mjs5d_pw_s3bmjs7f = h1xpz2(s19mjs5d_pw_s3bmjs7f);
const ymm w8_a19m1a5d_mj_a3bm1a7f = w8xpz2(a19m1a5d_mj_a3bm1a7f);
const ymm h3_s19pjs5d_mv_s3bpjs7f = h3xpz2(s19pjs5d_mv_s3bpjs7f);
const ymm j_a19p1a5d_m1_a3bp1a7f = jxpz2(a19p1a5d_m1_a3bp1a7f);
const ymm hd_s19mjs5d_mw_s3bmjs7f = hdxpz2(s19mjs5d_mw_s3bmjs7f);
const ymm v8_a19m1a5d_pj_a3bm1a7f = v8xpz2(a19m1a5d_pj_a3bm1a7f);
const ymm hf_s19pjs5d_pv_s3bpjs7f = hfxpz2(s19pjs5d_pv_s3bpjs7f);
setpz2(xq+s*0x0, addpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x1, addpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0x2, addpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0x3, addpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0x4, addpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0x5, subpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0x6, subpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0x7, subpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz2(xq+s*0x8, subpz2(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz2(xq+s*0x9, subpz2(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz2(xq+s*0xa, subpz2(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz2(xq+s*0xb, subpz2(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz2(xq+s*0xc, subpz2(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz2(xq+s*0xd, addpz2(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz2(xq+s*0xe, addpz2(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz2(xq+s*0xf, addpz2(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
template <bool eo, int mode> struct invend<16,1,eo,mode>
{
inline void operator()(complex_vector x, complex_vector y) const noexcept
{
#pragma omp single
{
zeroupper();
complex_vector z = eo ? y : x;
const xmm z0 = scalepz<16,mode>(getpz(z[0x0]));
const xmm z1 = scalepz<16,mode>(getpz(z[0x1]));
const xmm z2 = scalepz<16,mode>(getpz(z[0x2]));
const xmm z3 = scalepz<16,mode>(getpz(z[0x3]));
const xmm z4 = scalepz<16,mode>(getpz(z[0x4]));
const xmm z5 = scalepz<16,mode>(getpz(z[0x5]));
const xmm z6 = scalepz<16,mode>(getpz(z[0x6]));
const xmm z7 = scalepz<16,mode>(getpz(z[0x7]));
const xmm z8 = scalepz<16,mode>(getpz(z[0x8]));
const xmm z9 = scalepz<16,mode>(getpz(z[0x9]));
const xmm za = scalepz<16,mode>(getpz(z[0xa]));
const xmm zb = scalepz<16,mode>(getpz(z[0xb]));
const xmm zc = scalepz<16,mode>(getpz(z[0xc]));
const xmm zd = scalepz<16,mode>(getpz(z[0xd]));
const xmm ze = scalepz<16,mode>(getpz(z[0xe]));
const xmm zf = scalepz<16,mode>(getpz(z[0xf]));
const xmm a08 = addpz(z0, z8); const xmm s08 = subpz(z0, z8);
const xmm a4c = addpz(z4, zc); const xmm s4c = subpz(z4, zc);
const xmm a2a = addpz(z2, za); const xmm s2a = subpz(z2, za);
const xmm a6e = addpz(z6, ze); const xmm s6e = subpz(z6, ze);
const xmm a19 = addpz(z1, z9); const xmm s19 = subpz(z1, z9);
const xmm a5d = addpz(z5, zd); const xmm s5d = subpz(z5, zd);
const xmm a3b = addpz(z3, zb); const xmm s3b = subpz(z3, zb);
const xmm a7f = addpz(z7, zf); const xmm s7f = subpz(z7, zf);
const xmm js4c = jxpz(s4c);
const xmm js6e = jxpz(s6e);
const xmm js5d = jxpz(s5d);
const xmm js7f = jxpz(s7f);
const xmm a08p1a4c = addpz(a08, a4c); const xmm s08mjs4c = subpz(s08, js4c);
const xmm a08m1a4c = subpz(a08, a4c); const xmm s08pjs4c = addpz(s08, js4c);
const xmm a2ap1a6e = addpz(a2a, a6e); const xmm s2amjs6e = subpz(s2a, js6e);
const xmm a2am1a6e = subpz(a2a, a6e); const xmm s2apjs6e = addpz(s2a, js6e);
const xmm a19p1a5d = addpz(a19, a5d); const xmm s19mjs5d = subpz(s19, js5d);
const xmm a19m1a5d = subpz(a19, a5d); const xmm s19pjs5d = addpz(s19, js5d);
const xmm a3bp1a7f = addpz(a3b, a7f); const xmm s3bmjs7f = subpz(s3b, js7f);
const xmm a3bm1a7f = subpz(a3b, a7f); const xmm s3bpjs7f = addpz(s3b, js7f);
const xmm w8_s2amjs6e = w8xpz(s2amjs6e);
const xmm j_a2am1a6e = jxpz(a2am1a6e);
const xmm v8_s2apjs6e = v8xpz(s2apjs6e);
const xmm a08p1a4c_p1_a2ap1a6e = addpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_pw_s2amjs6e = addpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_mj_a2am1a6e = subpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_mv_s2apjs6e = subpz(s08pjs4c, v8_s2apjs6e);
const xmm a08p1a4c_m1_a2ap1a6e = subpz(a08p1a4c, a2ap1a6e);
const xmm s08mjs4c_mw_s2amjs6e = subpz(s08mjs4c, w8_s2amjs6e);
const xmm a08m1a4c_pj_a2am1a6e = addpz(a08m1a4c, j_a2am1a6e);
const xmm s08pjs4c_pv_s2apjs6e = addpz(s08pjs4c, v8_s2apjs6e);
const xmm w8_s3bmjs7f = w8xpz(s3bmjs7f);
const xmm j_a3bm1a7f = jxpz(a3bm1a7f);
const xmm v8_s3bpjs7f = v8xpz(s3bpjs7f);
const xmm a19p1a5d_p1_a3bp1a7f = addpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_pw_s3bmjs7f = addpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_mj_a3bm1a7f = subpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_mv_s3bpjs7f = subpz(s19pjs5d, v8_s3bpjs7f);
const xmm a19p1a5d_m1_a3bp1a7f = subpz(a19p1a5d, a3bp1a7f);
const xmm s19mjs5d_mw_s3bmjs7f = subpz(s19mjs5d, w8_s3bmjs7f);
const xmm a19m1a5d_pj_a3bm1a7f = addpz(a19m1a5d, j_a3bm1a7f);
const xmm s19pjs5d_pv_s3bpjs7f = addpz(s19pjs5d, v8_s3bpjs7f);
const xmm h1_s19mjs5d_pw_s3bmjs7f = h1xpz(s19mjs5d_pw_s3bmjs7f);
const xmm w8_a19m1a5d_mj_a3bm1a7f = w8xpz(a19m1a5d_mj_a3bm1a7f);
const xmm h3_s19pjs5d_mv_s3bpjs7f = h3xpz(s19pjs5d_mv_s3bpjs7f);
const xmm j_a19p1a5d_m1_a3bp1a7f = jxpz(a19p1a5d_m1_a3bp1a7f);
const xmm hd_s19mjs5d_mw_s3bmjs7f = hdxpz(s19mjs5d_mw_s3bmjs7f);
const xmm v8_a19m1a5d_pj_a3bm1a7f = v8xpz(a19m1a5d_pj_a3bm1a7f);
const xmm hf_s19pjs5d_pv_s3bpjs7f = hfxpz(s19pjs5d_pv_s3bpjs7f);
setpz(x[0x0], addpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x1], addpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0x2], addpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0x3], addpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0x4], addpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0x5], subpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0x6], subpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0x7], subpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
setpz(x[0x8], subpz(a08p1a4c_p1_a2ap1a6e, a19p1a5d_p1_a3bp1a7f));
setpz(x[0x9], subpz(s08pjs4c_pv_s2apjs6e, hf_s19pjs5d_pv_s3bpjs7f));
setpz(x[0xa], subpz(a08m1a4c_pj_a2am1a6e, v8_a19m1a5d_pj_a3bm1a7f));
setpz(x[0xb], subpz(s08mjs4c_mw_s2amjs6e, hd_s19mjs5d_mw_s3bmjs7f));
setpz(x[0xc], subpz(a08p1a4c_m1_a2ap1a6e, j_a19p1a5d_m1_a3bp1a7f));
setpz(x[0xd], addpz(s08pjs4c_mv_s2apjs6e, h3_s19pjs5d_mv_s3bpjs7f));
setpz(x[0xe], addpz(a08m1a4c_mj_a2am1a6e, w8_a19m1a5d_mj_a3bm1a7f));
setpz(x[0xf], addpz(s08mjs4c_pw_s2amjs6e, h1_s19mjs5d_pw_s3bmjs7f));
}
}
};
///////////////////////////////////////////////////////////////////////////////
// Inverse FFT
///////////////////////////////////////////////////////////////////////////////
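// invfft follows the same recursion pattern as fwdfft, terminating in invend
// or the radix-8/4 endings from OTFFT_AVXDIT8omp/OTFFT_AVXDIT4omp.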
template <int n, int s, bool eo, int mode> struct invfft
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector W) const noexcept
{
invfft<n/16,16*s,!eo,mode>()(y, x, W);
invcore<n,s>()(x, y, W);
}
};
template <int s, bool eo, int mode> struct invfft<16,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
invend<16,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct invfft<8,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT8omp::invend<8,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct invfft<4,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::invend<4,s,eo,mode>()(x, y);
}
};
template <int s, bool eo, int mode> struct invfft<2,s,eo,mode>
{
inline void operator()(
complex_vector x, complex_vector y, const_complex_vector) const noexcept
{
OTFFT_AVXDIT4omp::invend<2,s,eo,mode>()(x, y);
}
};
///////////////////////////////////////////////////////////////////////////////
// Power of 2 FFT Routine
///////////////////////////////////////////////////////////////////////////////
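// Entry points differ only in normalization: fwd/fwdn use scale_length,
// fwd0 uses scale_1, fwdu/invu use scale_unitary, inv/inv0 use scale_1 and
// invn uses scale_length. Assuming scale_length divides by the transform
// length and scale_unitary by its square root (as the names suggest),
// fwd+inv, fwd0+invn and fwdu+invu each form an unscaled round trip.
// All routines dispatch on log_N inside a single OpenMP parallel region.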
inline void fwd(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_length;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void fwd0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_1;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void fwdu(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_unitary;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: fwdfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: fwdfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: fwdfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: fwdfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: fwdfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: fwdfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: fwdfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: fwdfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: fwdfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: fwdfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: fwdfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: fwdfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: fwdfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: fwdfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: fwdfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: fwdfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: fwdfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: fwdfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: fwdfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: fwdfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: fwdfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: fwdfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: fwdfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: fwdfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void fwdn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
fwd(log_N, x, y, W);
}
///////////////////////////////////////////////////////////////////////////////
inline void inv(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_1;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void inv0(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
inv(log_N, x, y, W);
}
inline void invu(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_unitary;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
inline void invn(const int log_N,
complex_vector x, complex_vector y, const_complex_vector W) noexcept
{
static const int mode = scale_length;
#pragma omp parallel firstprivate(x,y,W)
switch (log_N) {
case 0: break;
case 1: invfft<(1<< 1),1,0,mode>()(x, y, W); break;
case 2: invfft<(1<< 2),1,0,mode>()(x, y, W); break;
case 3: invfft<(1<< 3),1,0,mode>()(x, y, W); break;
case 4: invfft<(1<< 4),1,0,mode>()(x, y, W); break;
case 5: invfft<(1<< 5),1,0,mode>()(x, y, W); break;
case 6: invfft<(1<< 6),1,0,mode>()(x, y, W); break;
case 7: invfft<(1<< 7),1,0,mode>()(x, y, W); break;
case 8: invfft<(1<< 8),1,0,mode>()(x, y, W); break;
case 9: invfft<(1<< 9),1,0,mode>()(x, y, W); break;
case 10: invfft<(1<<10),1,0,mode>()(x, y, W); break;
case 11: invfft<(1<<11),1,0,mode>()(x, y, W); break;
case 12: invfft<(1<<12),1,0,mode>()(x, y, W); break;
case 13: invfft<(1<<13),1,0,mode>()(x, y, W); break;
case 14: invfft<(1<<14),1,0,mode>()(x, y, W); break;
case 15: invfft<(1<<15),1,0,mode>()(x, y, W); break;
case 16: invfft<(1<<16),1,0,mode>()(x, y, W); break;
case 17: invfft<(1<<17),1,0,mode>()(x, y, W); break;
case 18: invfft<(1<<18),1,0,mode>()(x, y, W); break;
case 19: invfft<(1<<19),1,0,mode>()(x, y, W); break;
case 20: invfft<(1<<20),1,0,mode>()(x, y, W); break;
case 21: invfft<(1<<21),1,0,mode>()(x, y, W); break;
case 22: invfft<(1<<22),1,0,mode>()(x, y, W); break;
case 23: invfft<(1<<23),1,0,mode>()(x, y, W); break;
case 24: invfft<(1<<24),1,0,mode>()(x, y, W); break;
}
}
} /////////////////////////////////////////////////////////////////////////////
}
|
chesv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zhesv.c, normal z -> c, Fri Sep 28 17:38:07 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <string.h>
/***************************************************************************//**
*
* @ingroup plasma_hesv
*
* Solves a system of linear equations A * X = B with the LTL^T factorization.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
* TODO: only PlasmaLower is supported for now.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] A
* Details of the LTL^T factorization of the Hermitian matrix A,
* as computed by plasma_chetrf.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @param[in,out] T
* Details of the LU factorization of the band matrix T, as
* computed by plasma_cgbtrf.
*
* @param[in] ldt
* The leading dimension of the array T.
*
* @param[in] ipiv
* The pivot indices used for chetrf; for 1 <= i <= min(m,n),
* row i of the matrix was interchanged with row ipiv(i).
*
* @param[in] ipiv2
* The pivot indices used for cgbtrf; for 1 <= i <= min(m,n),
* row i of the matrix was interchanged with row ipiv2(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_chesv
* @sa plasma_chesv
* @sa plasma_dsysv
* @sa plasma_ssysv
* @sa plasma_chetrf
* @sa plasma_chetrs
*
******************************************************************************/
int plasma_chesv(plasma_enum_t uplo, int n, int nrhs,
plasma_complex32_t *pA, int lda,
int *ipiv,
plasma_complex32_t *pT, int ldt,
int *ipiv2,
plasma_complex32_t *pB, int ldb)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if (//(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo (Upper not supported, yet)");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (nrhs < 0) {
plasma_error("illegal value of nrhs");
return -3;
}
if (lda < imax(1, n)) {
plasma_error("illegal value of lda");
return -5;
}
if (ldb < imax(1, n)) {
plasma_error("illegal value of ldb");
return -11;
}
// quick return
if (imax(n, nrhs) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_hetrf(plasma, PlasmaComplexFloat, n);
// Set tiling parameters.
int nb = plasma->nb;
// Adjust max number of panel threads
int max_panel_threads_gbtrf = 1;
int max_panel_threads_hetrf = 1;
if (plasma->max_panel_threads > 3) {
max_panel_threads_gbtrf = 2;
}
max_panel_threads_hetrf = imax(1, plasma->max_panel_threads - max_panel_threads_gbtrf);
plasma->max_panel_threads = max_panel_threads_hetrf;
// Initialize barrier.
plasma_barrier_init(&plasma->barrier);
// Initialize tile matrix descriptors.
plasma_desc_t A;
plasma_desc_t T;
plasma_desc_t B;
int tku = (nb+nb+nb-1)/nb; // number of tiles in upper band (not including diagonal)
int tkl = (nb+nb-1)/nb; // number of tiles in lower band (not including diagonal)
int lm = (tku+tkl+1)*nb; // since we use cgetrf on panel, we pivot back within panel.
// this could fill the last tile of the panel,
// and we need extra NB space on the bottom
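    // (worked example, illustrative only: with nb = 256 these give
    //  tku = (3*256-1)/256 = 2, tkl = (2*256-1)/256 = 1, and
    //  lm = (2+1+1)*256 = 1024 rows of band storage)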
int retval;
retval = plasma_desc_triangular_create(PlasmaComplexFloat, uplo, nb, nb,
n, n, 0, 0, n, n, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_band_create(PlasmaComplexFloat, PlasmaGeneral,
nb, nb, lm, n, 0, 0, n, n, nb, nb,
&T);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_band_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&T);
        return retval;
    }
// Create workspace.
plasma_desc_t W;
int tot = 3;
int ldw = (1+(4+tot)*A.mt)*nb; // block column
retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
ldw, nb, 0, 0, ldw, nb, &W);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&T);
        plasma_desc_destroy(&B);
        return retval;
    }
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// Initialize data.
memset(T.matrix, 0, ldt*n*sizeof(plasma_complex32_t));
memset(W.matrix, 0, ldw*nb*sizeof(plasma_complex32_t));
for (int i = 0; i < nb; i++) ipiv[i] = 1+i;
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_ctr2desc(pA, lda, A, &sequence, &request);
plasma_omp_cpb2desc(pT, ldt, T, &sequence, &request);
plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);
}
// implicit synchronization
#pragma omp parallel
#pragma omp master
{
// Call the tile async function.
plasma_omp_chesv(uplo, A, ipiv, T, ipiv2, B, W, &sequence, &request);
}
// implicit synchronization
#pragma omp parallel
#pragma omp master
{
// Translate back to LAPACK layout.
plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
}
// implicit synchronization
// Free matrix A in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&T);
plasma_desc_destroy(&B);
plasma_desc_destroy(&W);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_hesv
*
* Solves a system of linear equations using previously
* computed factorization.
* Non-blocking tile version of plasma_chesv().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
 *          Details of the LTL^H factorization of the Hermitian matrix A,
 *          as computed by plasma_chetrf.
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_chesv
* @sa plasma_omp_chesv
* @sa plasma_omp_dsysv
* @sa plasma_omp_ssysv
* @sa plasma_omp_chetrf
* @sa plasma_omp_chetrs
*
******************************************************************************/
void plasma_omp_chesv(plasma_enum_t uplo,
plasma_desc_t A, int *ipiv,
plasma_desc_t T, int *ipiv2,
plasma_desc_t B,
plasma_desc_t W,
plasma_sequence_t *sequence,
plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorNotInitialized);
return;
}
// Check input arguments.
if (//(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo (Upper not supported, yet)");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }
// quick return
if (A.n == 0 || B.n == 0)
return;
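    // Solve pipeline (a summary inferred from the calls below, not quoted
    // from the PLASMA docs): Aasen's factorization gives A = L*T*L^H with T
    // banded, and T is then LU-factored with partial pivoting (gbtrf).
    // The solves below apply, in order: row swaps (ipiv), L, the band
    // factors of T (ipiv2), L^H, and finally the inverse row swaps.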
// Call the parallel functions.
plasma_pchetrf_aasen(uplo, A, ipiv, T, W, sequence, request);
plasma_pcgbtrf(T, ipiv2, sequence, request);
// dependency on ipiv
#pragma omp taskwait
if (uplo == PlasmaLower) {
plasma_desc_t vA;
plasma_desc_t vB;
// forward-substitution with L
if (A.m > A.nb) {
vA = plasma_desc_view(A,
A.nb, 0,
A.m-A.nb, A.n-A.nb);
vB = plasma_desc_view(B,
B.nb, 0,
B.m-B.nb, B.n);
plasma_pcgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
#pragma omp taskwait
plasma_pctrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, vA,
vB,
sequence, request);
}
// solve with band matrix T
#pragma omp taskwait
plasma_pctbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
PlasmaUnit,
1.0, T,
B,
ipiv2,
sequence, request);
plasma_pctbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
PlasmaNonUnit,
1.0, T,
B,
ipiv2,
sequence, request);
// backward-substitution with L^H
if (A.m > A.nb) {
plasma_pctrsm(PlasmaLeft, PlasmaLower, PlasmaConjTrans, PlasmaUnit,
1.0, vA,
vB,
sequence, request);
#pragma omp taskwait
plasma_pcgeswp(PlasmaRowwise, B, ipiv, -1, sequence, request);
}
}
else {
// TODO: upper
}
}
|
GB_unop__identity_int8_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int8_int16)
// op(A') function: GB (_unop_tran__identity_int8_int16)
// C type: int8_t
// A type: int16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = (int8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int8_int16)
(
int8_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int8_t z = (int8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int8_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
8735.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
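  /* the kernel computes y = A^T * (A * x): tmp = A*x is formed row by row,
     and each row i then scatters its A^T contribution into y (note the
     atomic update below) */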
#pragma scop
{
#pragma omp parallel for schedule(static, 2) num_threads(2)
for (i = 0; i < _PB_NY; i++)
{
y[i] = 0;
}
  #pragma omp parallel for private(j) schedule(static, 2) num_threads(2)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
      /* y[j] is written by every i-iteration, so parallelizing over i races
         on y; the atomic update restores correctness */
      for (j = 0; j < _PB_NY; j++) {
        #pragma omp atomic
        y[j] = y[j] + A[i][j] * tmp[i];
      }
}
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
GB_compiler.h | //------------------------------------------------------------------------------
// GB_compiler.h: handle compiler variations
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#ifndef GB_COMPILER_H
#define GB_COMPILER_H
//------------------------------------------------------------------------------
// compiler variations
//------------------------------------------------------------------------------
// Determine the restrict keyword, and whether or not variable-length arrays
// are supported.
#if ( _MSC_VER && !__INTEL_COMPILER )
// Microsoft Visual Studio does not have the restrict keyword, but it does
// support __restrict, which is equivalent. Variable-length arrays are
    // not supported.  OpenMP tasks are not available either, but GraphBLAS
    // no longer uses OpenMP tasks, so this is not a limitation.
#define GB_MICROSOFT 1
#define GB_HAS_VLA 0
#if defined ( __cplusplus )
// C++ does not have the restrict keyword
#define restrict
#else
// C uses __restrict
#define restrict __restrict
#endif
#elif defined ( __cplusplus )
#define GB_MICROSOFT 0
#define GB_HAS_VLA 1
// C++ does not have the restrict keyword
#define restrict
#elif GxB_STDC_VERSION >= 199901L
// ANSI C99 and later have the restrict keyword and variable-length arrays.
#define GB_MICROSOFT 0
#define GB_HAS_VLA 1
#else
// ANSI C95 and earlier have neither
#define GB_MICROSOFT 0
#define GB_HAS_VLA 0
#define restrict
#endif
//------------------------------------------------------------------------------
// Microsoft specific include files
//------------------------------------------------------------------------------
#if GB_MICROSOFT
#include <malloc.h>
#endif
//------------------------------------------------------------------------------
// PGI_COMPILER_BUG
//------------------------------------------------------------------------------
// If GraphBLAS is compiled with -DPGI_COMPILER_BUG, then a workaround is
// enabled for a bug in the PGI compiler. The compiler does not correctly
// handle automatic arrays of variable size.
#ifdef PGI_COMPILER_BUG
// override the ANSI C compiler to turn off variable-length arrays
#undef GB_HAS_VLA
#define GB_HAS_VLA 0
#endif
//------------------------------------------------------------------------------
// OpenMP pragmas and tasks
//------------------------------------------------------------------------------
// GB_PRAGMA(x) becomes "#pragma x", but the way to do this depends on the
// compiler:
#if GB_MICROSOFT
// MS Visual Studio is not ANSI C11 compliant, and uses __pragma:
#define GB_PRAGMA(x) __pragma (x)
#else
// ANSI C11 compilers use _Pragma:
#define GB_PRAGMA(x) _Pragma (#x)
#endif
// construct pragmas for loop vectorization:
#if GB_MICROSOFT
// no #pragma omp simd is available in MS Visual Studio
#define GB_PRAGMA_SIMD
#define GB_PRAGMA_SIMD_REDUCTION(op,s)
#else
// create two kinds of SIMD pragmas:
// GB_PRAGMA_SIMD becomes "#pragma omp simd"
// GB_PRAGMA_SIMD_REDUCTION (+,cij) becomes
// "#pragma omp simd reduction(+:cij)"
#define GB_PRAGMA_SIMD GB_PRAGMA (omp simd)
#define GB_PRAGMA_SIMD_REDUCTION(op,s) GB_PRAGMA (omp simd reduction(op:s))
#endif
#define GB_PRAGMA_IVDEP GB_PRAGMA(ivdep)
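    // Worked example (illustrative only, not part of the library): a loop
    // written with the portable reduction macro,
    //
    //      double s = 0 ;
    //      GB_PRAGMA_SIMD_REDUCTION (+, s)
    //      for (int64_t k = 0 ; k < n ; k++) s += X [k] ;
    //
    // expands to "#pragma omp simd reduction(+:s)" with ANSI C11 compilers,
    // and to nothing under MS Visual Studio, where the loop still runs
    // correctly but without the vectorization hint.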
//------------------------------------------------------------------------------
// variable-length arrays
//------------------------------------------------------------------------------
// If variable-length arrays are not supported, user-defined types are limited
// in size to 128 bytes or less. Many of the type-generic routines allocate
// workspace for a single scalar of variable size, using a statement:
//
// GB_void aij [xsize] ;
//
// To support non-variable-length arrays in ANSI C95 or earlier, this is used:
//
// GB_void aij [GB_VLA(xsize)] ;
//
// GB_VLA(xsize) is either defined as xsize (for ANSI C99 or later), or a fixed
// size of 128, in which case user-defined types
// are limited to a max of 128 bytes.
#if ( GB_HAS_VLA )
// variable-length arrays are allowed
#define GB_VLA(s) s
#else
// variable-length arrays are not allowed
#define GB_VLA_MAXSIZE 128
#define GB_VLA(s) GB_VLA_MAXSIZE
#endif
#endif
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
int main (int argc, char **argv) {
int Nthreads = atoi(argv[argc - 1]);
omp_set_num_threads(Nthreads);
//seed value for the randomizer
  unsigned int seed = clock(); //this will make your program run differently every time
  //unsigned int seed = 0; //uncomment this and your program will behave the same every time it's run
srand(seed);
  //declare storage for an ElGamal cryptosystem
unsigned int p, g, h, x;
//begin with rank 0 getting user's input
unsigned int n;
// printf("Enter a number of bits: "); fflush(stdout);
//char status = scanf("%u",&n);
n = 20;
//make sure the input makes sense
if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars)
printf("Unsupported bit size.\n");
return 0;
}
printf("\n");
//setup an ElGamal cryptosystem
setupElGamal(n,&p,&g,&h,&x);
int bufferSize = 1024;
unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char));
//populate the string with a message
  strcpy((char *) message, "Hello, this is the message as a string.");
printf("Message = \"%s\"\n", message);
/* Q1.1 Finish this line */
unsigned int charsPerInt = (n-1)/8 ;
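  //worked example (illustrative): with n = 20 bits, charsPerInt = (20-1)/8 = 2,
  //so two 8-bit characters pack into one residue whose 16-bit value stays
  //below the (roughly 2^20-sized) modulus p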
padString(message, charsPerInt);
printf("Padded Message = \"%s\"\n", message);
  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints = Nchars/charsPerInt;
//storage for message as elements of Z_p
unsigned int *Zmessage =
(unsigned int *) malloc(Nints*sizeof(unsigned int));
//storage for extra encryption coefficient
unsigned int *a =
(unsigned int *) malloc(Nints*sizeof(unsigned int));
// cast the string into an unsigned int array
convertStringToZ(message, Nchars, Zmessage, Nints);
  //Encrypt the Zmessage with the ElGamal cryptographic system
ElGamalEncrypt(Zmessage,a,Nints,p,g,h);
printf("The encrypted text is: ");
for (unsigned int i=0;i<Nints;i++) {
printf("(%u,%u) ", Zmessage[i], a[i]);
}
printf("]\n");
  //Decrypt the Zmessage with the ElGamal cryptographic system
ElGamalDecrypt(Zmessage,a,Nints,p,x);
convertZToString(Zmessage, Nints, message, Nchars);
printf("Decrypted Message = \"%s\"\n", message);
printf("\n");
//Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel
printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);
/* Q2.3 Parallelize this loop with OpenMP */
double startTime = omp_get_wtime();
int zero = 0;
  //note: the OpenMP spec forbids writing to the parallel loop variable, so
  //the old "i = p" early exit is replaced by a shared flag that later
  //iterations test before doing any work
  #pragma omp parallel for
  for (unsigned int i=0;i<p-1;i++) {
    if(zero == 0){
      if (modExp(g,i+1,p)==h) {
        #pragma omp critical
        {
          printf("Secret key found! x = %u \n", i+1);
          zero = 1;
        }
      }
    }
  }
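  //A hedged alternative sketch (requires OpenMP 4.0+ and cancellation enabled
  //at run time with OMP_CANCELLATION=true): replace the shared flag with the
  //OpenMP cancellation constructs:
  //
  //  #pragma omp parallel for
  //  for (unsigned int i=0;i<p-1;i++) {
  //    if (modExp(g,i+1,p)==h) {
  //      #pragma omp critical
  //      printf("Secret key found! x = %u \n", i+1);
  //      #pragma omp cancel for
  //    }
  //    #pragma omp cancellation point for
  //  }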
double endTime = omp_get_wtime();
double totalTime = endTime-startTime;
double work = (double) p;
double throughput = work/totalTime;
printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);
return 0;
}
|
mmp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define NRA 1000 /* number of rows in matrix A */
#define NCA 1000 /* number of columns in matrix A */
#define NCB 1000 /* number of columns in matrix B */
#define printf(...) /* discard all output so that only the computation is timed */
int main () {
int i, j, k;
/* matrix A to be multiplied */
double **a = (double **) malloc(sizeof(double *) * NRA);
for (i = 0; i < NRA; i ++)
a[i] = (double *) malloc(sizeof(double) * NCA);
/* matrix B to be multiplied */
double **b = (double **) malloc(sizeof(double *) * NCA);
for (i = 0; i < NCA; i ++)
b[i] = (double *) malloc(sizeof(double) * NCB);
/* result matrix C */
double **c = (double **) malloc(sizeof(double *) * NRA);
for (i = 0; i < NRA; i ++)
c[i] = (double *) malloc(sizeof(double) * NCB);
printf("Initializing matrices...\n");
/*** Initialize matrices ***/
for (i=0; i<NRA; i++)
for (j=0; j<NCA; j++)
a[i][j]= i+j;
for (i=0; i<NCA; i++)
for (j=0; j<NCB; j++)
b[i][j]= i*j;
for (i=0; i<NRA; i++)
for (j=0; j<NCB; j++)
c[i][j]= 0;
/*** Do the matrix-matrix multiplication ***/
#pragma omp parallel for private(i, j, k) schedule(dynamic)
for (i=0; i<NRA; i++)
for(j=0; j<NCB; j++)
for (k=0; k<NCA; k++)
c[i][j] += a[i][k] * b[k][j];
/*** Print results ***/
printf("******************************************************\n");
printf("Result Matrix:\n");
for (i=0; i<NRA; i++)
{
for (j=0; j<NCB; j++)
printf("%10.2f ", c[i][j]);
printf("\n");
}
printf("******************************************************\n");
printf ("Done.\n");
for (i = 0; i < NRA; i ++)
free(a[i]);
free(a);
for (i = 0; i < NCA; i ++)
free(b[i]);
free(b);
for (i = 0; i < NRA; i ++)
free(c[i]);
free(c);
return 0;
}
|
perturbation_fold.c | #ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef VRNA_WITH_GSL
#include <gsl/gsl_multimin.h>
#endif
#include "ViennaRNA/eval.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/part_func.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/perturbation_fold.h"
static void
calculate_probability_unpaired(vrna_fold_compound_t *vc,
double *probability)
{
int length = vc->length;
FLT_OR_DBL *probs = vc->exp_matrices->probs;
int *iidx = vc->iindx;
int i, j;
for (i = 0; i <= length; ++i)
probability[i] = 1;
for (i = 1; i <= length; ++i)
for (j = i + 1; j <= length; ++j) {
probability[i] -= probs[iidx[i] - j];
probability[j] -= probs[iidx[i] - j];
}
}
#if 0
static double
calculate_norm(double *vector,
int length)
{
double sum = 0;
int i;
for (i = 1; i <= length; ++i)
sum += vector[i] * vector[i];
return sqrt(sum);
}
#endif
static void
addSoftConstraint(vrna_fold_compound_t *vc,
const double *epsilon,
int length)
{
/* remove previous soft constraints */
vrna_sc_init(vc);
/* prepare vector of unpaired constraints in kcal/mol */
FLT_OR_DBL *constraints = (FLT_OR_DBL *)vrna_alloc(sizeof(FLT_OR_DBL) * (length + 1));
memcpy(constraints + 1, epsilon + 1, sizeof(FLT_OR_DBL) * length);
/* add new soft constraints */
vrna_sc_set_up(vc, (const FLT_OR_DBL *)constraints, VRNA_OPTION_DEFAULT);
free(constraints);
}
static double
evaluate_objective_function_contribution(double value,
int objective_function)
{
if (objective_function == VRNA_OBJECTIVE_FUNCTION_QUADRATIC)
return value * value;
if (objective_function == VRNA_OBJECTIVE_FUNCTION_ABSOLUTE)
return fabs(value);
assert(0);
return 0;
}
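/*
 * Together these terms define the objective minimized below.  As a sketch of
 * the math implied by the code (not quoted from the ViennaRNA manual):
 *
 *   F(epsilon) = sum_i f(epsilon_i) / tau^2
 *              + sum_{i : q_i >= 0} f(p_i(epsilon) - q_i) / sigma^2
 *
 * where f(v) = v^2 (quadratic) or |v| (absolute), p_i is the predicted
 * probability that position i is unpaired in the perturbed model, and q_i
 * is the observed probability (q_i < 0 marks missing data).
 */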
static double
evaluate_perturbation_vector_score(vrna_fold_compound_t *vc,
const double *epsilon,
const double *q_prob_unpaired,
double sigma_squared,
double tau_squared,
int objective_function)
{
double ret = 0;
double ret2 = 0.;
double *p_prob_unpaired;
int i;
int length = vc->length;
  /* calculate pairing probability in the perturbed energy model */
p_prob_unpaired = vrna_alloc(sizeof(double) * (length + 1));
addSoftConstraint(vc, epsilon, length);
vc->params->model_details.compute_bpp = 1;
vc->exp_params->model_details.compute_bpp = 1;
/* get new (constrained) MFE to scale pf computations properly */
double mfe = (double)vrna_mfe(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
vrna_pf(vc, NULL);
calculate_probability_unpaired(vc, p_prob_unpaired);
vrna_sc_remove(vc);
for (i = 1; i <= length; ++i) {
    /* add penalty for perturbation energies */
ret += evaluate_objective_function_contribution(epsilon[i], objective_function) / tau_squared;
/* add penalty for mismatches between observed and predicted probabilities */
if (q_prob_unpaired[i] >= 0) /* ignore positions with missing data */
ret2 += evaluate_objective_function_contribution(p_prob_unpaired[i] - q_prob_unpaired[i],
objective_function) / sigma_squared;
}
  vrna_message_info(stderr, "Score: perturbation: %g\tdiscrepancy: %g", ret, ret2);
free(p_prob_unpaired);
return ret + ret2;
}
static void
pairing_probabilities_from_restricted_pf(vrna_fold_compound_t *vc,
const double *epsilon,
double *prob_unpaired,
double **conditional_prob_unpaired)
{
int length = vc->length;
int i;
addSoftConstraint(vc, epsilon, length);
vc->params->model_details.compute_bpp = 1;
vc->exp_params->model_details.compute_bpp = 1;
/* get new (constrained) MFE to scale pf computations properly */
double mfe = (double)vrna_mfe(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
vrna_pf(vc, NULL);
calculate_probability_unpaired(vc, prob_unpaired);
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for (i = 1; i <= length; ++i) {
vrna_fold_compound_t *restricted_vc;
char *hc_string;
unsigned int constraint_options = VRNA_CONSTRAINT_DB
| VRNA_CONSTRAINT_DB_PIPE
| VRNA_CONSTRAINT_DB_DOT
| VRNA_CONSTRAINT_DB_X
| VRNA_CONSTRAINT_DB_ANG_BRACK
| VRNA_CONSTRAINT_DB_RND_BRACK;
hc_string = vrna_alloc(sizeof(char) * (length + 1));
memset(hc_string, '.', length);
hc_string[i - 1] = 'x';
restricted_vc = vrna_fold_compound(vc->sequence,
&(vc->exp_params->model_details),
VRNA_OPTION_DEFAULT);
vrna_constraints_add(restricted_vc, hc_string, constraint_options);
free(hc_string);
vrna_exp_params_subst(restricted_vc, vc->exp_params);
vrna_pf(restricted_vc, NULL);
calculate_probability_unpaired(restricted_vc, conditional_prob_unpaired[i]);
restricted_vc->sc = NULL;
vrna_fold_compound_free(restricted_vc);
}
vrna_sc_remove(vc);
}
static void
pairing_probabilities_from_sampling(vrna_fold_compound_t *vc,
const double *epsilon,
int sample_size,
double *prob_unpaired,
double **conditional_prob_unpaired,
unsigned int options)
{
char **samples, **ptr;
int length, i, j;
double mfe;
length = vc->length;
addSoftConstraint(vc, epsilon, length);
vc->params->model_details.compute_bpp = 0;
vc->exp_params->model_details.compute_bpp = 0;
/* get new (constrained) MFE to scale pf computations properly */
mfe = (double)vrna_mfe(vc, NULL);
vrna_exp_params_rescale(vc, &mfe);
vrna_pf(vc, NULL);
samples = vrna_pbacktrack_num(vc,
(unsigned int)sample_size,
options);
for (ptr = samples; (*ptr); ptr++) {
for (i = length; i > 0; i--) {
if ((*ptr)[i - 1] == '.') {
++prob_unpaired[i];
for (j = length; j > 0; j--)
if ((*ptr)[j - 1] == '.')
++conditional_prob_unpaired[i][j];
}
}
free(*ptr);
}
free(samples);
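  /* convert raw counts into estimates (the order matters: the conditional
   * probabilities conditional_prob_unpaired[i][j] ~ #(i and j unpaired) /
   * #(i unpaired) are normalized before prob_unpaired[i] itself is divided
   * by sample_size) */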
for (i = 1; i <= length; ++i) {
if (prob_unpaired[i])
for (j = 1; j <= length; ++j)
conditional_prob_unpaired[i][j] /= prob_unpaired[i];
prob_unpaired[i] /= sample_size;
assert(prob_unpaired[i] >= 0 && prob_unpaired[i] <= 1);
}
vrna_sc_remove(vc);
}
static void
allocateProbabilityArrays(double **unpaired,
double ***conditional_unpaired,
int length)
{
int i;
*unpaired = vrna_alloc(sizeof(double) * (length + 1));
*conditional_unpaired = vrna_alloc(sizeof(double *) * (length + 1));
for (i = 1; i <= length; ++i)
(*conditional_unpaired)[i] = vrna_alloc(sizeof(double) * (length + 1));
}
static void
freeProbabilityArrays(double *unpaired,
double **conditional_unpaired,
int length)
{
int i;
free(unpaired);
for (i = 1; i <= length; ++i)
free(conditional_unpaired[i]);
free(conditional_unpaired);
}
static void
evaluate_perturbation_vector_gradient(vrna_fold_compound_t *vc,
const double *epsilon,
const double *q_prob_unpaired,
double sigma_squared,
double tau_squared,
int objective_function,
int sample_size,
double *gradient)
{
double *p_prob_unpaired;
double **p_conditional_prob_unpaired;
int i, mu;
int length = vc->length;
double kT = vc->exp_params->kT / 1000;
allocateProbabilityArrays(&p_prob_unpaired, &p_conditional_prob_unpaired, length);
if (sample_size > 0) {
pairing_probabilities_from_sampling(vc,
epsilon,
sample_size,
p_prob_unpaired,
p_conditional_prob_unpaired,
VRNA_PBACKTRACK_DEFAULT);
} else if (sample_size < 0) {
pairing_probabilities_from_sampling(vc,
epsilon,
-sample_size,
p_prob_unpaired,
p_conditional_prob_unpaired,
VRNA_PBACKTRACK_NON_REDUNDANT);
} else {
pairing_probabilities_from_restricted_pf(vc,
epsilon,
p_prob_unpaired,
p_conditional_prob_unpaired);
}
for (mu = 1; mu <= length; ++mu) {
double sum = 0;
if (objective_function == VRNA_OBJECTIVE_FUNCTION_QUADRATIC) {
for (i = 1; i <= length; ++i) {
if (q_prob_unpaired[i] < 0) /* ignore positions with missing data */
continue;
sum += (p_prob_unpaired[i] - q_prob_unpaired[i])
* p_prob_unpaired[i] * (p_prob_unpaired[mu] - p_conditional_prob_unpaired[i][mu])
/ sigma_squared;
}
gradient[mu] = 2 * (epsilon[mu] / tau_squared + sum / kT);
} else if (objective_function == VRNA_OBJECTIVE_FUNCTION_ABSOLUTE) {
for (i = 1; i <= length; ++i)
if (q_prob_unpaired[i] >= 0 && p_prob_unpaired[i] != q_prob_unpaired[i]) {
sum += (p_prob_unpaired[i] * (p_prob_unpaired[mu] - p_conditional_prob_unpaired[i][mu])) /
kT
/ sigma_squared
* (p_prob_unpaired[i] > q_prob_unpaired[i] ? 1. : -1.);
}
if (epsilon[mu])
sum += (epsilon[mu] > 0 ? 1. : -1.) / tau_squared;
gradient[mu] = sum;
}
}
freeProbabilityArrays(p_prob_unpaired, p_conditional_prob_unpaired, length);
}
#ifdef VRNA_WITH_GSL
typedef struct parameters_gsl {
vrna_fold_compound_t *vc;
const double *q_prob_unpaired;
double sigma_squared;
double tau_squared;
int objective_function;
int sample_size;
} parameters_gsl;
static double
f_gsl(const gsl_vector *x,
void *params)
{
parameters_gsl *p = params;
return evaluate_perturbation_vector_score(p->vc,
x->data,
p->q_prob_unpaired,
p->sigma_squared,
p->tau_squared,
p->objective_function);
}
static void
df_gsl(const gsl_vector *x,
void *params,
gsl_vector *df)
{
parameters_gsl *p = params;
gsl_vector_set(df, 0, 0);
evaluate_perturbation_vector_gradient(p->vc,
x->data,
p->q_prob_unpaired,
p->sigma_squared,
p->tau_squared,
p->objective_function,
p->sample_size,
df->data);
}
static void
fdf_gsl(const gsl_vector *x,
void *params,
double *f,
gsl_vector *g)
{
*f = f_gsl(x, params);
df_gsl(x, params, g);
}
#endif /* VRNA_WITH_GSL */
PUBLIC void
vrna_sc_minimize_pertubation(vrna_fold_compound_t *vc,
const double *q_prob_unpaired,
int objective_function,
double sigma_squared,
double tau_squared,
int algorithm,
int sample_size,
double *epsilon,
double initialStepSize,
double minStepSize,
double minImprovement,
double minimizerTolerance,
progress_callback callback)
{
int iteration_count = 0;
const int max_iterations = 100;
int length = vc->length;
#ifdef VRNA_WITH_GSL
const gsl_multimin_fdfminimizer_type *minimizer_type = 0;
struct {
int type;
const gsl_multimin_fdfminimizer_type *gsl_type;
} algorithms[] =
{ { VRNA_MINIMIZER_CONJUGATE_FR,
gsl_multimin_fdfminimizer_conjugate_fr },
{ VRNA_MINIMIZER_CONJUGATE_PR,
gsl_multimin_fdfminimizer_conjugate_pr },
{ VRNA_MINIMIZER_VECTOR_BFGS,
gsl_multimin_fdfminimizer_vector_bfgs },
{ VRNA_MINIMIZER_VECTOR_BFGS2,
gsl_multimin_fdfminimizer_vector_bfgs2 },
{ VRNA_MINIMIZER_STEEPEST_DESCENT,
gsl_multimin_fdfminimizer_steepest_descent },
{ 0,
NULL } };
int i;
for (i = 0; algorithms[i].type; ++i)
if (algorithms[i].type == algorithm) {
minimizer_type = algorithms[i].gsl_type;
break;
}
if (minimizer_type) {
parameters_gsl parameters;
gsl_multimin_function_fdf fdf;
gsl_multimin_fdfminimizer *minimizer;
gsl_vector *vector;
int status;
parameters.vc = vc;
parameters.q_prob_unpaired = q_prob_unpaired;
parameters.sigma_squared = sigma_squared;
parameters.tau_squared = tau_squared;
parameters.objective_function = objective_function;
parameters.sample_size = sample_size;
fdf.n = length + 1;
fdf.f = &f_gsl;
fdf.df = &df_gsl;
fdf.fdf = &fdf_gsl;
fdf.params = (void *)¶meters;
minimizer = gsl_multimin_fdfminimizer_alloc(minimizer_type, length + 1);
vector = gsl_vector_calloc(length + 1);
/* gsl_multimin_fdfminimizer_set(minimizer, &fdf, vector, 0.01, 1e-4); */
gsl_multimin_fdfminimizer_set(minimizer, &fdf, vector, initialStepSize, minimizerTolerance);
if (callback)
callback(0, minimizer->f, minimizer->x->data);
do {
++iteration_count;
status = gsl_multimin_fdfminimizer_iterate(minimizer);
if (callback)
callback(iteration_count, minimizer->f, minimizer->x->data);
if (status)
break;
status = gsl_multimin_test_gradient(minimizer->gradient, minimizerTolerance);
} while (status == GSL_CONTINUE && iteration_count < max_iterations);
memcpy(epsilon, minimizer->x->data, sizeof(double) * (length + 1));
gsl_multimin_fdfminimizer_free(minimizer);
gsl_vector_free(vector);
return;
}
#endif /* VRNA_WITH_GSL */
double improvement;
const double min_improvement = minImprovement;
double *new_epsilon = vrna_alloc(sizeof(double) * (length + 1));
double *gradient = vrna_alloc(sizeof(double) * (length + 1));
double score = evaluate_perturbation_vector_score(vc,
epsilon,
q_prob_unpaired,
sigma_squared,
tau_squared,
objective_function);
if (callback)
callback(0, score, epsilon);
do {
double new_score;
double step_size;
++iteration_count;
evaluate_perturbation_vector_gradient(vc,
epsilon,
q_prob_unpaired,
sigma_squared,
tau_squared,
objective_function,
sample_size,
gradient);
/* step_size = 0.5 / calculate_norm(gradient, length);*/
step_size = initialStepSize;
do {
int i;
for (i = 1; i <= length; ++i)
new_epsilon[i] = epsilon[i] - step_size * gradient[i];
new_score = evaluate_perturbation_vector_score(vc,
new_epsilon,
q_prob_unpaired,
sigma_squared,
tau_squared,
objective_function);
improvement = 1 - new_score / score;
step_size /= 2;
} while ((improvement < min_improvement) && (step_size >= minStepSize));
if (new_score > score)
break;
if (callback)
callback(iteration_count, new_score, new_epsilon);
score = new_score;
memcpy(epsilon, new_epsilon, sizeof(double) * (length + 1));
} while (improvement >= min_improvement && iteration_count < max_iterations);
free(gradient);
free(new_epsilon);
}
|
pi.c | #include <stdio.h>
#include <omp.h>
int nthreads;
double seq_pi_calc(long num_steps)
{
double sum=0.0,x;
int i;
for(i=0;i<num_steps;++i)
{
x = (i + 0.5)/num_steps;
sum = sum + 4.0/(1.0 + x*x);
}
return sum/num_steps;
}
double parallel_pi_calc(long num_steps,int NUM_THREADS)
{
double sum=0.0;
omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
{
int ID = omp_get_thread_num();
int increment = omp_get_num_threads();
if(ID == 0)
nthreads = increment;
long j;
double t_sum=0,x;
for(j=ID;j < num_steps;j+=increment)
{
x = (j + 0.5)/num_steps;
t_sum = t_sum + 4.0/(1.0 + x*x);
}
#pragma omp critical
{
sum = sum + t_sum;
}
}
return (double)sum/num_steps;
}
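//Equivalent variant (a sketch; same numerical method): let OpenMP combine the
//partial sums with a reduction clause instead of the manual critical section.
double reduction_pi_calc(long num_steps,int NUM_THREADS)
{
	double sum=0.0,x;
	long j;
	omp_set_num_threads(NUM_THREADS);
	#pragma omp parallel for private(x) reduction(+:sum)
	for(j=0;j<num_steps;++j)
	{
		x = (j + 0.5)/num_steps;
		sum = sum + 4.0/(1.0 + x*x);
	}
	return sum/num_steps;
}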
int main()
{
int i;
static long num_steps = 10000000;
double time_taken_seq,time_taken_parallel,pi;
//Sequential Pi Calculation
time_taken_seq = omp_get_wtime();
pi = seq_pi_calc(num_steps);
time_taken_seq = omp_get_wtime() - time_taken_seq;
printf("Value of Pi by Sequential Calculation : %lf\n",pi );
//Parallel Pi Calculation
int NUM_THREADS = 2;
printf("Parallel Calculation\n");
while(NUM_THREADS <= 20)
{
time_taken_parallel = omp_get_wtime();
pi = parallel_pi_calc(num_steps,NUM_THREADS);
time_taken_parallel = omp_get_wtime() - time_taken_parallel;
printf("Pi : %lf \t Speedup: %lf \t Threads : %d\n", pi,time_taken_parallel/time_taken_seq,NUM_THREADS);
NUM_THREADS++;
}
	return 0;
} |
parallel-inl.h | //
// parallel-inl.h
// DigitalRender
//
// Created by 杨丰 on 2020/11/3.
//
#ifndef parallel_inl_h
#define parallel_inl_h
#include "constants.h"
#include <algorithm>
#include <functional>
#include <future>
#include <vector>
#define JET_TASKING_TBB true
#ifdef JET_TASKING_TBB
#include <tbb/parallel_for.h>
#include <tbb/parallel_reduce.h>
#include <tbb/parallel_sort.h>
#include <tbb/task.h>
#elif defined(JET_TASKING_CPP11THREADS)
#include <thread>
#endif
namespace internal {
// NOTE - This abstraction takes a lambda which should take captured
// variables by *value* to ensure no captured references race
// with the task itself.
template <typename TASK_T>
inline void schedule(TASK_T&& fcn) {
#ifdef JET_TASKING_TBB
struct LocalTBBTask : public tbb::task {
TASK_T func;
tbb::task* execute() override {
func();
return nullptr;
}
LocalTBBTask(TASK_T&& f) : func(std::forward<TASK_T>(f)) {}
};
auto* tbb_node = new (tbb::task::allocate_root())
LocalTBBTask(std::forward<TASK_T>(fcn));
tbb::task::enqueue(*tbb_node);
#elif defined(JET_TASKING_CPP11THREADS)
std::thread thread(fcn);
thread.detach();
#else // OpenMP or Serial --> synchronous!
fcn();
#endif
}
template <typename TASK_T>
using operator_return_t = typename std::result_of<TASK_T()>::type;
// NOTE - see above, same issues associated with schedule()
template <typename TASK_T>
inline auto async(TASK_T&& fcn) -> std::future<operator_return_t<TASK_T>> {
using package_t = std::packaged_task<operator_return_t<TASK_T>()>;
auto task = new package_t(std::forward<TASK_T>(fcn));
auto future = task->get_future();
schedule([=]() {
(*task)();
delete task;
});
return future;
}
// Adopted from:
// Radenski, A.
// Shared Memory, Message Passing, and Hybrid Merge Sorts for Standalone and
// Clustered SMPs. Proc PDPTA'11, the 2011 International Conference on Parallel
// and Distributed Processing Techniques and Applications, CSREA Press
// (H. Arabnia, Ed.), 2011, pp. 367 - 373.
template <typename RandomIterator, typename RandomIterator2,
typename CompareFunction>
void merge(RandomIterator a, size_t size, RandomIterator2 temp,
CompareFunction compareFunction) {
size_t i1 = 0;
size_t i2 = size / 2;
size_t tempi = 0;
while (i1 < size / 2 && i2 < size) {
if (compareFunction(a[i1], a[i2])) {
temp[tempi] = a[i1];
i1++;
} else {
temp[tempi] = a[i2];
i2++;
}
tempi++;
}
while (i1 < size / 2) {
temp[tempi] = a[i1];
i1++;
tempi++;
}
while (i2 < size) {
temp[tempi] = a[i2];
i2++;
tempi++;
}
// Copy sorted temp array into main array, a
parallelFor(kZeroSize, size, [&](size_t i) { a[i] = temp[i]; });
}
template <typename RandomIterator, typename RandomIterator2,
typename CompareFunction>
void parallelMergeSort(RandomIterator a, size_t size, RandomIterator2 temp,
unsigned int numThreads,
CompareFunction compareFunction) {
if (numThreads == 1) {
std::sort(a, a + size, compareFunction);
} else if (numThreads > 1) {
std::vector<std::future<void>> pool;
pool.reserve(2);
auto launchRange = [compareFunction](RandomIterator begin, size_t k2,
RandomIterator2 temp,
unsigned int numThreads) {
parallelMergeSort(begin, k2, temp, numThreads, compareFunction);
};
pool.emplace_back(internal::async(
[=]() { launchRange(a, size / 2, temp, numThreads / 2); }));
pool.emplace_back(internal::async([=]() {
launchRange(a + size / 2, size - size / 2, temp + size / 2,
numThreads - numThreads / 2);
}));
// Wait for jobs to finish
for (auto& f : pool) {
if (f.valid()) {
f.wait();
}
}
merge(a, size, temp, compareFunction);
}
}
} // namespace internal
template <typename RandomIterator, typename T>
void parallelFill(const RandomIterator& begin, const RandomIterator& end,
const T& value, ExecutionPolicy policy) {
auto diff = end - begin;
if (diff <= 0) {
return;
}
size_t size = static_cast<size_t>(diff);
parallelFor(kZeroSize, size, [begin, value](size_t i) { begin[i] = value; },
policy);
}
// Adopted from http://ideone.com/Z7zldb
template <typename IndexType, typename Function>
void parallelFor(IndexType start, IndexType end, const Function& func,
ExecutionPolicy policy) {
if (start > end) {
return;
}
#ifdef JET_TASKING_TBB
if (policy == ExecutionPolicy::kParallel) {
tbb::parallel_for(start, end, func);
} else {
for (auto i = start; i < end; ++i) {
func(i);
}
}
#elif defined(JET_TASKING_CPP11THREADS)
// Estimate number of threads in the pool
unsigned int numThreadsHint = maxNumberOfThreads();
const unsigned int numThreads =
(policy == ExecutionPolicy::kParallel)
? (numThreadsHint == 0u ? 8u : numThreadsHint)
: 1;
// Size of a slice for the range functions
IndexType n = end - start + 1;
IndexType slice =
(IndexType)std::round(n / static_cast<double>(numThreads));
slice = std::max(slice, IndexType(1));
// [Helper] Inner loop
auto launchRange = [&func](IndexType k1, IndexType k2) {
for (IndexType k = k1; k < k2; k++) {
func(k);
}
};
// Create pool and launch jobs
std::vector<std::thread> pool;
pool.reserve(numThreads);
IndexType i1 = start;
IndexType i2 = std::min(start + slice, end);
for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) {
pool.emplace_back(launchRange, i1, i2);
i1 = i2;
i2 = std::min(i2 + slice, end);
}
if (i1 < end) {
pool.emplace_back(launchRange, i1, end);
}
// Wait for jobs to finish
for (std::thread& t : pool) {
if (t.joinable()) {
t.join();
}
}
#else
#ifdef JET_TASKING_OPENMP
if (policy == ExecutionPolicy::kParallel) {
#pragma omp parallel for
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
for (ssize_t i = start; i < ssize_t(end); ++i) {
#else // !MSVC || Intel
for (auto i = start; i < end; ++i) {
#endif // MSVC && !Intel
func(i);
}
} else {
for (auto i = start; i < end; ++i) {
func(i);
}
}
#else // JET_TASKING_OPENMP
for (auto i = start; i < end; ++i) {
func(i);
}
#endif // JET_TASKING_OPENMP
#endif
}
template <typename IndexType, typename Function>
void parallelRangeFor(IndexType start, IndexType end, const Function& func,
ExecutionPolicy policy) {
if (start > end) {
return;
}
#ifdef JET_TASKING_TBB
if (policy == ExecutionPolicy::kParallel) {
tbb::parallel_for(tbb::blocked_range<IndexType>(start, end),
[&func](const tbb::blocked_range<IndexType>& range) {
func(range.begin(), range.end());
});
} else {
func(start, end);
}
#else
// Estimate number of threads in the pool
unsigned int numThreadsHint = maxNumberOfThreads();
const unsigned int numThreads =
(policy == ExecutionPolicy::kParallel)
? (numThreadsHint == 0u ? 8u : numThreadsHint)
: 1;
// Size of a slice for the range functions
IndexType n = end - start + 1;
IndexType slice =
(IndexType)std::round(n / static_cast<double>(numThreads));
slice = std::max(slice, IndexType(1));
// Create pool and launch jobs
std::vector<std::future<void>> pool;
pool.reserve(numThreads);
IndexType i1 = start;
IndexType i2 = std::min(start + slice, end);
for (unsigned int i = 0; i + 1 < numThreads && i1 < end; ++i) {
pool.emplace_back(internal::async([=]() { func(i1, i2); }));
i1 = i2;
i2 = std::min(i2 + slice, end);
}
if (i1 < end) {
pool.emplace_back(internal::async([=]() { func(i1, end); }));
}
// Wait for jobs to finish
for (auto& f : pool) {
if (f.valid()) {
f.wait();
}
}
#endif
}
template <typename IndexType, typename Function>
void parallelFor(IndexType beginIndexX, IndexType endIndexX,
IndexType beginIndexY, IndexType endIndexY,
const Function& function, ExecutionPolicy policy) {
parallelFor(beginIndexY, endIndexY,
[&](IndexType j) {
for (IndexType i = beginIndexX; i < endIndexX; ++i) {
function(i, j);
}
},
policy);
}
template <typename IndexType, typename Function>
void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX,
IndexType beginIndexY, IndexType endIndexY,
const Function& function, ExecutionPolicy policy) {
parallelRangeFor(beginIndexY, endIndexY,
[&](IndexType jBegin, IndexType jEnd) {
function(beginIndexX, endIndexX, jBegin, jEnd);
},
policy);
}
template <typename IndexType, typename Function>
void parallelFor(IndexType beginIndexX, IndexType endIndexX,
IndexType beginIndexY, IndexType endIndexY,
IndexType beginIndexZ, IndexType endIndexZ,
const Function& function, ExecutionPolicy policy) {
parallelFor(beginIndexZ, endIndexZ,
[&](IndexType k) {
for (IndexType j = beginIndexY; j < endIndexY; ++j) {
for (IndexType i = beginIndexX; i < endIndexX; ++i) {
function(i, j, k);
}
}
},
policy);
}
template <typename IndexType, typename Function>
void parallelRangeFor(IndexType beginIndexX, IndexType endIndexX,
IndexType beginIndexY, IndexType endIndexY,
IndexType beginIndexZ, IndexType endIndexZ,
const Function& function, ExecutionPolicy policy) {
parallelRangeFor(beginIndexZ, endIndexZ,
[&](IndexType kBegin, IndexType kEnd) {
function(beginIndexX, endIndexX, beginIndexY,
endIndexY, kBegin, kEnd);
},
policy);
}
template <typename IndexType, typename Value, typename Function,
typename Reduce>
Value parallelReduce(IndexType start, IndexType end, const Value& identity,
const Function& func, const Reduce& reduce,
ExecutionPolicy policy) {
if (start > end) {
return identity;
}
#ifdef JET_TASKING_TBB
if (policy == ExecutionPolicy::kParallel) {
return tbb::parallel_reduce(
tbb::blocked_range<IndexType>(start, end), identity,
[&func](const tbb::blocked_range<IndexType>& range,
const Value& init) {
return func(range.begin(), range.end(), init);
},
reduce);
} else {
(void)reduce;
return func(start, end, identity);
}
#else
// Estimate number of threads in the pool
unsigned int numThreadsHint = maxNumberOfThreads();
const unsigned int numThreads =
(policy == ExecutionPolicy::kParallel)
? (numThreadsHint == 0u ? 8u : numThreadsHint)
: 1;
// Size of a slice for the range functions
IndexType n = end - start + 1;
IndexType slice =
(IndexType)std::round(n / static_cast<double>(numThreads));
slice = std::max(slice, IndexType(1));
// Results
std::vector<Value> results(numThreads, identity);
// [Helper] Inner loop
auto launchRange = [&](IndexType k1, IndexType k2, unsigned int tid) {
results[tid] = func(k1, k2, identity);
};
// Create pool and launch jobs
std::vector<std::future<void>> pool;
pool.reserve(numThreads);
IndexType i1 = start;
IndexType i2 = std::min(start + slice, end);
unsigned int tid = 0;
for (; tid + 1 < numThreads && i1 < end; ++tid) {
pool.emplace_back(internal::async([=]() { launchRange(i1, i2, tid); }));
i1 = i2;
i2 = std::min(i2 + slice, end);
}
if (i1 < end) {
pool.emplace_back(
internal::async([=]() { launchRange(i1, end, tid); }));
}
// Wait for jobs to finish
for (auto& f : pool) {
if (f.valid()) {
f.wait();
}
}
// Gather
Value finalResult = identity;
for (const Value& val : results) {
finalResult = reduce(val, finalResult);
}
return finalResult;
#endif
}
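// Usage sketch (illustrative only; assumes a std::vector<double> v in scope
// and that ExecutionPolicy::kParallel comes from constants.h): sum of squares.
//
//     double total = parallelReduce(
//         size_t(0), v.size(), 0.0,
//         [&v](size_t begin, size_t end, double init) {
//             double s = init;
//             for (size_t i = begin; i < end; ++i) s += v[i] * v[i];
//             return s;
//         },
//         std::plus<double>(), ExecutionPolicy::kParallel);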
template <typename RandomIterator, typename CompareFunction>
void parallelSort(RandomIterator begin, RandomIterator end,
CompareFunction compareFunction, ExecutionPolicy policy) {
if (end < begin) {
return;
}
#ifdef JET_TASKING_TBB
if (policy == ExecutionPolicy::kParallel) {
tbb::parallel_sort(begin, end, compareFunction);
} else {
std::sort(begin, end, compareFunction);
}
#else
size_t size = static_cast<size_t>(end - begin);
typedef
typename std::iterator_traits<RandomIterator>::value_type value_type;
std::vector<value_type> temp(size);
// Estimate number of threads in the pool
unsigned int numThreadsHint = maxNumberOfThreads();
const unsigned int numThreads =
(policy == ExecutionPolicy::kParallel)
? (numThreadsHint == 0u ? 8u : numThreadsHint)
: 1;
internal::parallelMergeSort(begin, size, temp.begin(), numThreads,
compareFunction);
#endif
}
template <typename RandomIterator>
void parallelSort(RandomIterator begin, RandomIterator end,
ExecutionPolicy policy) {
parallelSort(
begin, end,
std::less<typename std::iterator_traits<RandomIterator>::value_type>(),
policy);
}
#endif /* parallel_inl_h */
|
openbsdsoftraid_fmt_plug.c | /*
* Copyright (c) 2014 Thiébaud Weksteen <thiebaud at weksteen dot fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Fixed BE issues, and build problems (Fall 2014), JimF.
*/
#include "arch.h"
#if FMT_EXTERNS_H
extern struct fmt_main fmt_openbsd_softraid;
#elif FMT_REGISTERS_H
john_register_one(&fmt_openbsd_softraid);
#else
#include <openssl/evp.h>
#include <openssl/aes.h>
#include <openssl/hmac.h>
#include <openssl/sha.h>
#include "common.h"
#include "formats.h"
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 1
#endif
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef MMX_COEF
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define OPENBSD_SOFTRAID_SALTLENGTH 128
#define OPENBSD_SOFTRAID_KEYS 32
#define OPENBSD_SOFTRAID_KEYLENGTH 64 /* AES-XTS-256 keys are 512 bits long */
#define OPENBSD_SOFTRAID_MACLENGTH 20
#define BINARY_SIZE OPENBSD_SOFTRAID_MACLENGTH
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
static char (*key_buffer)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
unsigned int num_iterations;
unsigned char salt[OPENBSD_SOFTRAID_SALTLENGTH];
unsigned char masked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
} *cur_salt;
static void init(struct fmt_main *self)
{
OpenSSL_add_all_algorithms();
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
key_buffer = mem_calloc_tiny(sizeof(*key_buffer) *
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static int valid(char* ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
int i;
char *p;
if (strncmp(ciphertext, "$openbsd-softraid$", 18) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 18;
if ((p = strtok(ctcopy, "$")) == NULL)
goto err;
i = atoi(p);
if (i < 0) /* iterations */
goto err;
if ((p = strtok(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * 128) /* salt */
goto err;
if ((p = strtok(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * 32 * 64) /* masked keys */
goto err;
if ((p = strtok(NULL, "$")) == NULL)
goto err;
if (strlen(p) != 2 * BINARY_SIZE) /* HMAC-SHA1 */
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static void* get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
ctcopy += 18;
p = strtok(ctcopy, "$"); /* iterations */
cs.num_iterations = atoi(p);
p = strtok(NULL, "$"); /* salt */
for (i = 0; i < OPENBSD_SOFTRAID_SALTLENGTH ; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtok(NULL, "$"); /* masked keys */
for (i = 0; i < OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS; i++)
cs.masked_keys[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '$') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
AES_KEY akey;
unsigned char mask_key[MAX_KEYS_PER_CRYPT][32];
unsigned char unmasked_keys[OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS];
unsigned char hashed_mask_key[20];
int i, j;
/* derive masking key from password */
#ifdef SSE_GROUP_SZ_SHA1
int lens[SSE_GROUP_SZ_SHA1];
unsigned char *pin[SSE_GROUP_SZ_SHA1], *pout[SSE_GROUP_SZ_SHA1];
for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
lens[i] = strlen(key_buffer[index+i]);
pin[i] = (unsigned char*)key_buffer[index+i];
pout[i] = mask_key[i];
}
pbkdf2_sha1_sse((const unsigned char **)pin, lens,
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
cur_salt->num_iterations, (unsigned char**)pout,
32, 0);
#else
pbkdf2_sha1((const unsigned char*)(key_buffer[index]),
strlen(key_buffer[index]),
cur_salt->salt, OPENBSD_SOFTRAID_SALTLENGTH,
cur_salt->num_iterations, mask_key[0],
32, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
#if !ARCH_LITTLE_ENDIAN
alter_endianity(mask_key[i], 32);
#endif
/* decrypt sector keys */
AES_set_decrypt_key(mask_key[i], 256, &akey);
for(j = 0; j < (OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS) / 16; j++) {
AES_decrypt(&cur_salt->masked_keys[16*j], &unmasked_keys[16*j], &akey);
}
/* get SHA1 of mask_key */
SHA1(mask_key[i], 32, hashed_mask_key);
/* get HMAC-SHA1 of unmasked_keys using hashed_mask_key */
HMAC(EVP_sha1(), hashed_mask_key, OPENBSD_SOFTRAID_MACLENGTH,
unmasked_keys, OPENBSD_SOFTRAID_KEYLENGTH * OPENBSD_SOFTRAID_KEYS,
(unsigned char*)crypt_out[index+i], NULL);
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)(crypt_out[index]))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return (*(ARCH_WORD_32*)binary == *(ARCH_WORD_32*)(crypt_out[index]));
}
static int cmp_exact(char *source, int index)
{
void *bin = binary(source);
return !memcmp(bin, crypt_out[index], 20);
}
static void jtr_set_key(char* key, int index)
{
strcpy(key_buffer[index], key);
}
static char *get_key(int index)
{
return key_buffer[index];
}
#if FMT_MAIN_VERSION > 11
/* report iteration count as tunable cost */
static unsigned int iteration_count(void *salt)
{
return ((struct custom_salt*)salt)->num_iterations;
}
#endif
static struct fmt_tests tests_openbsdsoftraid[] = {
	// an overly long source line caused a compile failure on a Sparc box,
	// so this test vector is split across several string-literal lines
{"\
$openbsd-softraid$8192$c2891132ca5305d1189a7da94d32de29182abc2f56dc641d685e471935f2646e06b79f1d6c102c2f62f3757a20efb0a110b8ae207f9129f0dc5eea8ab05cc8280e0ba2460faf979dbac9f577c4a083349064364556b7ad15468c17c4d794c3da0ddf5990cc66751a6ded8d534531dd9aa9fce2f43e68d6a7200e135beb55e752$311c42d1d8daf1e47e0150c8d4a455a0567b062970c1838faaedcd3e43795545de64971c7598902a6e2c3fffcf8abe2ef78979164d0c9089fbb931c4c9dac8b86c85eeace11095e38487e41eb7b6094d96c339e86686121fbe1c32dbff3c00706926b22ec3a1329f346c599d132105b5d182a380161504d535f9836bb7286331adce1e47e4e251a0249612a94312bb309a6f4558568467731c1ae8c9b910d27102dca2a72228ffde7bfc60004c8ab33ca2b01aa476c4f42f99a3d1f904e3bbc56270edb314a62e92cf68185ace93731ef4ce08dff3c695c45e35b57ed8ab1552114635eb2ff531437ba5c3a08ebf3e73b6bbb7fe1ad98373da349f09284ae819b6a2f6fc5a10aec347f3c2331abc1d6617e77d68f314fdb683294f3ef351869491c4fb096969924215d711c15e5fce533dc5acaed4a473b14c595bababc178e62ef065770716520ecddc7cbf1cbed1250b7e004ab975bc29780c952087ec382bf6e77447720a10a8c2993262a2b21f8a3f47e35daa5b620573626b474d3e8abf8e73164664b041a18fe35c2a1905fad617bf6e6c380fdeeb680fa89b6c6dc7676ad93fde25076ecb8855d623b45af9a16a62a957d85c4c70896019be1827ad9320a69f18bdfc2674f04babdbfcd679c0ef22f7ab2a18818b9b425e61d8c06196a23babd0aefd5a00f1b297a66d973daae40f4dbd9be60d8953fafbd51f7745e2d04b5c80b63ad1f550cd939490b346d4fe7c1fc266d593bcafac0d8989994e174de6d1ef4ce78b3224ea4e68ccbf998654a067558537be332f5cae4b44c18664428d45b71cde5b53bedddf8a7daf47fce212578b72\
7e420c91de0baa1108683dd5b5534e81f4fe945d27fd9d28934afc8d15d95932952c0be717d4d87bb8255bf658a083c3aed643f7a6cfb56fbcbdab9e0a7348b0a3a91e3d560d1ec96f5769551e64beb54a499f6d6dd37e4361d484fe4f7bac4dc26c8a1a2609592d527b134c8212d71b3578217e0ec1da317c69e7e8c39d2d5b2d4073fa9c618a01a092b61613f6f1e41e6ab43d8ca010f177947aeab2884e9a4dd28453ff5bdadb765680733e7af1463ec1b20b879ae01c9256da0207811f956b3950f6db743a9e34a6d8f0fdfa5c47b4f807f0017c2092d72dc19d111711e796ffc4035da3a4caa6a5301491d0473b0d47cd01b705ff11a10263867013a11c65462c311fa5ac9a2598142779b55f09dbec89ac18049c29e5baf3aa38696a3b92d08b02cb10af5389e06058b3ad8be09b121e4e320520413775b7c6fbb3f2b332e3ac0295a4a4dfb4a56ea1c32bc28c149ffaa3b426f5a17a11afe56426b38966c86734654fe05a611c8f025ee4092656c097bbf59743c31508fa9e80ff86a2ae33d401ec316e65eef251d173e9565ffc1672b8b341174427a851a6a4c42554848c637283d13d4ba5b5414b4e61ade6ec7ef7b77186a81adff381e6a79d3dac2c68bf386f100fef1c354221a2ba3d8a7a10460f637eaa152ab79027ab94e5965660de3ed66dac4a0f8e75b85d768e51c8e82a26cb81249ca8d249d8c5cdc8bd55289679d3915a397d31863334df18e2fe3ef9069b064c4ef6b418e5388817040ae9922e5e9f57a8bf3b3fe04748b9cf5068ac86f942b4068853602a6c6c794423569b665b359d5f947c2e5ff194d23d953b435b2b3834513fdfda2b66fcea22883690b1cc56c2fcaa5600895ff8d8ae9e3a6a2b6258ff873242d1128b20e7d1e843ade1bd206b541eba02a214a95cd83860865f947cb4adbd465957055060df05e53fa9ea4b29867c92b224be939d3715be0e61b7aa0e24a8f25bccfa3b7901a3f0a8cb25498d7c9899d435b409220723dcde1d38ab6d4e7cfb42d443c9b65a37\
53891f46adb9bc52574699a7b642955702ed662d04cbe21aeec7c15db7e325dcaa74c85c5e3ed54424642d5bd8d3109c2d4c0079b3d2c5f2da12ad5b25407ae48f6fe4fc653b23a7f2d56a93c898dd0bd59ba02295934c9f7ffb433ef611d51b7c203f374cf9e8b69d4952ccc44593447ad41540270b0e30c349401048cbce10a0e1bae373de15c878982b0af837fb5432cd2471516d1e218296ce462a59fd5412921bbd3f75cf65070f7bafe21105ba83f7ffe8ece71534863c0dd731a2f3c29fff97b8ce798890a1b158a8891bb6f2dd751e75c0cb0db7ea152d7cdc91663f46f85d12ce0015351dba5225b2a87b64cc30518b23e31b2bfbb0b2a5042eeaea1234a57549a3e55ddd708e3380df032e93071b10b3e6902152c90ffd99bda0177a197779341307c5d9f335e698259ade70564eab9d2856aa1aa814211e71ba2885ef9cd5f5bdd225af2f6eebf775cc0bbdb3e519edb7c49a9a1984cc0cc012679aca8fd1d002fa64b2df095b4a9e2b496e3f4b544955c817efb29562cf8b3d2eeccbe4d364ce71d2d12b504b11de4747139ef505bdd12f382eb02fa3f5272b710644a9c20660ca5b4fa74be60984240b555c1f34261ee1d72d9eb2cc680f32b4603865503addc3a1fdc49d2b158d3407a282edd72ef51ad021338fdebf413726e1778e3bc3909b670d3f40e824391c5525b162ea01c29205e12f8e62bdd8cd0f21f6f7b44af4521c2dd23a7f3508e5dc6fffa3365e4ca1cac33bb515a5c5495dc059a94396de7d802758b65bb4cecb90bf69ab4126eab85958cb8b64eedf3a0955ab42cdc98ef90620e10cc854b9c02bfaff60742494a0c3bb34ef6d6bb861b275d975bdc4a10ac922dc70c1b03a4c01943a704af36ec8d79cf2f9ce0f602f01bef4a32edeb8fbba863c945552efc814410ac6bb839349ea65879644003bdda35d40eabdc9dcfb2d67d945b7f111ab62591763a0dd2d338594eff004237e5acce69dd9d2cdbb9ce121bd$5337e4ba9d877a1e84559688386fbc844c5fe557", "password1" },
{NULL}
};
#ifdef MMX_COEF
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_N_STR MMX_TYPE
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
struct fmt_main fmt_openbsd_softraid = {
{
"OpenBSD-SoftRAID", // FORMAT_LABEL
"", // FORMAT_NAME
ALGORITHM_NAME,
" (8192 iterations)", // BENCHMARK_COMMENT
-1, // BENCHMARK_LENGTH
PLAINTEXT_LENGTH,
sizeof(ARCH_WORD_32), //BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
{
"iteration count",
},
#endif
tests_openbsdsoftraid
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{
iteration_count,
},
#endif
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
set_salt,
jtr_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
atomic_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s
int foo() {
L1:
foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
{
foo();
goto L1; // expected-error {{use of undeclared label 'L1'}}
}
goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
{
foo();
L2:
foo();
}
return 0;
}
struct S {
int a;
};
int readint() {
int a = 0, b = 0;
// Test for atomic read
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
foo();
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
a += b;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected lvalue expression}}
a = 0;
#pragma omp atomic read
a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
a = b;
return 0;
}
int readS() {
struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
a = b;
return a.a;
}
int writeint() {
int a = 0, b = 0;
// Test for atomic write
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
foo();
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
a += b;
#pragma omp atomic write
a = 0;
#pragma omp atomic write
a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
a = b;
return 0;
}
int writeS() {
struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
a = b;
return a.a;
}
int updateint() {
int a = 0, b = 0;
// Test for atomic update
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary operator}}
a = b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
a = b || a;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
a = a && b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
a = (float)a + b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
a = 2 * b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
a = b + *&a;
#pragma omp atomic update
*&a = *&a + 2;
#pragma omp atomic update
a++;
#pragma omp atomic
++a;
#pragma omp atomic update
a--;
#pragma omp atomic
--a;
#pragma omp atomic update
a += b;
#pragma omp atomic
a %= b;
#pragma omp atomic update
a *= b;
#pragma omp atomic
a -= b;
#pragma omp atomic update
a /= b;
#pragma omp atomic
a &= b;
#pragma omp atomic update
a ^= b;
#pragma omp atomic
a |= b;
#pragma omp atomic update
a <<= b;
#pragma omp atomic
a >>= b;
#pragma omp atomic update
a = b + a;
#pragma omp atomic
a = a * b;
#pragma omp atomic update
a = b - a;
#pragma omp atomic
a = a / b;
#pragma omp atomic update
a = b & a;
#pragma omp atomic
a = a ^ b;
#pragma omp atomic update
a = b | a;
#pragma omp atomic
a = a << b;
#pragma omp atomic
a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
a /= b;
return 0;
}
int captureint() {
int a = 0, b = 0, c = 0;
// Test for atomic capture
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected compound statement}}
;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
foo();
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
a = b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = b || a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
b = a = a && b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = b + *&a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
{ a = b; }
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
{}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
{a = b;a = b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
{a = b; a = b || a;}
#pragma omp atomic capture
{b = a; a = a && b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
b = a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
b = a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
b = a = b + *&a;
#pragma omp atomic capture
c = *&a = *&a + 2;
#pragma omp atomic capture
c = a++;
#pragma omp atomic capture
c = ++a;
#pragma omp atomic capture
c = a--;
#pragma omp atomic capture
c = --a;
#pragma omp atomic capture
c = a += b;
#pragma omp atomic capture
c = a %= b;
#pragma omp atomic capture
c = a *= b;
#pragma omp atomic capture
c = a -= b;
#pragma omp atomic capture
c = a /= b;
#pragma omp atomic capture
c = a &= b;
#pragma omp atomic capture
c = a ^= b;
#pragma omp atomic capture
c = a |= b;
#pragma omp atomic capture
c = a <<= b;
#pragma omp atomic capture
c = a >>= b;
#pragma omp atomic capture
c = a = b + a;
#pragma omp atomic capture
c = a = a * b;
#pragma omp atomic capture
c = a = b - a;
#pragma omp atomic capture
c = a = a / b;
#pragma omp atomic capture
c = a = b & a;
#pragma omp atomic capture
c = a = a ^ b;
#pragma omp atomic capture
c = a = b | a;
#pragma omp atomic capture
c = a = a << b;
#pragma omp atomic capture
c = a = b >> a;
#pragma omp atomic capture
{ c = *&a; *&a = *&a + 2;}
#pragma omp atomic capture
{ *&a = *&a + 2; c = *&a;}
#pragma omp atomic capture
{c = a; a++;}
#pragma omp atomic capture
{++a;c = a;}
#pragma omp atomic capture
{c = a;a--;}
#pragma omp atomic capture
{--a;c = a;}
#pragma omp atomic capture
{c = a; a += b;}
#pragma omp atomic capture
{a %= b; c = a;}
#pragma omp atomic capture
{c = a; a *= b;}
#pragma omp atomic capture
{a -= b;c = a;}
#pragma omp atomic capture
{c = a; a /= b;}
#pragma omp atomic capture
{a &= b; c = a;}
#pragma omp atomic capture
{c = a; a ^= b;}
#pragma omp atomic capture
{a |= b; c = a;}
#pragma omp atomic capture
{c = a; a <<= b;}
#pragma omp atomic capture
{a >>= b; c = a;}
#pragma omp atomic capture
{c = a; a = b + a;}
#pragma omp atomic capture
{a = a * b; c = a;}
#pragma omp atomic capture
{c = a; a = b - a;}
#pragma omp atomic capture
{a = a / b; c = a;}
#pragma omp atomic capture
{c = a; a = b & a;}
#pragma omp atomic capture
{a = a ^ b; c = a;}
#pragma omp atomic capture
{c = a; a = b | a;}
#pragma omp atomic capture
{a = a << b; c = a;}
#pragma omp atomic capture
{c = a; a = b >> a;}
#pragma omp atomic capture
{c = a; a = foo();}
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}}
#pragma omp atomic capture capture
b = a /= b;
return 0;
}
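// Illustrative (not part of the clang test; produces no diagnostics):
// the minimal well-formed statement for each atomic flavor that the
// errors above steer toward.
int atomic_ok(int v, int x) {
#pragma omp atomic read
v = x;
#pragma omp atomic write
x = v + 1;
#pragma omp atomic update
x += v;
#pragma omp atomic capture
v = x++;
return v;
}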
|
square_symmetry.c | #include "smecy.h"
void square_symmetry_smecy(int width, int height, int *image,
int square_size, int x_offset, int y_offset) {
// Can be executed in parallel
#pragma omp parallel for
for(int i = 0; i < square_size/2; i++)
for(int j = 0; j < square_size; j++) {
// swap row i with its mirror row square_size-1-i inside the square
// (square_size - i would read one row past the square's last row)
int tmp = image[(y_offset + i)*height + x_offset + j];
image[(y_offset + i)*height + x_offset + j] = image[(y_offset + square_size - 1 - i)*height + x_offset + j];
image[(y_offset + square_size - 1 - i)*height + x_offset + j] = tmp;
}
}
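/* Illustrative call (hypothetical values): mirror the 100x100 square
whose top-left corner sits at x_offset=50, y_offset=30:
square_symmetry_smecy(width, height, image, 100, 50, 30); */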
|
DRB010-lastprivatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This loop has loop-carried output-dependence due to x=... at line 63.
The problem can be solved by using lastprivate(x) .
Data race pair: x@63:5 vs. x@63:5
*/
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,x;
int len = 10000;
if (argc>1)
len = atoi(argv[1]);
#pragma omp parallel for
for (i=0;i<len;i++)
x=i;
printf("x=%d\n",x);
return 0;
}
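/* A race-free variant (illustrative sketch): lastprivate(x) gives each
thread a private x and copies the value from the sequentially last
iteration (i == len-1) back to the original, so the printf reads a
well-defined x:
#pragma omp parallel for lastprivate(x)
for (i=0;i<len;i++)
x=i;
*/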
|
sort.c | /*
This file is part of HiParTI!.
HiParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
HiParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with HiParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <assert.h>
#include <math.h>
#include <time.h>
#include <HiParTI.h>
static const uint32_t MASKS[] = {0x55555555, 0x33333333, 0x0F0F0F0F, 0x00FF00FF};
static const uint32_t SHIFTS[] = {1, 2, 4, 8};
void pti_SwapValuesMat(ptiSparseMatrix *mtx, ptiNnzIndex ind1, ptiNnzIndex ind2) {
ptiIndex eleind1;
eleind1 = mtx->rowind.data[ind1];
mtx->rowind.data[ind1] = mtx->rowind.data[ind2];
mtx->rowind.data[ind2] = eleind1;
eleind1 = mtx->colind.data[ind1];
mtx->colind.data[ind1] = mtx->colind.data[ind2];
mtx->colind.data[ind2] = eleind1;
ptiValue val1 = mtx->values.data[ind1];
mtx->values.data[ind1] = mtx->values.data[ind2];
mtx->values.data[ind2] = val1;
}
/* Compare functions */
int pti_SparseMatrixCompareIndicesMorton2D(
ptiSparseMatrix * const mtx1,
uint64_t loc1,
ptiSparseMatrix * const mtx2,
uint64_t loc2,
ptiElementIndex sb_bits)
{
uint64_t mkey1 = 0, mkey2 = 0;
/* Only supports 2-D matrices, with 32-bit indices. */
uint32_t x1 = mtx1->rowind.data[loc1];
uint32_t y1 = mtx1->colind.data[loc1];
uint32_t x2 = mtx2->rowind.data[loc2];
uint32_t y2 = mtx2->colind.data[loc2];
/* Compare block indices */
ptiIndex blk_x1 = x1 >> sb_bits;
ptiIndex blk_y1 = y1 >> sb_bits;
ptiIndex blk_x2 = x2 >> sb_bits;
ptiIndex blk_y2 = y2 >> sb_bits;
if(blk_x1 < blk_x2) {
return -1;
} else if(blk_x1 > blk_x2) {
return 1;
} else if(blk_y1 < blk_y2) { // if blk_x1 == blk_x2
return -1;
} else if(blk_y1 > blk_y2) { // if blk_x1 == blk_x2
return 1;
}
/* blk_x1 == blk_x2, blk_y1 == blk_y2, sort inside a block in Z-Morton order */
uint64_t x = x1 - (blk_x1 << sb_bits);
uint64_t y = y1 - (blk_y1 << sb_bits);
x = (x | (x << SHIFTS[3])) & MASKS[3];
x = (x | (x << SHIFTS[2])) & MASKS[2];
x = (x | (x << SHIFTS[1])) & MASKS[1];
x = (x | (x << SHIFTS[0])) & MASKS[0];
y = (y | (y << SHIFTS[3])) & MASKS[3];
y = (y | (y << SHIFTS[2])) & MASKS[2];
y = (y | (y << SHIFTS[1])) & MASKS[1];
y = (y | (y << SHIFTS[0])) & MASKS[0];
mkey1 = y | (x << 1);
x = x2 - (blk_x2 << sb_bits);
y = y2 - (blk_y2 << sb_bits);
x = (x | (x << SHIFTS[3])) & MASKS[3];
x = (x | (x << SHIFTS[2])) & MASKS[2];
x = (x | (x << SHIFTS[1])) & MASKS[1];
x = (x | (x << SHIFTS[0])) & MASKS[0];
y = (y | (y << SHIFTS[3])) & MASKS[3];
y = (y | (y << SHIFTS[2])) & MASKS[2];
y = (y | (y << SHIFTS[1])) & MASKS[1];
y = (y | (y << SHIFTS[0])) & MASKS[0];
mkey2 = y | (x << 1);
if(mkey1 < mkey2) {
return -1;
} else if(mkey1 > mkey2) {
return 1;
} else {
return 0;
}
}
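/* Illustrative check of the bit interleave above (not HiParTI source):
with in-block coordinates x = 3 (binary 11) and y = 1 (binary 01), the
mask/shift cascade spreads the bits to x -> 0101 and y -> 0001, and
mkey = y | (x << 1) = 1010 | 0001 = 1011 = 11; the key alternates bits
as x1 y1 x0 y0, which is exactly Z-Morton order. */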
int pti_SparseMatrixCompareIndicesSingleMode(ptiSparseMatrix * const mtx1, ptiNnzIndex loc1, ptiSparseMatrix * const mtx2, ptiNnzIndex loc2, ptiIndex const mode)
{
ptiIndex eleind1 = 0, eleind2 = 0; // initialized in case of an unexpected mode
if (mode == 0) {
eleind1 = mtx1->rowind.data[loc1];
eleind2 = mtx2->rowind.data[loc2];
} else if (mode == 1) {
eleind1 = mtx1->colind.data[loc1];
eleind2 = mtx2->colind.data[loc2];
}
// printf("eleind1: %u (loc: %lu), eleind2: %u (loc: %lu)\n", eleind1, loc1, eleind2, loc2); fflush(stdout);
if(eleind1 < eleind2) {
return -1;
} else if(eleind1 > eleind2) {
return 1;
}
return 0;
}
int pti_SparseMatrixCompareIndicesRowBlock(
ptiSparseMatrix * const mtx1,
ptiNnzIndex loc1,
ptiSparseMatrix * const mtx2,
ptiNnzIndex loc2,
ptiElementIndex sk_bits)
{
ptiIndex eleind1 = mtx1->rowind.data[loc1];
ptiIndex eleind2 = mtx2->rowind.data[loc2];
ptiIndex blkind1 = eleind1 >> sk_bits;
ptiIndex blkind2 = eleind2 >> sk_bits;
// printf("blkind1: %lu, blkind2: %lu\n", blkind1, blkind2);
if(blkind1 < blkind2) {
return -1;
} else if(blkind1 > blkind2) {
return 1;
}
eleind1 = mtx1->colind.data[loc1];
eleind2 = mtx2->colind.data[loc2];
blkind1 = eleind1 >> sk_bits;
blkind2 = eleind2 >> sk_bits;
if(blkind1 < blkind2) {
return -1;
} else if(blkind1 > blkind2) {
return 1;
}
return 0;
}
/* Quick sort functions */
static void pti_QuickSortIndexMorton2D(ptiSparseMatrix *mtx, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sb_bits)
{
uint64_t i, j, p;
if(r-l < 2) {
return;
}
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(pti_SparseMatrixCompareIndicesMorton2D(mtx, i, mtx, p, sb_bits) < 0) {
// printf("(%lu, %lu) result: %d\n", i, p, pti_SparseMatrixCompareIndicesMorton2D(mtx, i, mtx, p));
++i;
}
while(pti_SparseMatrixCompareIndicesMorton2D(mtx, p, mtx, j, sb_bits) < 0) {
// printf("(%lu, %lu) result: %d\n", p, j,pti_SparseMatrixCompareIndicesMorton2D(mtx, p, mtx, j));
--j;
}
if(i >= j) {
break;
}
pti_SwapValuesMat(mtx, i, j);
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
#pragma omp task firstprivate(l,i) shared(mtx)
{
pti_QuickSortIndexMorton2D(mtx, l, i, sb_bits);
}
pti_QuickSortIndexMorton2D(mtx, i, r, sb_bits);
#pragma omp taskwait
}
static void pti_QuickSortIndexSingleMode(ptiSparseMatrix *mtx, ptiNnzIndex l, ptiNnzIndex r, ptiIndex mode)
{
// printf("l: %lu, r: %lu.\n", l, r); fflush(stdout);
ptiNnzIndex i, j, p;
if(r-l < 2) {
return;
}
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
// printf("i: %lu, j: %lu.\n", i, j); fflush(stdout);
while(pti_SparseMatrixCompareIndicesSingleMode(mtx, i, mtx, p, mode) < 0) {
++i;
}
while(pti_SparseMatrixCompareIndicesSingleMode(mtx, p, mtx, j, mode) < 0) {
--j;
}
if(i >= j) {
break;
}
// printf("new i: %lu (%u, %u), j: %lu (%u, %u).\n", i, mtx->rowind.data[i], mtx->colind.data[i], j, mtx->rowind.data[j], mtx->colind.data[j]); fflush(stdout);
pti_SwapValuesMat(mtx, i, j);
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
// printf("p: %lu.\n", p); fflush(stdout);
}
#pragma omp task firstprivate(l,i) shared(mtx, mode)
{
pti_QuickSortIndexSingleMode(mtx, l, i, mode);
}
pti_QuickSortIndexSingleMode(mtx, i, r, mode);
#pragma omp taskwait
}
static void pti_QuickSortIndexRowBlock(ptiSparseMatrix *mtx, ptiNnzIndex l, ptiNnzIndex r, ptiElementIndex sk_bits)
{
ptiNnzIndex i, j, p;
if(r-l < 2) {
return;
}
p = (l+r) / 2;
for(i = l, j = r-1; ; ++i, --j) {
while(pti_SparseMatrixCompareIndicesRowBlock(mtx, i, mtx, p, sk_bits) < 0) {
++i;
}
while(pti_SparseMatrixCompareIndicesRowBlock(mtx, p, mtx, j, sk_bits) < 0) {
--j;
}
if(i >= j) {
break;
}
pti_SwapValuesMat(mtx, i, j);
if(i == p) {
p = j;
} else if(j == p) {
p = i;
}
}
#pragma omp task firstprivate(l,i) shared(mtx, sk_bits)
{
pti_QuickSortIndexRowBlock(mtx, l, i, sk_bits);
}
pti_QuickSortIndexRowBlock(mtx, i, r, sk_bits);
#pragma omp taskwait
}
/****************************
* Sorting functions
****************************/
void ptiSparseMatrixSortIndexMorton(
ptiSparseMatrix *mtx,
int force,
ptiNnzIndex begin,
ptiNnzIndex end,
ptiElementIndex sb_bits)
{
if(force) {
#pragma omp parallel
{
#pragma omp single nowait
{
pti_QuickSortIndexMorton2D(mtx, begin, end, sb_bits);
}
}
}
}
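/* Illustrative usage (hypothetical call site, not HiParTI source): sort
every nonzero of mtx into Z-Morton order with 2^7 x 2^7 element blocks:
ptiSparseMatrixSortIndexMorton(&mtx, 1, 0, mtx.nnz, 7);
The parallel/single pair above creates the thread team once; the
recursive quicksort then spawns a task for the left half and recurses
inline on the right, joining at the taskwait. */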
void ptiSparseMatrixSortIndexSingleMode(ptiSparseMatrix *mtx, int force, ptiIndex mode, int tk)
{
if(force) {
#pragma omp parallel num_threads(tk)
{
#pragma omp single nowait
{
pti_QuickSortIndexSingleMode(mtx, 0, mtx->nnz, mode);
}
}
}
}
/**
* Reorder the elements of a COO sparse matrix in blocked row-major order:
* nonzeros in [begin, end) are sorted by row block, then by column block,
* with block size 2^sk_bits.
* @param mtx the sparse matrix to operate on
*/
void ptiSparseMatrixSortIndexRowBlock(
ptiSparseMatrix *mtx,
int force,
ptiNnzIndex begin,
ptiNnzIndex end,
ptiElementIndex sk_bits)
{
if(force) {
#pragma omp parallel
{
#pragma omp single nowait
{
pti_QuickSortIndexRowBlock(mtx, begin, end, sk_bits);
}
}
}
}
/**
* Randomly shuffle all indices.
*
* @param[in] mtx matrix to be shuffled
* @param[out] map_inds records the randomly generated mapping
*
*/
void ptiGetRandomShuffledIndicesMat(ptiSparseMatrix *mtx, ptiIndex ** map_inds)
{
/* Get randomly renumbering indices (Fisher-Yates shuffle) */
srand(time(NULL)); // seed once; reseeding inside the loop correlates the draws and biases the shuffle
for(ptiIndex m = 0; m < 2; ++m) {
ptiIndex dim_len;
if (m == 0) dim_len = mtx->nrows;
else if (m == 1) dim_len = mtx->ncols;
for(long int i = dim_len - 1; i > 0; --i) {
ptiIndex new_loc = (ptiIndex) (rand() % (i+1));
/* Swap i <-> new_loc */
ptiIndex tmp = map_inds[m][i];
map_inds[m][i] = map_inds[m][new_loc];
map_inds[m][new_loc] = tmp;
}
}
} |
omp_bug5.c | /******************************************************************************
* FILE: omp_bug5.c
* DESCRIPTION:
* Using SECTIONS, two threads initialize their own array and then add
* it to the other's array, however a deadlock occurs.
* AUTHOR: Blaise Barney 01/29/04
* LAST REVISED: 08/15/11
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000000
#define PI 3.1415926535
#define DELTA .01415926535
int main (int argc, char *argv[])
{
int nthreads, tid, i;
float a[N], b[N];
omp_lock_t locka, lockb;
/* Initialize the locks */
omp_init_lock(&locka);
omp_init_lock(&lockb);
/* Initialize the arrays */
for (i=0; i<N; i++) {
a[i]=0;
b[i]=0;
}
/* Fork a team of threads giving them their own copies of variables */
#pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
{
/* Obtain thread number and number of threads */
tid = omp_get_thread_num();
#pragma omp master
{
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
printf("Thread %d starting...\n", tid);
#pragma omp barrier
#pragma omp sections nowait
{
#pragma omp section
{
omp_set_lock(&locka);
printf("Thread %d updating a[]\n",tid);
for (i=0; i<N; i++)
a[i] += DELTA * i;
omp_set_lock(&lockb);
printf("Thread %d updating b[]\n",tid);
for (i=0; i<N; i++)
b[i] += DELTA + i;
omp_unset_lock(&lockb);
omp_unset_lock(&locka);
}
#pragma omp section
{
omp_set_lock(&lockb);
printf("Thread %d updating b[]\n",tid);
for (i=0; i<N; i++)
b[i] += PI * i;
omp_set_lock(&locka);
printf("Thread %d adding b[] to a[]\n",tid);
for (i=0; i<N; i++)
a[i] += PI + i;
omp_unset_lock(&locka);
omp_unset_lock(&lockb);
}
} /* end of sections */
} /* end of parallel region */
printf("Sample results: %f %f %f %f\n",a[0],b[0],a[999999],b[999999]);
}
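/* A deadlock-free sketch (illustrative, not part of the original file):
the sections above deadlock because one thread holds locka while waiting
for lockb and the other holds lockb while waiting for locka. Acquiring
the locks in one global order (locka before lockb in every section)
removes the circular wait. OMP_BUG5_FIX_SKETCH is a hypothetical guard
so this sketch is not compiled by default. */
#ifdef OMP_BUG5_FIX_SKETCH
void sections_fixed(float *a, float *b, omp_lock_t *locka, omp_lock_t *lockb)
{
int i;
#pragma omp sections nowait
{
#pragma omp section
{
omp_set_lock(locka); /* always locka first... */
omp_set_lock(lockb); /* ...then lockb */
for (i=0; i<N; i++)
a[i] += DELTA * i;
for (i=0; i<N; i++)
b[i] += DELTA + i;
omp_unset_lock(lockb); /* release in reverse order */
omp_unset_lock(locka);
}
#pragma omp section
{
omp_set_lock(locka); /* same order, never lockb-then-locka */
omp_set_lock(lockb);
for (i=0; i<N; i++)
b[i] += PI * i;
for (i=0; i<N; i++)
a[i] += PI + i;
omp_unset_lock(lockb);
omp_unset_lock(locka);
}
}
}
#endif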
|
squareddifference_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
int ref_squareddifference_fp32(struct tensor* input_tensor_0, struct tensor* input_tensor_1,
struct tensor* output_tensor, int num_thread)
{
// dims size = 2 or 3
if (input_tensor_0->dim_num < 4)
{
float* input0 = input_tensor_0->data;
float* input1 = input_tensor_1->data;
float* output = output_tensor->data;
int total_size = output_tensor->elem_num;
for (int i = 0; i < total_size; i++)
{
output[i] = powf((input0[i] - input1[i]), 2);
}
return 0;
}
// dims size = 4
else if (output_tensor->dim_num == 4)
{
int w = output_tensor->dims[3];
int h = output_tensor->dims[2];
int channels = output_tensor->dims[1];
int size = h * w;
int c_step = h * w;
float* input0 = input_tensor_0->data;
float* input1 = input_tensor_1->data;
float* output = output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src0 = input0 + c_step * q;
float* src1 = input1 + c_step * q;
float* dst = output + c_step * q;
for (int i = 0; i < size; i++)
{
dst[i] = powf((src0[i] - src1[i]), 2);
}
}
return 0;
}
return -1;
}
int ref_squareddifference_uint8(struct tensor* input_tensor_0, struct tensor* input_tensor_1,
struct tensor* output_tensor, int num_thread)
{
/* dequant */
uint8_t* input0_uint8 = input_tensor_0->data;
uint8_t* input1_uint8 = input_tensor_1->data;
uint8_t* output_uint8 = output_tensor->data;
float input0_scale = input_tensor_0->scale;
float input1_scale = input_tensor_1->scale;
float output_scale = output_tensor->scale;
int32_t input0_zero = input_tensor_0->zero_point;
int32_t input1_zero = input_tensor_1->zero_point;
int32_t output_zero = output_tensor->zero_point;
int input0_size = input_tensor_0->elem_num;
int input1_size = input_tensor_1->elem_num;
int output_size = output_tensor->elem_num;
float* input0 = ( float* )sys_malloc(input0_size * sizeof(float));
float* input1 = ( float* )sys_malloc(input1_size * sizeof(float));
float* output = ( float* )sys_malloc(output_size * sizeof(float));
for (int i = 0; i < input0_size; i++)
{
input0[i] = (( float )input0_uint8[i] - ( float )input0_zero) * input0_scale;
}
for (int i = 0; i < input1_size; i++)
{
input1[i] = (( float )input1_uint8[i] - ( float )input1_zero) * input1_scale;
}
// dims size = 2 or 3
if (input_tensor_0->dim_num < 4)
{
int total_size = output_tensor->elem_num;
for (int i = 0; i < total_size; i++)
{
output[i] = powf((input0[i] - input1[i]), 2);
}
}
// dims size = 4
else if (output_tensor->dim_num == 4)
{
int w = output_tensor->dims[3];
int h = output_tensor->dims[2];
int channels = output_tensor->dims[1];
int size = h * w;
int c_step = h * w;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src0 = input0 + c_step * q;
float* src1 = input1 + c_step * q;
float* dst = output + c_step * q;
for (int i = 0; i < size; i++)
{
dst[i] = powf((src0[i] - src1[i]), 2);
}
}
}
else
{
// unsupported rank: release the temporaries before failing
sys_free(input0);
sys_free(input1);
sys_free(output);
return -1;
}
/* quant: requantize the float result into the uint8 output, then
release the temporaries; every success path must reach this point */
for (int i = 0; i < output_size; i++)
{
int udata = round(output[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(input0);
sys_free(input1);
sys_free(output);
return 0;
}
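/* Worked example of the asymmetric uint8 quantization used above
(illustrative values): with output_scale = 0.05 and output_zero = 10,
a real result r = 2.0 quantizes to round(2.0 / 0.05 + 10) = 50 and
dequantizes back to (50 - 10) * 0.05 = 2.0; results are clamped to
[0, 255]. */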
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor_0;
struct tensor* input_tensor_1;
struct tensor* output_tensor;
int layout = ir_graph->graph_layout;
input_tensor_0 = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
input_tensor_1 = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
int ret = -1;
if (input_tensor_0->data_type == TENGINE_DT_FP32)
ret = ref_squareddifference_fp32(input_tensor_0, input_tensor_1, output_tensor, exec_graph->num_thread);
else if(input_tensor_0->data_type == TENGINE_DT_UINT8)
ret = ref_squareddifference_uint8(input_tensor_0, input_tensor_1, output_tensor, exec_graph->num_thread);
return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_squareddifference_ref_op(void* arg)
{
return register_builtin_node_ops(OP_SQUAREDDIFFERENCE, &hcl_node_ops);
}
int unregister_squareddifference_ref_op(void* arg)
{
return unregister_builtin_node_ops(OP_SQUAREDDIFFERENCE, &hcl_node_ops);
}
|
GB_binop__rdiv_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint8)
// A*D function (colscale): GB (_AxD__rdiv_uint8)
// D*A function (rowscale): GB (_DxB__rdiv_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint8)
// C=scalar+B GB (_bind1st__rdiv_uint8)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint8)
// C=A+scalar GB (_bind2nd__rdiv_uint8)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
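// Note (illustrative, not generated text): RDIV is division with the
// operands flipped, z = y / x, so the GB_BINOP macro above computes
// cij = bij / aij. GB_IDIV_UNSIGNED follows the SuiteSparse:GraphBLAS
// convention that unsigned integer division by zero yields the type's
// maximum value (255 for uint8_t) rather than trapping.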
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rdiv_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__second_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__second_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint32)
// A*D function (colscale): GB (_AxD__second_uint32)
// D*A function (rowscale): GB (_DxB__second_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__second_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__second_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint32_t
// A type: uint32_t
// A pattern? 1
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = bij
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// true if values of A are not used
#define GB_A_IS_PATTERN \
1 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = y ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
1
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_UINT32 || GxB_NO_SECOND_UINT32)
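// Note (illustrative, not generated text): SECOND returns its second
// operand, z = y, so A's values are never read: GB_A_IS_PATTERN is 1
// and GB_GETA above expands to nothing. The dense-accum ewise3 variant
// is stubbed to GB ((none)) because that kernel only supports
// MIN/MAX/PLUS/MINUS/RMINUS/TIMES/DIV/RDIV, and the scalar-bind
// variants are likewise omitted since binding a scalar to SECOND
// reduces to a constant or identity apply.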
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__second_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
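// Editor's note (sketch): with the SECOND operator, colscale C=A*D yields
// cij = djj (the diagonal entry for column j) and rowscale C=D*B yields
// cij = bij; A's values are never read, consistent with "A pattern? 1" above.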
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__second_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__second_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__second_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__second_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
bucle-forModificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif
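/* Example usage (editor's sketch, assuming a GCC-style toolchain):
 *   gcc -fopenmp bucle-forModificado.c -o bucle
 *   OMP_NUM_THREADS=4 ./bucle 8
 * Each of the 8 iterations prints the id of the thread that executed it. */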
int main(int argc, char ** argv)
{
int i, n = 9;
if(argc < 2) {
fprintf(stderr,"\n[ERROR] - Missing number of iterations\n");
exit(-1);
}
n = atoi(argv[1]);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i=0; i<n; i++)
printf("thread %d executes iteration %d of the loop\n",omp_get_thread_num(),i);
return(0);
} |
DRB005-indirectaccess1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This program is extracted from a real application at LLNL.
Two pointers (xa1 and xa2) hold a pair of values with a distance of 12.
They are used as start base addresses for two 1-D arrays.
Their index set contains two indices with a distance of 12: 911 + 12 = 923.
So there is a loop-carried dependence.
However, a loop-carried dependence does not mean a data race will always happen:
the iterations carrying the dependence must be scheduled to different threads
for a data race to occur. In this example, schedule(static,1) is used to
increase the chance that the dependent loop iterations are scheduled to
different threads.
Data race pair: xa1[idx]@128:5 vs. xa2[idx]@129:5
*/
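/*
Editor's note (worked example of the dependence described above): indexSet
contains both 911 and 923 = 911 + 12. With a pointer offset of 12
(xa2 = xa1 + 12, as in the original benchmark), xa1[923] and xa2[911] name
the same element base[923]; if those two iterations land on different
threads, the two "+=" updates race.
*/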
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
int indexSet[N] = {
521, 523, 525, 527, 529, 531,
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 923, // change original 921 to 923 = 911+12
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
int main (int argc, char* argv[])
{
// 2013 is the max index value; +1 ensures a reference like base[2013] is valid.
// Two such segments are allocated because xa2 = base + 2014, so the two
// pointers never access the same offset.
double * base = (double*) malloc(sizeof(double)* (2013+1+2013+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
double * xa1 = base;
double * xa2 = xa1 + 2014;
int i;
// initialize segments touched by indexSet
#pragma omp target data map(from:base[0: 4028])
#pragma omp target parallel for
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
// default static even scheduling may not trigger data race, using static,1 instead.
#pragma omp target data map(tofrom:xa1[0: 2014], xa2[0:2014], indexSet[0: N])
#pragma omp target parallel for
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0 + i;
xa2[idx]+= 3.0 + i;
}
printf("xa1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
free (base);
return 0;
}
|
generator_gemm_common.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"
LIBXSMM_API_INTERN
int libxsmm_generator_gemm_get_rbp_relative_offset( libxsmm_gemm_stack_var stack_var ) {
/* The stack at exit of setup looks like this:
*
* 10th param (if applicable) <-- RBP+40
* 9th param (if applicable) <-- RBP+32
* 8th param (if applicable) <-- RBP+24
* 7th param (if applicable) <-- RBP+16
* Return address <-- RBP+8
* Entry/saved RBP <-- RBP
* prefetch A ptr <-- RBP-8
* prefetch B ptr <-- RBP-16
* Offset A array ptr <-- RBP-24
* Offset B array ptr <-- RBP-32
* Int8 scaling factor <-- RBP-40
* GEMM_scratch ptr in stack (to be filled) <-- RBP-48
* Eltwise bias ptr <-- RBP-56
* Eltwise output_ptr <-- RBP-64
* Eltwise buf1_ptr <-- RBP-72
* Eltwise buf2_ptr <-- RBP-80
*
* */
switch ( stack_var ) {
case LIBXSMM_GEMM_STACK_VAR_NONE:
return 0;
case LIBXSMM_GEMM_STACK_VAR_PFA_PTR:
return -8;
case LIBXSMM_GEMM_STACK_VAR_PFB_PTR:
return -16;
case LIBXSMM_GEMM_STACK_VAR_A_OFFS_BRGEMM_PTR:
return -24;
case LIBXSMM_GEMM_STACK_VAR_B_OFFS_BRGEMM_PTR:
return -32;
case LIBXSMM_GEMM_STACK_VAR_INT8_SCF:
return -40;
case LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR:
return -48;
case LIBXSMM_GEMM_STACK_VAR_ELT_BIAS_PTR:
return -56;
case LIBXSMM_GEMM_STACK_VAR_ELT_OUTPUT_PTR:
return -64;
case LIBXSMM_GEMM_STACK_VAR_ELT_RELU_BITMASK_PTR:
return -72;
case LIBXSMM_GEMM_STACK_VAR_ELT_BUF1:
return -72;
case LIBXSMM_GEMM_STACK_VAR_ELT_BUF2:
return -80;
case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_B:
return -72;
case LIBXSMM_GEMM_STACK_VAR_TRANS_EXT_BUF_C:
return -80;
case LIBXSMM_GEMM_STACK_VAR_ELT_BITMAP_PTR:
return -72;
case LIBXSMM_GEMM_STACK_VAR_ELT_DECOMPRESS_BUF:
return -80;
case LIBXSMM_GEMM_STACK_VAR_ARG_7:
return 16;
case LIBXSMM_GEMM_STACK_VAR_ARG_8:
return 24;
case LIBXSMM_GEMM_STACK_VAR_ARG_9:
return 32;
case LIBXSMM_GEMM_STACK_VAR_ARG_10:
return 40;
default:
return 0;
}
}
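/* Example (editor's sketch): fetching the GEMM scratch pointer resolves to a
 * single RBP-relative load; e.g.
 *   libxsmm_generator_gemm_getval_stack_var( io_generated_code,
 *     i_micro_kernel_config, LIBXSMM_GEMM_STACK_VAR_GEMM_SCRATCH_PTR,
 *     LIBXSMM_X86_GP_REG_R10 );
 * emits the equivalent of "movq -48(%rbp), %r10" given the layout above. */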
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_getval_stack_var( libxsmm_generated_code* io_generated_code,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
libxsmm_gemm_stack_var stack_var,
unsigned int i_gp_reg ) {
int offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
/* make sure we requested a legal stack var */
if (offset == 0) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
return;
}
libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, offset, i_gp_reg, 0 );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_setval_stack_var( libxsmm_generated_code* io_generated_code,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
libxsmm_gemm_stack_var stack_var,
unsigned int i_gp_reg ) {
int offset = libxsmm_generator_gemm_get_rbp_relative_offset(stack_var);
/* make sure we requested to set a legal stack var */
if (offset >= 0) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_GENERAL );
return;
}
libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, LIBXSMM_X86_GP_REG_RBP, LIBXSMM_X86_GP_REG_UNDEF, 0, offset, i_gp_reg, 1 );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
if ( (i_arch <= LIBXSMM_TARGET_ARCH_GENERIC) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
}
if ( i_arch == LIBXSMM_X86_GENERIC ) {
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPD;
} else {
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
}
} else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'y';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
}
} else {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
}
}
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 32;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'z';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 8;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
} else if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
/* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
io_micro_kernel_config->vector_length = 16;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
if ( (i_use_masking_a_c == 0) ) {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
}
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else {
/* shouldn't happen as we caught this case earlier */
io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
/* that should not happen */
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
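/* Editor's note (sketch): e.g. for an AVX-512 target and an F32 descriptor,
 * the code above selects vector_name 'z', vector_length 16, datatype_size 4,
 * and LIBXSMM_X86_INSTR_VFMADD231PS as the multiply (FMA) instruction. */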
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
if ( (i_arch <= LIBXSMM_TARGET_ARCH_GENERIC) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
} else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 2;
io_micro_kernel_config->datatype_size = 8;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
io_micro_kernel_config->vector_length = 4;
io_micro_kernel_config->datatype_size = 4;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
} else {
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
} else {
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
}
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
}
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
#if !defined(NDEBUG)
fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
} else {
/* should not happen */
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
const unsigned int i_arch,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_use_masking_a_c ) {
if ( ( i_arch <= LIBXSMM_TARGET_ARCH_GENERIC ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
io_micro_kernel_config->instruction_set = LIBXSMM_TARGET_ARCH_GENERIC;
io_micro_kernel_config->vector_reg_count = 0;
io_micro_kernel_config->use_masking_a_c = 0;
io_micro_kernel_config->vector_name = 'a';
io_micro_kernel_config->vector_length = 0;
io_micro_kernel_config->datatype_size = 0;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
} else if ( i_arch <= LIBXSMM_X86_SSE42 ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
}
} else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
io_micro_kernel_config->instruction_set = i_arch;
io_micro_kernel_config->vector_reg_count = 16;
io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
io_micro_kernel_config->vector_name = 'x';
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 8;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
} else {
io_micro_kernel_config->vector_length = 1;
io_micro_kernel_config->datatype_size = 4;
io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
if ( i_arch == LIBXSMM_X86_AVX ) {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
} else {
io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
}
}
} else {
/* should not happen */
}
io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code* io_generated_code,
const libxsmm_gemm_descriptor* i_xgemm_desc ) {
if ( io_generated_code->code_type == 0 ) {
char l_new_code[512];
const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
int l_code_length = 0;
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifndef NDEBUG\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifdef _OPENMP\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#pragma omp atomic\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
}
}
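/* Editor's note: for code_type == 0 (source emission) and an 8x8x8 kernel,
 * the appended C snippet would be (sketch):
 *   #ifndef NDEBUG
 *   #ifdef _OPENMP
 *   #pragma omp atomic
 *   #endif
 *   libxsmm_num_total_flops += 1024;
 *   #endif
 */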
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking,
const unsigned int i_k_blocking ) {
LIBXSMM_UNUSED(i_m_blocking);
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_max_blocked_k,
const unsigned int i_kloop_complete ) {
LIBXSMM_UNUSED(i_m_blocking);
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
if ( i_kloop_complete != 0 ) {
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_b, l_b_offset );
}
}
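/* Editor's note (sketch of the emitted k-loop skeleton, header + footer):
 *   movq  $0, kloop                ; header: zero the counter
 * label:
 *   addq  $i_k_blocking, kloop     ; header: advance before the body
 *   ...   micro-kernel body ...
 *   cmpq  $i_max_blocked_k, kloop  ; footer: loop while kloop < max
 *   jl    label
 */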
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc) {
LIBXSMM_UNUSED(i_xgemm_desc);
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_n_blocking) {
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_n_blocking,
const unsigned int i_n_done ) {
if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/2)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/2)) );
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/4)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/4)) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
unsigned int l_type_scaling;
if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
(LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
l_type_scaling = 2;
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_type_scaling = 4;
} else {
l_type_scaling = 1;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/l_type_scaling)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/l_type_scaling)) );
}
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
(i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
#endif
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
}
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_help_0, l_b_offset );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_b,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
}
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
/* handle trans B */
int l_b_offset = 0;
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size;
} else {
l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b, l_b_offset );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) );
}
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
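/* Editor's note (worked example): the C adjustment above is
 * n_blocking*ldc - m elements; e.g. for FP32 with n_blocking = 2, ldc = 64,
 * m = 64 this adds 2*64*4 - 64*4 = 256 bytes, moving C from the end of the
 * current m-strip to the top of the next n-panel. */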
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const unsigned int i_m_blocking ) {
libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code* io_generated_code,
libxsmm_loop_label_tracker* io_loop_label_tracker,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_m_done ) {
/* advance C pointer */
if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/2) );
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size/4) );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
/* C prefetch */
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size) );
}
#endif
/* B prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
unsigned int l_type_scaling;
if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
(LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
l_type_scaling = 2;
} else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_type_scaling = 4;
} else {
l_type_scaling = 1;
}
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch, i_m_blocking*(i_micro_kernel_config->datatype_size/l_type_scaling) );
}
}
/* A prefetch */
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a_prefetch,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
}
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) -
(i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
}
/* advance A pointer */
if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
0 );
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
libxsmm_x86_instruction_alu_mem( io_generated_code,
i_micro_kernel_config->alu_mov_instruction,
i_gp_reg_mapping->gp_reg_a,
i_gp_reg_mapping->gp_reg_reduce_loop, 8,
0,
i_gp_reg_mapping->gp_reg_help_0,
1 );
libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
} else {
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a,
((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
}
/* loop handling */
libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}
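/* Pointer arithmetic sketch for the A rewind above (illustrative numbers):
 * during the K loop gp_reg_a advances by k*lda*datatype_size bytes, so the
 * footer subtracts k*lda*size - m_blocking*size, leaving a net advance of
 * m_blocking*size bytes, i.e. the next M panel. E.g. k=64, lda=64, FP32
 * (size=4), m_blocking=32: subtract 64*64*4 - 32*4 = 16384 - 128, for a
 * net advance of 128 bytes = 32 floats.
 */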
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking ) {
unsigned int l_m_blocking, l_vec_reg_acc_start;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
assert(0 < i_micro_kernel_config->vector_length);
/* deriving register blocking from kernel config */
l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* start register of accumulator */
l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
/* Check whether the requested code can actually be generated.
   These checks are skipped in release mode, so an invalid
   register-blocking request may silently produce bad code. */
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_GENERIC ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE42 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif /*!defined(NDEBUG)*/
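/* Accumulator numbering sketch (illustrative): with 32 vector registers,
 * i_n_blocking = 6 and l_m_blocking = 4, l_vec_reg_acc_start = 32 - 6*4 = 8,
 * and block (l_m, l_n) accumulates in register 8 + l_m + 4*l_n; the low
 * registers remain free as scratch for loads and broadcasts.
 */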
/* load C accumulator */
if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
/* pure BF16 kernel */
if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* beta=1: load the bf16 C values and up-convert them to FP32 so accumulation happens in FP32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load 16 bit values into ymm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 2, 1, 0 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 1, 0 );
}
/* convert 16 bit values into 32 bit (integer convert) */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPMOVSXWD,
i_micro_kernel_config->vector_name,
0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
/* shift 16 bits to the left to generate valid FP32 numbers */
libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code,
LIBXSMM_X86_INSTR_VPSLLD_I,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
16);
}
}
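/* Why load+extend+shift recovers FP32 from bf16 (scalar sketch, illustrative):
 * a bf16 pattern is the high half of the corresponding FP32 pattern, so
 *   uint32_t u = ((uint32_t)bf16_bits) << 16;  memcpy(&f, &u, 4);
 * the VPSLLD by 16 discards whatever VPMOVSXWD put into the high bits.
 */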
/* pure int8 kernel */
} else if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) && (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) ) &&
( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* we need to up convert int8 to int32 */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* load 8 bit values into xmm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU8,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
'z',
0, 2, 1, 0 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
'x',
0, 0, 1, 0 );
}
/* convert 8 bit values into 32 bit (integer convert) */
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED) != 0 ) {
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPMOVZXBD,
i_micro_kernel_config->vector_name,
0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
} else {
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPMOVSXBD,
i_micro_kernel_config->vector_name,
0, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
}
}
} else {
/* adding to C, so let's load C */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* we only mask the last m-blocked load */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
i_micro_kernel_config->c_vmove_instruction,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 1, 0 );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
} else {
/* overwriting C (beta=0), so zero the accumulators via xor */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
/* @TODO: cannot migrate to new encoder as this is also SSE */
libxsmm_x86_instruction_vec_compute_reg( io_generated_code,
io_generated_code->arch,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) );
}
#if 0
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
for (l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_c_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
#endif
}
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_store_C( libxsmm_generated_code* io_generated_code,
const libxsmm_gp_reg_mapping* i_gp_reg_mapping,
const libxsmm_micro_kernel_config* i_micro_kernel_config,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_m_blocking,
const unsigned int i_n_blocking )
{
/* deriving register blocking from kernel config */
unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
/* register blocking counter in n */
unsigned int l_n = 0;
/* register blocking counter in m */
unsigned int l_m = 0;
/* start register of accumulator */
unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
/* select store instruction */
unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction;
/* @TODO fix this test */
#if !defined(NDEBUG)
if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_GENERIC ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE42 ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX ||
i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
return;
}
} else {}
#if 0
if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
return;
}
#endif
#endif
if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* init stack with helper variables for SW-based RNE rounding */
/* push 0x7f800000 on the stack, naninf masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x7f800000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00010000 on the stack, fixup masking */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00010000);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00007fff on the stack, rneadd */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00007fff);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
/* push 0x00000001 on the stack, fixup */
libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_2, 0x00000001);
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
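/* Stack layout after the four pushes (each push is 8 bytes):
 *   rsp+24: 0x7f800000  naninf - exponent mask, detects NaN/Inf lanes
 *   rsp+16: 0x00010000  fixup mask - bit 16, the LSB of the future bf16
 *   rsp+8 : 0x00007fff  rneadd - round-to-nearest-even bias
 *   rsp+0 : 0x00000001  fixup addend
 * Scalar sketch of the masked adds below (illustrative only):
 *   uint32_t u = fp32_bits;
 *   if ((u & 0x7f800000) != 0x7f800000)     // finite lanes (mask k7)
 *       u += 0x7fff + ((u >> 16) & 1);      // +1 extra on odd lanes (k6)
 *   bf16 = (uint16_t)(u >> 16);
 */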
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
/* and with naninf */
libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPANDD,
1,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
24,
i_micro_kernel_config->vector_name,
reg_X,
0 );
/* and with fixup */
libxsmm_x86_instruction_vec_compute_mem( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VPANDD,
1,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
16,
i_micro_kernel_config->vector_name,
reg_X,
1 );
/* compute naninf mask k7 */
libxsmm_x86_instruction_vec_compute_mem_2reg_imm8( io_generated_code,
LIBXSMM_X86_INSTR_VPCMPD,
i_micro_kernel_config->vector_name,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
24,
1,
0,
7,
4 );
/* compute fixup mask k6 */
libxsmm_x86_instruction_vec_compute_mem_2reg_imm8( io_generated_code,
LIBXSMM_X86_INSTR_VPCMPD,
i_micro_kernel_config->vector_name,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF,
0,
16,
1,
1,
6,
0 );
/* load rneadd */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
8,
i_micro_kernel_config->vector_name,
0, 0, 1, 0 );
/* load fixup */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
LIBXSMM_X86_GP_REG_RSP,
LIBXSMM_X86_GP_REG_UNDEF, 0,
0,
i_micro_kernel_config->vector_name,
1, 0, 1, 0 );
/* compute fixup */
libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code,
LIBXSMM_X86_INSTR_VPADDD,
i_micro_kernel_config->vector_name,
1,
0,
0,
6,
0 );
/* compute fixup */
libxsmm_x86_instruction_vec_compute_3reg_mask( io_generated_code,
LIBXSMM_X86_INSTR_VPADDD,
i_micro_kernel_config->vector_name,
0,
reg_X,
reg_X,
7,
0 );
/* shift FP32 by 16bit to right */
libxsmm_x86_instruction_vec_compute_2reg_imm8(io_generated_code,
LIBXSMM_X86_INSTR_VPSRAD_I,
i_micro_kernel_config->vector_name,
reg_X,
reg_X,
16);
/* down-convert the 32 bit values to 16 bit */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VPMOVDW,
i_micro_kernel_config->vector_name,
reg_X,
0 );
/* store 16 bit values into ymm portion of the register */
if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 2, 0, 1 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 0, 1 );
}
}
}
/* clean up the stack: pop the four helper constants (gp_reg_help_2 serves as scratch) */
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_2 );
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) ) &&
( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
unsigned int l_m_2_blocking = (l_m_blocking/2)*2;
l_m = 0;
if ( i_micro_kernel_config->use_masking_a_c != 0 ) {
for ( l_m = 0 ; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, 0 );
/* store 16 bit values into ymm portion of the register */
if ( l_m == (l_m_blocking - 1) ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VMOVDQU16,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 2, 0, 1 );
} else {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 0, 1 );
}
}
} else {
for (; l_m < l_m_2_blocking; l_m+=2 ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTNE2PS2BF16,
i_micro_kernel_config->vector_name,
reg_X, reg_X2, 0 );
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'z',
0, 0, 0, 1 );
}
for (; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTNEPS2BF16,
i_micro_kernel_config->vector_name,
reg_X, 0 );
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
'y',
0, 0, 0, 1 );
}
}
}
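/* Design note: VCVTNE2PS2BF16 (CPX and newer) folds two FP32 zmm registers
 * into one zmm of 32 bf16 values, so the unmasked path above stores two
 * m-blocks with a single 64-byte store; the odd tail and the masked path
 * fall back to VCVTNEPS2BF16, which yields a ymm of 16 bf16 values.
 */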
} else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) && (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) ) &&
( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) {
/* pick the right instructions */
unsigned int inst_f32_i32 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VCVTPS2UDQ : LIBXSMM_X86_INSTR_VCVTPS2DQ;
unsigned int inst_i32_i8 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VPMOVUSDB : LIBXSMM_X86_INSTR_VPMOVSDB;
/* there are cases where we need to load the scaling factor's address from the stack argument list */
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) != 0 ) {
libxsmm_x86_instruction_load_arg_to_reg( io_generated_code, 0, i_gp_reg_mapping->gp_reg_scf );
}
/* loading scf into register 3 */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
LIBXSMM_X86_INSTR_VBROADCASTSS,
i_gp_reg_mapping->gp_reg_scf,
LIBXSMM_X86_GP_REG_UNDEF, 0, 0,
i_micro_kernel_config->vector_name,
3, 0, 1, 0 );
/* Zero out register 0 to perform relu */
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
i_micro_kernel_config->vxor_instruction,
i_micro_kernel_config->vector_name,
0,
0,
0);
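/* Scalar sketch of the int32 -> int8 store pipeline below (illustrative):
 *   float f = (float)acc * scf;     // VCVTDQ2PS + VMULPS with broadcast scf
 *   f = MAX(f, 0.0f);               // VMAXPS against the zeroed register
 *   int32_t r = convert(f);         // VCVTPS2DQ / VCVTPS2UDQ
 *   C[i] = saturate_to_int8(r);     // VPMOVSDB / VPMOVUSDB store
 */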
/* storing downconverted and rounded C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n);
/* Convert result to F32 */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
LIBXSMM_X86_INSTR_VCVTDQ2PS,
i_micro_kernel_config->vector_name,
reg_X,
reg_X );
/* Multiply with scaling factor */
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
LIBXSMM_X86_INSTR_VMULPS,
i_micro_kernel_config->vector_name,
reg_X,
3,
reg_X );
/* Perform RELU */
libxsmm_x86_instruction_vec_compute_3reg( io_generated_code,
LIBXSMM_X86_INSTR_VMAXPS,
i_micro_kernel_config->vector_name,
reg_X,
0,
reg_X);
/* Round result to int32 */
libxsmm_x86_instruction_vec_compute_2reg( io_generated_code,
inst_f32_i32,
i_micro_kernel_config->vector_name,
reg_X, reg_X );
/* down-convert to int8 */
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
inst_i32_i8,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
i_micro_kernel_config->vector_name,
reg_X, ( ( l_m == (l_m_blocking - 1)) && ( i_micro_kernel_config->use_masking_a_c != 0 ) ) ? 2 : 0, 0, 1 );
}
}
} else {
/* storing C accumulator */
for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
libxsmm_x86_instruction_vec_move( io_generated_code,
i_micro_kernel_config->instruction_set,
l_vstore,
i_gp_reg_mapping->gp_reg_c,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
i_micro_kernel_config->vector_name,
l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
}
if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
/* determine how many prefetches we need in the M direction: one prefetch per cache line suffices */
unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
libxsmm_x86_instruction_prefetch( io_generated_code,
i_micro_kernel_config->prefetch_instruction,
i_gp_reg_mapping->gp_reg_b_prefetch,
LIBXSMM_X86_GP_REG_UNDEF, 0,
((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
}
}
}
}
}
}
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code* io_generated_code,
const unsigned int i_gp_reg_tmp,
const libxsmm_gemm_descriptor* i_xgemm_desc,
const unsigned int i_mask_count ) {
unsigned int l_mask;
/* init full mask */
if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
l_mask = 0xff;
} else {
l_mask = 0xffff;
}
/* shift right by "inverse" remainder */
l_mask = l_mask >> i_mask_count;
/* move mask to GP register */
libxsmm_x86_instruction_alu_imm( io_generated_code,
LIBXSMM_X86_INSTR_MOVQ,
i_gp_reg_tmp,
l_mask );
if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512 ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) {
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVW_GPR_LD,
i_gp_reg_tmp,
LIBXSMM_X86_AVX512_MASK );
if ( ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVD_GPR_LD,
i_gp_reg_tmp,
2 );
} else if ( ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
libxsmm_x86_instruction_mask_move( io_generated_code,
LIBXSMM_X86_INSTR_KMOVQ_GPR_LD,
i_gp_reg_tmp,
2 );
} else {
/* no additional mask is needed */
}
} else {
/* shouldn't happen */
LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
return;
}
}
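/* Worked example (assuming i_mask_count = vector_length - (m % vector_length)):
 * FP32 on AVX-512 has 16 lanes; for an m-remainder of 3, i_mask_count = 13 and
 * 0xffff >> 13 = 0b0000000000000111, enabling exactly the low three lanes.
 */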
|
OpenNL_psm.c | #include "OpenNL_psm.h"
/*
* Copyright (c) 2004-2010, Bruno Levy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ALICE Project-Team nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* If you modify this software, you should include a notice giving the
* name of the person performing the modification, the date of modification,
* and the reason for such modification.
*
* Contact: Bruno Levy
*
* levy@loria.fr
*
* ALICE Project
* LORIA, INRIA Lorraine,
* Campus Scientifique, BP 239
* 54506 VANDOEUVRE LES NANCY CEDEX
* FRANCE
*
*/
/*
* This file is a PSM (pluggable software module)
* generated from the distribution of Geogram.
*
* See Geogram documentation on:
* http://alice.loria.fr/software/geogram/doc/html/index.html
*
* See documentation of the functions bundled in this PSM on:
* http://alice.loria.fr/software/geogram/doc/html/nl_8h.html
*/
/******* extracted from nl_private.h *******/
#ifndef OPENNL_PRIVATE_H
#define OPENNL_PRIVATE_H
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(__APPLE__) && defined(__MACH__)
#define NL_OS_APPLE
#endif
#if defined(__linux__) || defined(__ANDROID__) || defined(NL_OS_APPLE)
#define NL_OS_UNIX
#endif
#if defined(WIN32) || defined(_WIN64)
#define NL_OS_WINDOWS
#endif
#define nl_arg_used(x) (void)x
#if defined(__clang__) || defined(__GNUC__)
#define NL_NORETURN __attribute__((noreturn))
#else
#define NL_NORETURN
#endif
#if defined(_MSC_VER)
#define NL_NORETURN_DECL __declspec(noreturn)
#else
#define NL_NORETURN_DECL
#endif
NL_NORETURN_DECL void nl_assertion_failed(
const char* cond, const char* file, int line
) NL_NORETURN;
NL_NORETURN_DECL void nl_range_assertion_failed(
double x, double min_val, double max_val, const char* file, int line
) NL_NORETURN;
NL_NORETURN_DECL void nl_should_not_have_reached(
const char* file, int line
) NL_NORETURN;
#define nl_assert(x) { \
if(!(x)) { \
nl_assertion_failed(#x,__FILE__, __LINE__) ; \
} \
}
#define nl_range_assert(x,min_val,max_val) { \
if(((x) < (min_val)) || ((x) > (max_val))) { \
nl_range_assertion_failed(x, min_val, max_val, \
__FILE__, __LINE__ \
) ; \
} \
}
#define nl_assert_not_reached { \
nl_should_not_have_reached(__FILE__, __LINE__) ; \
}
#ifdef NL_DEBUG
#define nl_debug_assert(x) nl_assert(x)
#define nl_debug_range_assert(x,min_val,max_val) \
nl_range_assert(x,min_val,max_val)
#else
#define nl_debug_assert(x)
#define nl_debug_range_assert(x,min_val,max_val)
#endif
#ifdef NL_PARANOID
#define nl_parano_assert(x) nl_assert(x)
#define nl_parano_range_assert(x,min_val,max_val) \
nl_range_assert(x,min_val,max_val)
#else
#define nl_parano_assert(x)
#define nl_parano_range_assert(x,min_val,max_val)
#endif
void nlError(const char* function, const char* message) ;
void nlWarning(const char* function, const char* message) ;
NLdouble nlCurrentTime(void);
typedef void* NLdll;
#define NL_LINK_NOW 1
#define NL_LINK_LAZY 2
#define NL_LINK_GLOBAL 4
#define NL_LINK_QUIET 8
#define NL_LINK_USE_FALLBACK 16
NLdll nlOpenDLL(const char* filename, NLenum flags);
void nlCloseDLL(NLdll handle);
NLfunc nlFindFunction(NLdll handle, const char* funcname);
/* classic macros */
#ifndef MIN
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
#endif
#ifndef MAX
#define MAX(x,y) (((x) > (y)) ? (x) : (y))
#endif
#define NL_NEW(T) (T*)(calloc(1, sizeof(T)))
#define NL_NEW_ARRAY(T,NB) (T*)(calloc((size_t)(NB),sizeof(T)))
#define NL_RENEW_ARRAY(T,x,NB) (T*)(realloc(x,(size_t)(NB)*sizeof(T)))
#define NL_DELETE(x) free(x); x = NULL
#define NL_DELETE_ARRAY(x) free(x); x = NULL
#define NL_CLEAR(T, x) memset(x, 0, sizeof(T))
#define NL_CLEAR_ARRAY(T,x,NB) memset(x, 0, (size_t)(NB)*sizeof(T))
#define NL_UINT_MAX 0xffffffff
#define NL_USHORT_MAX 0xffff
extern NLprintfFunc nl_printf;
extern NLfprintfFunc nl_fprintf;
#endif
/******* extracted from nl_blas.h *******/
#ifndef OPENNL_BLAS_H
#define OPENNL_BLAS_H
struct NLBlas;
typedef struct NLBlas* NLBlas_t;
typedef enum {
NoTranspose=0, Transpose=1, ConjugateTranspose=2
} MatrixTranspose ;
typedef enum {
UpperTriangle=0, LowerTriangle=1
} MatrixTriangle ;
typedef enum {
UnitTriangular=0, NotUnitTriangular=1
} MatrixUnitTriangular ;
typedef enum {
NL_HOST_MEMORY, NL_DEVICE_MEMORY
} NLmemoryType;
typedef void* (*FUNPTR_malloc)(
NLBlas_t blas, NLmemoryType type, size_t size
);
typedef void (*FUNPTR_free)(
NLBlas_t blas, NLmemoryType type, size_t size, void* ptr
);
typedef void (*FUNPTR_memcpy)(
NLBlas_t blas,
void* to, NLmemoryType to_type,
void* from, NLmemoryType from_type,
size_t size
);
typedef void (*FUNPTR_dcopy)(
NLBlas_t blas, int n, const double *x, int incx, double *y, int incy
);
typedef void (*FUNPTR_dscal)(
NLBlas_t blas, int n, double a, double *x, int incx
);
typedef double (*FUNPTR_ddot)(
NLBlas_t blas, int n, const double *x, int incx, const double *y, int incy
);
typedef double (*FUNPTR_dnrm2)(NLBlas_t blas, int n, const double *x, int incx);
typedef void (*FUNPTR_daxpy)(
NLBlas_t blas, int n,
double a, const double *x, int incx, double *y, int incy
);
typedef void (*FUNPTR_dgemv)(
NLBlas_t blas, MatrixTranspose trans, int m, int n, double alpha,
const double *A, int ldA, const double *x, int incx,
double beta, double *y, int incy
);
typedef void (*FUNPTR_dtpsv)(
NLBlas_t blas, MatrixTriangle uplo, MatrixTranspose trans,
MatrixUnitTriangular diag, int n, const double *AP,
double *x, int incx
);
struct NLBlas {
FUNPTR_malloc Malloc;
FUNPTR_free Free;
FUNPTR_memcpy Memcpy;
FUNPTR_dcopy Dcopy;
FUNPTR_dscal Dscal;
FUNPTR_ddot Ddot;
FUNPTR_dnrm2 Dnrm2;
FUNPTR_daxpy Daxpy;
FUNPTR_dgemv Dgemv;
FUNPTR_dtpsv Dtpsv;
NLboolean has_unified_memory;
double start_time;
NLulong flops;
NLulong used_ram[2];
NLulong max_used_ram[2];
/*
* Used for stats of the linear solver
* (a bit ugly, should not be here, but
* more convenient for now...)
*/
double sq_rnorm;
double sq_bnorm;
};
NLboolean nlBlasHasUnifiedMemory(NLBlas_t blas);
void nlBlasResetStats(NLBlas_t blas);
double nlBlasGFlops(NLBlas_t blas);
NLulong nlBlasUsedRam(NLBlas_t blas, NLmemoryType type);
NLulong nlBlasMaxUsedRam(NLBlas_t blas, NLmemoryType type);
NLBlas_t nlHostBlas(void);
#define NL_NEW_VECTOR(blas, memtype, dim) \
(double*)blas->Malloc(blas,memtype,(size_t)(dim)*sizeof(double))
#define NL_DELETE_VECTOR(blas, memtype, dim, ptr) \
blas->Free(blas,memtype,(size_t)(dim)*sizeof(double),ptr)
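/* Usage sketch (illustrative):
 *   NLBlas_t blas = nlHostBlas();
 *   double* x = NL_NEW_VECTOR(blas, NL_HOST_MEMORY, n);
 *   ...
 *   NL_DELETE_VECTOR(blas, NL_HOST_MEMORY, n, x);
 */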
#endif
/******* extracted from nl_matrix.h *******/
#ifndef OPENNL_MATRIX_H
#define OPENNL_MATRIX_H
#ifdef __cplusplus
extern "C" {
#endif
/* Abstract matrix interface */
struct NLMatrixStruct;
typedef struct NLMatrixStruct* NLMatrix;
typedef void(*NLDestroyMatrixFunc)(NLMatrix M);
typedef void(*NLMultMatrixVectorFunc)(NLMatrix M, const double* x, double* y);
#define NL_MATRIX_SPARSE_DYNAMIC 0x1001
#define NL_MATRIX_CRS 0x1002
#define NL_MATRIX_SUPERLU_EXT 0x1003
#define NL_MATRIX_CHOLMOD_EXT 0x1004
#define NL_MATRIX_FUNCTION 0x1005
#define NL_MATRIX_OTHER 0x1006
struct NLMatrixStruct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
};
NLAPI void NLAPIENTRY nlDeleteMatrix(NLMatrix M);
NLAPI void NLAPIENTRY nlMultMatrixVector(
NLMatrix M, const double* x, double* y
);
/* Dynamic arrays for sparse row/columns */
typedef struct {
NLuint index;
NLdouble value;
} NLCoeff;
typedef struct {
NLuint size;
NLuint capacity;
NLCoeff* coeff;
} NLRowColumn;
NLAPI void NLAPIENTRY nlRowColumnConstruct(NLRowColumn* c);
NLAPI void NLAPIENTRY nlRowColumnDestroy(NLRowColumn* c);
NLAPI void NLAPIENTRY nlRowColumnGrow(NLRowColumn* c);
NLAPI void NLAPIENTRY nlRowColumnAdd(
NLRowColumn* c, NLuint index, NLdouble value
);
NLAPI void NLAPIENTRY nlRowColumnAppend(
NLRowColumn* c, NLuint index, NLdouble value
);
NLAPI void NLAPIENTRY nlRowColumnZero(NLRowColumn* c);
NLAPI void NLAPIENTRY nlRowColumnClear(NLRowColumn* c);
NLAPI void NLAPIENTRY nlRowColumnSort(NLRowColumn* c);
/* Compressed Row Storage */
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
NLdouble* val;
NLuint* rowptr;
NLuint* colind;
NLuint nslices;
NLuint* sliceptr;
NLboolean symmetric_storage;
} NLCRSMatrix;
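/* Example CRS encoding (illustrative):
 *     [ 2 0 1 ]        val    = {2, 1, 3, 4, 5}
 *     [ 0 3 0 ]   ->   colind = {0, 2, 1, 0, 2}
 *     [ 4 0 5 ]        rowptr = {0, 2, 3, 5}
 * Row i occupies entries rowptr[i] .. rowptr[i+1]-1; sliceptr partitions the
 * rows into nslices contiguous chunks so y = M x can run one slice per thread.
 */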
NLAPI void NLAPIENTRY nlCRSMatrixConstruct(
NLCRSMatrix* M, NLuint m, NLuint n, NLuint nnz, NLuint nslices
);
NLAPI void NLAPIENTRY nlCRSMatrixConstructSymmetric(
NLCRSMatrix* M, NLuint n, NLuint nnz
);
NLAPI NLboolean NLAPIENTRY nlCRSMatrixLoad(
NLCRSMatrix* M, const char* filename
);
NLAPI NLboolean NLAPIENTRY nlCRSMatrixSave(
NLCRSMatrix* M, const char* filename
);
NLAPI NLuint NLAPIENTRY nlCRSMatrixNNZ(NLCRSMatrix* M);
/* SparseMatrix data structure */
#define NL_MATRIX_STORE_ROWS 1
#define NL_MATRIX_STORE_COLUMNS 2
#define NL_MATRIX_STORE_SYMMETRIC 4
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
NLuint diag_size;
NLuint diag_capacity;
NLenum storage;
NLRowColumn* row;
NLRowColumn* column;
NLdouble* diag;
NLuint row_capacity;
NLuint column_capacity;
} NLSparseMatrix;
NLAPI NLMatrix NLAPIENTRY nlSparseMatrixNew(
NLuint m, NLuint n, NLenum storage
);
NLAPI void NLAPIENTRY nlSparseMatrixConstruct(
NLSparseMatrix* M, NLuint m, NLuint n, NLenum storage
);
NLAPI void NLAPIENTRY nlSparseMatrixDestroy(NLSparseMatrix* M);
NLAPI void NLAPIENTRY nlSparseMatrixMult(
NLSparseMatrix* A, const NLdouble* x, NLdouble* y
);
NLAPI void NLAPIENTRY nlSparseMatrixAdd(
NLSparseMatrix* M, NLuint i, NLuint j, NLdouble value
);
NLAPI void NLAPIENTRY nlSparseMatrixAddMatrix(
NLSparseMatrix* M, double mul, const NLMatrix N
);
NLAPI void NLAPIENTRY nlSparseMatrixZero( NLSparseMatrix* M);
NLAPI void NLAPIENTRY nlSparseMatrixClear( NLSparseMatrix* M);
NLAPI NLuint NLAPIENTRY nlSparseMatrixNNZ( NLSparseMatrix* M);
NLAPI void NLAPIENTRY nlSparseMatrixSort( NLSparseMatrix* M);
NLAPI void NLAPIENTRY nlSparseMatrixAddRow( NLSparseMatrix* M);
NLAPI void NLAPIENTRY nlSparseMatrixAddColumn( NLSparseMatrix* M);
NLAPI void NLAPIENTRY nlSparseMatrixMAddRow(
NLSparseMatrix* M, NLuint i1, double s, NLuint i2
);
NLAPI void NLAPIENTRY nlSparseMatrixScaleRow(
NLSparseMatrix* M, NLuint i, double s
);
NLAPI void NLAPIENTRY nlSparseMatrixZeroRow(
NLSparseMatrix* M, NLuint i
);
NLAPI NLMatrix NLAPIENTRY nlCRSMatrixNewFromSparseMatrix(NLSparseMatrix* M);
NLAPI NLMatrix NLAPIENTRY nlCRSMatrixNewFromSparseMatrixSymmetric(
NLSparseMatrix* M
);
NLAPI void NLAPIENTRY nlMatrixCompress(NLMatrix* M);
NLAPI NLuint NLAPIENTRY nlMatrixNNZ(NLMatrix M);
NLAPI NLMatrix NLAPIENTRY nlMatrixFactorize(NLMatrix M, NLenum solver);
typedef void(*NLMatrixFunc)(const double* x, double* y);
NLAPI NLMatrix NLAPIENTRY nlMatrixNewFromFunction(
NLuint m, NLuint n, NLMatrixFunc func
);
NLAPI NLMatrixFunc NLAPIENTRY nlMatrixGetFunction(NLMatrix M);
NLAPI NLMatrix NLAPIENTRY nlMatrixNewFromProduct(
NLMatrix M, NLboolean product_owns_M,
NLMatrix N, NLboolean product_owns_N
);
#ifdef __cplusplus
}
#endif
#endif
/******* extracted from nl_context.h *******/
#ifndef OPENNL_CONTEXT_H
#define OPENNL_CONTEXT_H
/* NLContext data structure */
typedef NLboolean(*NLSolverFunc)(void);
typedef void(*NLProgressFunc)(
NLuint cur_iter, NLuint max_iter, double cur_err, double max_err
);
#define NL_STATE_INITIAL 0
#define NL_STATE_SYSTEM 1
#define NL_STATE_MATRIX 2
#define NL_STATE_ROW 3
#define NL_STATE_MATRIX_CONSTRUCTED 4
#define NL_STATE_SYSTEM_CONSTRUCTED 5
#define NL_STATE_SOLVED 6
typedef struct {
void* base_address;
NLuint stride;
} NLBufferBinding;
#define NL_BUFFER_ITEM(B,i) \
*(double*)((void*)((char*)((B).base_address)+((i)*(B).stride)))
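/* Usage sketch (illustrative names): NL_BUFFER_ITEM reads a double at a byte
 * stride, so solver variables can live interleaved inside user structs:
 *   typedef struct { double u; double v; } UV;
 *   UV uvs[100];
 *   NLBufferBinding B; B.base_address = &uvs[0].v; B.stride = sizeof(UV);
 *   NL_BUFFER_ITEM(B, i) then aliases uvs[i].v.
 */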
typedef struct {
NLenum state;
NLboolean user_variable_buffers;
NLBufferBinding* variable_buffer;
NLdouble* variable_value;
NLboolean* variable_is_locked;
NLuint* variable_index;
NLuint n;
NLenum matrix_mode;
NLMatrix M;
NLMatrix P;
NLMatrix B;
NLRowColumn af;
NLRowColumn al;
NLdouble* x;
NLdouble* b;
NLdouble* right_hand_side;
NLdouble row_scaling;
NLenum solver;
NLenum preconditioner;
NLboolean preconditioner_defined;
NLuint nb_variables;
NLuint nb_systems;
NLboolean ij_coefficient_called;
NLuint current_row;
NLboolean least_squares;
NLboolean symmetric;
NLuint max_iterations;
NLboolean max_iterations_defined;
NLuint inner_iterations;
NLdouble threshold;
NLboolean threshold_defined;
NLdouble omega;
NLboolean normalize_rows;
NLuint used_iterations;
NLdouble error;
NLdouble start_time;
NLdouble elapsed_time;
NLSolverFunc solver_func;
NLProgressFunc progress_func;
NLboolean verbose;
NLulong flops;
NLenum eigen_solver;
NLdouble eigen_shift;
NLboolean eigen_shift_invert;
NLdouble* eigen_value;
NLdouble* temp_eigen_value;
} NLContextStruct;
extern NLContextStruct* nlCurrentContext;
void nlCheckState(NLenum state);
void nlTransition(NLenum from_state, NLenum to_state);
NLboolean nlDefaultSolver(void);
#endif
/******* extracted from nl_iterative_solvers.h *******/
#ifndef OPENNL_ITERATIVE_SOLVERS_H
#define OPENNL_ITERATIVE_SOLVERS_H
NLAPI NLuint NLAPIENTRY nlSolveSystemIterative(
NLBlas_t blas,
NLMatrix M, NLMatrix P, NLdouble* b, NLdouble* x,
NLenum solver,
double eps, NLuint max_iter, NLuint inner_iter
);
#endif
/******* extracted from nl_preconditioners.h *******/
#ifndef OPENNL_PRECONDITIONERS_H
#define OPENNL_PRECONDITIONERS_H
/* preconditioners */
NLMatrix nlNewJacobiPreconditioner(NLMatrix M);
NLMatrix nlNewSSORPreconditioner(NLMatrix M, double omega);
#endif
/******* extracted from nl_superlu.h *******/
#ifndef OPENNL_SUPERLU_H
#define OPENNL_SUPERLU_H
NLAPI NLMatrix NLAPIENTRY nlMatrixFactorize_SUPERLU(
NLMatrix M, NLenum solver
);
NLboolean nlInitExtension_SUPERLU(void);
NLboolean nlExtensionIsInitialized_SUPERLU(void);
#endif
/******* extracted from nl_cholmod.h *******/
#ifndef OPENNL_CHOLMOD_H
#define OPENNL_CHOLMOD_H
NLAPI NLMatrix NLAPIENTRY nlMatrixFactorize_CHOLMOD(
NLMatrix M, NLenum solver
);
NLboolean nlInitExtension_CHOLMOD(void);
NLboolean nlExtensionIsInitialized_CHOLMOD(void);
#endif
/******* extracted from nl_arpack.h *******/
#ifndef OPENNL_ARPACK_H
#define OPENNL_ARPACK_H
NLboolean nlInitExtension_ARPACK(void);
NLboolean nlExtensionIsInitialized_ARPACK(void);
void nlEigenSolve_ARPACK(void);
#endif
/******* extracted from nl_mkl.h *******/
#ifndef OPENNL_MKL_H
#define OPENNL_MKL_H
NLboolean nlInitExtension_MKL(void);
NLboolean nlExtensionIsInitialized_MKL(void);
extern NLMultMatrixVectorFunc NLMultMatrixVector_MKL;
#endif
/******* extracted from nl_cuda.h *******/
#ifndef OPENNL_CUDA_EXT_H
#define OPENNL_CUDA_EXT_H
NLboolean nlInitExtension_CUDA(void);
NLboolean nlExtensionIsInitialized_CUDA(void);
NLMatrix nlCUDAMatrixNewFromCRSMatrix(NLMatrix M);
NLMatrix nlCUDAJacobiPreconditionerNewFromCRSMatrix(NLMatrix M);
NLBlas_t nlCUDABlas(void);
#endif
/******* extracted from nl_os.c *******/
#if (defined (WIN32) || defined(_WIN64))
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/times.h>
#endif
#if defined(GEO_DYNAMIC_LIBS) && defined(NL_OS_UNIX)
#include <dlfcn.h>
#endif
/* Assertions */
void nl_assertion_failed(const char* cond, const char* file, int line) {
nl_fprintf(
stderr,
"OpenNL assertion failed: %s, file:%s, line:%d\n",
cond,file,line
) ;
abort() ;
}
void nl_range_assertion_failed(
double x, double min_val, double max_val, const char* file, int line
) {
nl_fprintf(
stderr,
"OpenNL range assertion failed: "
"%f in [ %f ... %f ], file:%s, line:%d\n",
x, min_val, max_val, file,line
) ;
abort() ;
}
void nl_should_not_have_reached(const char* file, int line) {
nl_fprintf(
stderr,
"OpenNL should not have reached this point: file:%s, line:%d\n",
file,line
) ;
abort() ;
}
/* Timing */
#if defined(WIN32) || defined(_WIN64)
NLdouble nlCurrentTime() {
return (NLdouble)GetTickCount() / 1000.0 ;
}
#else
double nlCurrentTime() {
clock_t user_clock ;
struct tms user_tms ;
user_clock = times(&user_tms) ;
return (NLdouble)user_clock / 100.0 ;
}
#endif
/* DLLs/shared objects/dylibs */
#if defined(GEO_DYNAMIC_LIBS)
# if defined(NL_OS_UNIX)
NLdll nlOpenDLL(const char* name, NLenum flags_in) {
void* result = NULL;
int flags = 0;
if((flags_in & NL_LINK_NOW) != 0) {
flags |= RTLD_NOW;
}
if((flags_in & NL_LINK_LAZY) != 0) {
flags |= RTLD_LAZY;
}
if((flags_in & NL_LINK_GLOBAL) != 0) {
flags |= RTLD_GLOBAL;
}
if((flags_in & NL_LINK_QUIET) == 0) {
nl_fprintf(stdout,"Trying to load %s\n", name);
}
result = dlopen(name, flags);
if(result == NULL) {
if((flags_in & NL_LINK_QUIET) == 0) {
nl_fprintf(stderr,"Did not find %s,\n", name);
nl_fprintf(stderr,"Retrying with libgeogram_num_3rdparty.so\n");
}
if((flags_in & NL_LINK_USE_FALLBACK) != 0) {
result=dlopen("libgeogram_num_3rdparty.so", flags);
if(result == NULL) {
if((flags_in & NL_LINK_QUIET) == 0) {
nlError("nlOpenDLL/dlopen",dlerror());
}
}
}
}
if((flags_in & NL_LINK_QUIET) == 0 && result != NULL) {
nl_fprintf(stdout,"Loaded %s\n", name);
}
return result;
}
void nlCloseDLL(void* handle) {
dlclose(handle);
}
NLfunc nlFindFunction(void* handle, const char* name) {
/*
* It is not legal in modern C to cast a void*
* pointer into a function pointer, thus requiring this
* (quite dirty) function that uses a union.
*/
union {
void* ptr;
NLfunc fptr;
} u;
u.ptr = dlsym(handle, name);
return u.fptr;
}
# elif defined(NL_OS_WINDOWS)
NLdll nlOpenDLL(const char* name, NLenum flags) {
/* Note: NL_LINK_LAZY and NL_LINK_GLOBAL are ignored. */
void* result = LoadLibrary(name);
if(result == NULL && ((flags & NL_LINK_USE_FALLBACK) != 0)) {
if((flags & NL_LINK_QUIET) == 0) {
nl_fprintf(stderr,"Did not find %s,\n", name);
nl_fprintf(stderr,"Retrying with geogram_num_3rdparty\n");
}
result=LoadLibrary("geogram_num_3rdparty.dll");
}
return result;
}
void nlCloseDLL(void* handle) {
FreeLibrary((HMODULE)handle);
}
NLfunc nlFindFunction(void* handle, const char* name) {
return (NLfunc)GetProcAddress((HMODULE)handle, name);
}
# endif
#else
NLdll nlOpenDLL(const char* name, NLenum flags) {
nl_arg_used(name);
nl_arg_used(flags);
#ifdef NL_OS_UNIX
nlError("nlOpenDLL","Was not compiled with dynamic linking enabled");
nlError("nlOpenDLL","(see VORPALINE_BUILD_DYNAMIC in CMakeLists.txt)");
#else
nlError("nlOpenDLL","Not implemented");
#endif
return NULL;
}
void nlCloseDLL(void* handle) {
nl_arg_used(handle);
nlError("nlCloseDLL","Not implemented");
}
NLfunc nlFindFunction(void* handle, const char* name) {
nl_arg_used(handle);
nl_arg_used(name);
nlError("nlFindFunction","Not implemented");
return NULL;
}
#endif
/* Error-reporting functions */
NLprintfFunc nl_printf = printf;
NLfprintfFunc nl_fprintf = fprintf;
void nlError(const char* function, const char* message) {
nl_fprintf(stderr, "OpenNL error in %s(): %s\n", function, message) ;
}
void nlWarning(const char* function, const char* message) {
nl_fprintf(stderr, "OpenNL warning in %s(): %s\n", function, message) ;
}
void nlPrintfFuncs(NLprintfFunc f1, NLfprintfFunc f2) {
nl_printf = f1;
nl_fprintf = f2;
}
/******* extracted from nl_matrix.c *******/
/*
Silences warnings about the const casts in the qsort() callback.
*/
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
void nlDeleteMatrix(NLMatrix M) {
if(M == NULL) {
return;
}
M->destroy_func(M);
NL_DELETE(M);
}
void nlMultMatrixVector(
NLMatrix M, const double* x, double* y
) {
M->mult_func(M,x,y);
}
void nlRowColumnConstruct(NLRowColumn* c) {
c->size = 0;
c->capacity = 0;
c->coeff = NULL;
}
void nlRowColumnDestroy(NLRowColumn* c) {
NL_DELETE_ARRAY(c->coeff);
c->size = 0;
c->capacity = 0;
}
void nlRowColumnGrow(NLRowColumn* c) {
if(c->capacity != 0) {
c->capacity = 2 * c->capacity;
c->coeff = NL_RENEW_ARRAY(NLCoeff, c->coeff, c->capacity);
} else {
c->capacity = 4;
c->coeff = NL_NEW_ARRAY(NLCoeff, c->capacity);
}
}
void nlRowColumnAdd(NLRowColumn* c, NLuint index, NLdouble value) {
NLuint i;
for(i=0; i<c->size; i++) {
if(c->coeff[i].index == index) {
c->coeff[i].value += value;
return;
}
}
if(c->size == c->capacity) {
nlRowColumnGrow(c);
}
c->coeff[c->size].index = index;
c->coeff[c->size].value = value;
c->size++;
}
/* Does not check whether the index already exists */
void nlRowColumnAppend(NLRowColumn* c, NLuint index, NLdouble value) {
if(c->size == c->capacity) {
nlRowColumnGrow(c);
}
c->coeff[c->size].index = index;
c->coeff[c->size].value = value;
c->size++;
}
void nlRowColumnZero(NLRowColumn* c) {
c->size = 0;
}
void nlRowColumnClear(NLRowColumn* c) {
c->size = 0;
c->capacity = 0;
NL_DELETE_ARRAY(c->coeff);
}
static int nlCoeffCompare(const void* p1, const void* p2) {
/* qsort() needs a three-way result; comparing with "<" alone returns only 0 or 1 */
NLuint i1 = ((const NLCoeff*)(p1))->index;
NLuint i2 = ((const NLCoeff*)(p2))->index;
return (i1 > i2) - (i1 < i2);
}
void nlRowColumnSort(NLRowColumn* c) {
qsort(c->coeff, c->size, sizeof(NLCoeff), nlCoeffCompare);
}
/* CRSMatrix data structure */
static void nlCRSMatrixDestroy(NLCRSMatrix* M) {
NL_DELETE_ARRAY(M->val);
NL_DELETE_ARRAY(M->rowptr);
NL_DELETE_ARRAY(M->colind);
NL_DELETE_ARRAY(M->sliceptr);
M->m = 0;
M->n = 0;
M->nslices = 0;
}
NLboolean nlCRSMatrixSave(NLCRSMatrix* M, const char* filename) {
NLuint nnz = M->rowptr[M->m];
FILE* f = fopen(filename, "wb");
if(f == NULL) {
nlError("nlCRSMatrixSave", "Could not open file");
return NL_FALSE;
}
fwrite(&M->m, sizeof(NLuint), 1, f);
fwrite(&M->n, sizeof(NLuint), 1, f);
fwrite(&nnz, sizeof(NLuint), 1, f);
fwrite(M->rowptr, sizeof(NLuint), M->m+1, f);
fwrite(M->colind, sizeof(NLuint), nnz, f);
fwrite(M->val, sizeof(double), nnz, f);
fclose(f);
return NL_TRUE;
}
NLboolean nlCRSMatrixLoad(NLCRSMatrix* M, const char* filename) {
NLuint nnz = 0;
FILE* f = fopen(filename, "rb");
NLboolean truncated = NL_FALSE;
if(f == NULL) {
nlError("nlCRSMatrixLoad", "Could not open file");
return NL_FALSE;
}
truncated = truncated || (
fread(&M->m, sizeof(NLuint), 1, f) != 1 ||
fread(&M->n, sizeof(NLuint), 1, f) != 1 ||
fread(&nnz, sizeof(NLuint), 1, f) != 1
);
if(truncated) {
M->rowptr = NULL;
M->colind = NULL;
M->val = NULL;
} else {
M->rowptr = NL_NEW_ARRAY(NLuint, M->m+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->val = NL_NEW_ARRAY(double, nnz);
truncated = truncated || (
fread(M->rowptr, sizeof(NLuint), M->m+1, f) != M->m+1 ||
fread(M->colind, sizeof(NLuint), nnz, f) != nnz ||
fread(M->val, sizeof(double), nnz, f) != nnz
);
}
if(truncated) {
nlError("nlCRSMatrixSave", "File appears to be truncated");
NL_DELETE_ARRAY(M->rowptr);
NL_DELETE_ARRAY(M->colind);
NL_DELETE_ARRAY(M->val);
return NL_FALSE;
} else {
M->nslices = 1;
M->sliceptr = NL_NEW_ARRAY(NLuint, M->nslices+1);
M->sliceptr[0] = 0;
M->sliceptr[1] = M->m;
}
fclose(f);
return NL_TRUE;
}
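/* On-disk layout used by nlCRSMatrixSave/nlCRSMatrixLoad:
 *   m, n, nnz (one NLuint each), then rowptr[m+1] and colind[nnz] (NLuint)
 *   and val[nnz] (double), all in native byte order.
 */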
NLuint nlCRSMatrixNNZ(NLCRSMatrix* M) {
return M->rowptr[M->m];
}
static void nlCRSMatrixMultSlice(
NLCRSMatrix* M, const double* x, double* y, NLuint Ibegin, NLuint Iend
) {
NLuint i,j;
for(i=Ibegin; i<Iend; ++i) {
double sum=0.0;
for(j=M->rowptr[i]; j<M->rowptr[i+1]; ++j) {
sum += M->val[j] * x[M->colind[j]];
}
y[i] = sum;
}
}
static void nlCRSMatrixMult(
NLCRSMatrix* M, const double* x, double* y
) {
int slice;
int nslices = (int)(M->nslices);
NLuint i,j,jj;
NLdouble a;
if(M->symmetric_storage) {
for(i=0; i<M->m; ++i) {
y[i] = 0.0;
}
for(i=0; i<M->m; ++i) {
for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
a = M->val[jj];
j = M->colind[jj];
y[i] += a * x[j];
if(j != i) {
y[j] += a * x[i];
}
}
}
} else {
#if defined(_OPENMP)
#pragma omp parallel for private(slice)
#endif
for(slice=0; slice<nslices; ++slice) {
nlCRSMatrixMultSlice(
M,x,y,M->sliceptr[slice],M->sliceptr[slice+1]
);
}
}
nlHostBlas()->flops += (NLulong)(2*nlCRSMatrixNNZ(M));
}
void nlCRSMatrixConstruct(
NLCRSMatrix* M, NLuint m, NLuint n, NLuint nnz, NLuint nslices
) {
M->m = m;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
if(NLMultMatrixVector_MKL != NULL) {
M->mult_func = (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL;
} else {
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
}
M->nslices = nslices;
M->val = NL_NEW_ARRAY(double, nnz);
M->rowptr = NL_NEW_ARRAY(NLuint, m+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
M->symmetric_storage = NL_FALSE;
}
void nlCRSMatrixConstructSymmetric(
NLCRSMatrix* M, NLuint n, NLuint nnz
) {
M->m = n;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
M->nslices = 0;
M->val = NL_NEW_ARRAY(double, nnz);
M->rowptr = NL_NEW_ARRAY(NLuint, n+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->sliceptr = NULL;
M->symmetric_storage = NL_TRUE;
}
/* SparseMatrix data structure */
static void nlSparseMatrixDestroyRowColumns(NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnDestroy(&(M->row[i]));
}
NL_DELETE_ARRAY(M->row);
}
M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_ROWS);
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnDestroy(&(M->column[i]));
}
NL_DELETE_ARRAY(M->column);
}
M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_COLUMNS);
}
void nlSparseMatrixDestroy(NLSparseMatrix* M) {
nl_assert(M->type == NL_MATRIX_SPARSE_DYNAMIC);
nlSparseMatrixDestroyRowColumns(M);
NL_DELETE_ARRAY(M->diag);
#ifdef NL_PARANOID
NL_CLEAR(NLSparseMatrix,M);
#endif
}
void nlSparseMatrixAdd(NLSparseMatrix* M, NLuint i, NLuint j, NLdouble value) {
nl_parano_range_assert(i, 0, M->m - 1);
nl_parano_range_assert(j, 0, M->n - 1);
if((M->storage & NL_MATRIX_STORE_SYMMETRIC) && (j > i)) {
return;
}
if(i == j) {
M->diag[i] += value;
}
if(M->storage & NL_MATRIX_STORE_ROWS) {
nlRowColumnAdd(&(M->row[i]), j, value);
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
nlRowColumnAdd(&(M->column[j]), i, value);
}
}
static void nlSparseMatrixAddSparseMatrix(
NLSparseMatrix* M, double mul, const NLSparseMatrix* N
) {
NLuint i,j,ii,jj;
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
if(N->storage & NL_MATRIX_STORE_SYMMETRIC) {
nl_assert(M->storage & NL_MATRIX_STORE_SYMMETRIC);
}
if(N->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<N->m; ++i) {
for(jj=0; jj<N->row[i].size; ++jj) {
nlSparseMatrixAdd(
M,
i, N->row[i].coeff[jj].index,
mul*N->row[i].coeff[jj].value
);
}
}
} else {
nl_assert(N->storage & NL_MATRIX_STORE_COLUMNS);
for(j=0; j<N->n; ++j) {
for(ii=0; ii<N->column[j].size; ++ii) {
nlSparseMatrixAdd(
M,
N->column[j].coeff[ii].index, j,
mul*N->column[j].coeff[ii].value
);
}
}
}
}
static void nlSparseMatrixAddCRSMatrix(
NLSparseMatrix* M, double mul, const NLCRSMatrix* N
) {
NLuint i,jj;
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
for(i=0; i<M->m; ++i) {
for(jj=N->rowptr[i]; jj<N->rowptr[i+1]; ++jj) {
nlSparseMatrixAdd(
M,
i,
N->colind[jj],
mul*N->val[jj]
);
}
}
}
void nlSparseMatrixAddMatrix(
NLSparseMatrix* M, double mul, const NLMatrix N
) {
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
if(N->type == NL_MATRIX_SPARSE_DYNAMIC) {
nlSparseMatrixAddSparseMatrix(M, mul, (const NLSparseMatrix*)N);
} else if(N->type == NL_MATRIX_CRS) {
nlSparseMatrixAddCRSMatrix(M, mul, (const NLCRSMatrix*)N);
} else {
nl_assert_not_reached;
}
}
void nlSparseMatrixZero( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnZero(&(M->row[i]));
}
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnZero(&(M->column[i]));
}
}
NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
void nlSparseMatrixClear( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnClear(&(M->row[i]));
}
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnClear(&(M->column[i]));
}
}
NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
/* Returns the number of non-zero coefficients */
NLuint nlSparseMatrixNNZ( NLSparseMatrix* M) {
NLuint nnz = 0;
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i = 0; i<M->m; i++) {
nnz += M->row[i].size;
}
} else if (M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i = 0; i<M->n; i++) {
nnz += M->column[i].size;
}
} else {
nl_assert_not_reached;
}
return nnz;
}
void nlSparseMatrixSort( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i = 0; i<M->m; i++) {
nlRowColumnSort(&(M->row[i]));
}
}
if (M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i = 0; i<M->n; i++) {
nlRowColumnSort(&(M->column[i]));
}
}
}
void nlSparseMatrixMAddRow(
NLSparseMatrix* M, NLuint i1, double s, NLuint i2
) {
NLuint jj;
NLRowColumn* Ri2 = &(M->row[i2]);
NLCoeff* c = NULL;
nl_debug_assert(i1 < M->m);
nl_debug_assert(i2 < M->m);
for(jj=0; jj<Ri2->size; ++jj) {
c = &(Ri2->coeff[jj]);
nlSparseMatrixAdd(M, i1, c->index, s*c->value);
}
}
void nlSparseMatrixScaleRow(
NLSparseMatrix* M, NLuint i, double s
) {
NLuint jj;
NLRowColumn* Ri = &(M->row[i]);
NLCoeff* c = NULL;
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
nl_assert(!(M->storage & NL_MATRIX_STORE_COLUMNS));
nl_debug_assert(i < M->m);
for(jj=0; jj<Ri->size; ++jj) {
c = &(Ri->coeff[jj]);
c->value *= s;
}
if(i < M->diag_size) {
M->diag[i] *= s;
}
}
void nlSparseMatrixZeroRow(
NLSparseMatrix* M, NLuint i
) {
NLRowColumn* Ri = &(M->row[i]);
nl_debug_assert(i < M->m);
Ri->size = 0;
if(i < M->diag_size) {
M->diag[i] = 0.0;
}
}
/* SparseMatrix x Vector routines, internal helper routines */
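/*
 * Four variants follow, selected by nlSparseMatrixMult() below: rows vs.
 * columns storage, and symmetric vs. general. With symmetric storage only
 * the lower triangle (j <= i) is kept, so each off-diagonal coefficient
 * contributes twice: once as a_ij * x_j and once, mirrored, as a_ij * x_i.
 */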
static void nlSparseMatrix_mult_rows_symmetric(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint m = A->m;
NLuint i,ij;
NLCoeff* c = NULL;
for(i=0; i<m; i++) {
NLRowColumn* Ri = &(A->row[i]);
y[i] = 0;
for(ij=0; ij<Ri->size; ++ij) {
c = &(Ri->coeff[ij]);
y[i] += c->value * x[c->index];
if(i != c->index) {
y[c->index] += c->value * x[i];
}
}
}
}
static void nlSparseMatrix_mult_rows(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
    /*
     * Note: OpenMP does not like unsigned ints
     * (they may cause floating point exceptions),
     * therefore signed ints are used here for all
     * indices.
     */
int m = (int)(A->m);
int i,ij;
NLCoeff* c = NULL;
NLRowColumn* Ri = NULL;
#if defined(_OPENMP)
#pragma omp parallel for private(i,ij,c,Ri)
#endif
for(i=0; i<m; i++) {
Ri = &(A->row[i]);
y[i] = 0;
for(ij=0; ij<(int)(Ri->size); ij++) {
c = &(Ri->coeff[ij]);
y[i] += c->value * x[c->index];
}
}
}
static void nlSparseMatrix_mult_cols_symmetric(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint n = A->n;
NLuint j,ii;
    NLCoeff* c = NULL;
    /*
     * Coefficients are stored in the lower triangle, so c->index >= j and
     * writes target entries of y ahead of the current column. Clearing y
     * one entry at a time inside the loop would wipe contributions made by
     * previous columns, hence the whole vector is cleared upfront.
     */
    NL_CLEAR_ARRAY(NLdouble, y, A->m);
    for(j=0; j<n; j++) {
        NLRowColumn* Cj = &(A->column[j]);
        for(ii=0; ii<Cj->size; ii++) {
            c = &(Cj->coeff[ii]);
            y[c->index] += c->value * x[j];
            if(j != c->index) {
                y[j] += c->value * x[c->index];
            }
        }
    }
}
static void nlSparseMatrix_mult_cols(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint n = A->n;
NLuint j,ii;
NLCoeff* c = NULL;
NL_CLEAR_ARRAY(NLdouble, y, A->m);
for(j=0; j<n; j++) {
NLRowColumn* Cj = &(A->column[j]);
for(ii=0; ii<Cj->size; ii++) {
c = &(Cj->coeff[ii]);
y[c->index] += c->value * x[j];
}
}
}
void nlSparseMatrixMult(
NLSparseMatrix* A, const NLdouble* x, NLdouble* y
) {
nl_assert(A->type == NL_MATRIX_SPARSE_DYNAMIC);
if(A->storage & NL_MATRIX_STORE_ROWS) {
if(A->storage & NL_MATRIX_STORE_SYMMETRIC) {
nlSparseMatrix_mult_rows_symmetric(A, x, y);
} else {
nlSparseMatrix_mult_rows(A, x, y);
}
} else {
if(A->storage & NL_MATRIX_STORE_SYMMETRIC) {
nlSparseMatrix_mult_cols_symmetric(A, x, y);
} else {
nlSparseMatrix_mult_cols(A, x, y);
}
}
nlHostBlas()->flops += (NLulong)(2*nlSparseMatrixNNZ(A));
}
NLMatrix nlSparseMatrixNew(
NLuint m, NLuint n, NLenum storage
) {
NLSparseMatrix* result = NL_NEW(NLSparseMatrix);
nlSparseMatrixConstruct(result, m, n, storage);
return (NLMatrix)result;
}
void nlSparseMatrixConstruct(
NLSparseMatrix* M, NLuint m, NLuint n, NLenum storage
) {
NLuint i;
M->m = m;
M->n = n;
M->type = NL_MATRIX_SPARSE_DYNAMIC;
M->destroy_func = (NLDestroyMatrixFunc)nlSparseMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlSparseMatrixMult;
M->storage = storage;
if(storage & NL_MATRIX_STORE_ROWS) {
M->row = NL_NEW_ARRAY(NLRowColumn, m);
M->row_capacity = m;
        for(i=0; i<m; i++) { /* M->row has m entries */
nlRowColumnConstruct(&(M->row[i]));
}
} else {
M->row = NULL;
M->row_capacity = 0;
}
if(storage & NL_MATRIX_STORE_COLUMNS) {
M->column = NL_NEW_ARRAY(NLRowColumn, n);
M->column_capacity = n;
for(i=0; i<n; i++) {
nlRowColumnConstruct(&(M->column[i]));
}
} else {
M->column = NULL;
M->column_capacity = 0;
}
M->diag_size = MIN(m,n);
M->diag_capacity = M->diag_size;
M->diag = NL_NEW_ARRAY(NLdouble, M->diag_size);
}
static void adjust_diag(NLSparseMatrix* M) {
    NLuint new_diag_size = MIN(M->m, M->n);
    NLuint i;
    if(new_diag_size > M->diag_size) {
        if(new_diag_size > M->diag_capacity) {
            M->diag_capacity *= 2;
            if(M->diag_capacity == 0) {
                M->diag_capacity = 16;
            }
            M->diag = NL_RENEW_ARRAY(double, M->diag, M->diag_capacity);
        }
        /* NL_RENEW_ARRAY does not clear the new storage, and the size can
           also grow within the existing capacity, so the new entries are
           zeroed unconditionally. */
        for(i=M->diag_size; i<new_diag_size; ++i) {
            M->diag[i] = 0.0;
        }
        M->diag_size = new_diag_size;
    }
}
void nlSparseMatrixAddRow( NLSparseMatrix* M) {
++M->m;
if(M->storage & NL_MATRIX_STORE_ROWS) {
if(M->m > M->row_capacity) {
M->row_capacity *= 2;
if(M->row_capacity == 0) {
M->row_capacity = 16;
}
M->row = NL_RENEW_ARRAY(
NLRowColumn, M->row, M->row_capacity
);
}
nlRowColumnConstruct(&(M->row[M->m-1]));
}
adjust_diag(M);
}
void nlSparseMatrixAddColumn( NLSparseMatrix* M) {
++M->n;
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
if(M->n > M->column_capacity) {
M->column_capacity *= 2;
if(M->column_capacity == 0) {
M->column_capacity = 16;
}
M->column = NL_RENEW_ARRAY(
NLRowColumn, M->column, M->column_capacity
);
}
nlRowColumnConstruct(&(M->column[M->n-1]));
}
adjust_diag(M);
}
NLMatrix nlCRSMatrixNewFromSparseMatrix(NLSparseMatrix* M) {
NLuint nnz = nlSparseMatrixNNZ(M);
NLuint nslices = 8; /* TODO: get number of cores */
NLuint slice, cur_bound, cur_NNZ, cur_row;
NLuint i,ij,k;
NLuint slice_size = nnz / nslices;
NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
nl_assert(M->m == M->n);
nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
} else {
nlCRSMatrixConstruct(CRS, M->m, M->n, nnz, nslices);
}
nlSparseMatrixSort(M);
/* Convert matrix to CRS format */
k=0;
for(i=0; i<M->m; ++i) {
NLRowColumn* Ri = &(M->row[i]);
CRS->rowptr[i] = k;
for(ij=0; ij<Ri->size; ij++) {
NLCoeff* c = &(Ri->coeff[ij]);
CRS->val[k] = c->value;
CRS->colind[k] = c->index;
++k;
}
}
CRS->rowptr[M->m] = k;
/* Create "slices" to be used by parallel sparse matrix vector product */
if(CRS->sliceptr != NULL) {
cur_bound = slice_size;
cur_NNZ = 0;
cur_row = 0;
CRS->sliceptr[0]=0;
for(slice=1; slice<nslices; ++slice) {
while(cur_NNZ < cur_bound && cur_row < M->m) {
++cur_row;
cur_NNZ += CRS->rowptr[cur_row+1] - CRS->rowptr[cur_row];
}
CRS->sliceptr[slice] = cur_row;
cur_bound += slice_size;
}
CRS->sliceptr[nslices]=M->m;
}
return (NLMatrix)CRS;
}
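/*
 * Worked example of the CRS layout produced above (illustrative):
 *
 *   A = | 4 1 0 |        val    = { 4, 1, 1, 3, 5 }
 *       | 1 3 0 |   =>   colind = { 0, 1, 0, 1, 2 }
 *       | 0 0 5 |        rowptr = { 0, 2, 4, 5 }
 *
 * rowptr[i] .. rowptr[i+1]-1 are the positions of row i's coefficients in
 * val/colind, and rowptr[m] equals nnz. The slice pointers partition the
 * rows so that each slice holds roughly nnz/nslices coefficients for the
 * parallel matrix x vector product.
 */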
NLMatrix nlCRSMatrixNewFromSparseMatrixSymmetric(NLSparseMatrix* M) {
NLuint nnz;
NLuint i,j,jj,k;
NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
nl_assert(M->m == M->n);
nlSparseMatrixSort(M);
if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
nnz = nlSparseMatrixNNZ(M);
} else {
nnz = 0;
for(i=0; i<M->n; ++i) {
NLRowColumn* Ri = &M->row[i];
for(jj=0; jj<Ri->size; ++jj) {
j = Ri->coeff[jj].index;
if(j <= i) {
++nnz;
}
}
}
}
nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
k=0;
for(i=0; i<M->m; ++i) {
NLRowColumn* Ri = &(M->row[i]);
CRS->rowptr[i] = k;
for(jj=0; jj<Ri->size; ++jj) {
j = Ri->coeff[jj].index;
if((M->storage & NL_MATRIX_STORE_SYMMETRIC)) {
nl_debug_assert(j <= i);
}
if(j <= i) {
CRS->val[k] = Ri->coeff[jj].value;
CRS->colind[k] = j;
++k;
}
}
}
CRS->rowptr[M->m] = k;
return (NLMatrix)CRS;
}
void nlMatrixCompress(NLMatrix* M) {
NLMatrix CRS = NULL;
if((*M)->type != NL_MATRIX_SPARSE_DYNAMIC) {
return;
}
CRS = nlCRSMatrixNewFromSparseMatrix((NLSparseMatrix*)*M);
nlDeleteMatrix(*M);
*M = CRS;
}
NLuint nlMatrixNNZ(NLMatrix M) {
if(M->type == NL_MATRIX_SPARSE_DYNAMIC) {
return nlSparseMatrixNNZ((NLSparseMatrix*)M);
} else if(M->type == NL_MATRIX_CRS) {
return nlCRSMatrixNNZ((NLCRSMatrix*)M);
}
return M->m * M->n;
}
NLMatrix nlMatrixFactorize(NLMatrix M, NLenum solver) {
NLMatrix result = NULL;
switch(solver) {
case NL_SUPERLU_EXT:
case NL_PERM_SUPERLU_EXT:
case NL_SYMMETRIC_SUPERLU_EXT:
result = nlMatrixFactorize_SUPERLU(M,solver);
break;
case NL_CHOLMOD_EXT:
result = nlMatrixFactorize_CHOLMOD(M,solver);
break;
default:
nlError("nlMatrixFactorize","unknown solver");
}
return result;
}
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
NLMatrixFunc matrix_func;
} NLFunctionMatrix;
static void nlFunctionMatrixDestroy(NLFunctionMatrix* M) {
(void)M; /* to avoid 'unused parameter' warning */
/*
* Nothing special to do,
* there is no dynamic allocated mem.
*/
}
static void nlFunctionMatrixMult(
NLFunctionMatrix* M, const NLdouble* x, NLdouble* y
) {
M->matrix_func(x,y);
}
NLMatrix nlMatrixNewFromFunction(NLuint m, NLuint n, NLMatrixFunc func) {
NLFunctionMatrix* result = NL_NEW(NLFunctionMatrix);
result->m = m;
result->n = n;
result->type = NL_MATRIX_FUNCTION;
result->destroy_func = (NLDestroyMatrixFunc)nlFunctionMatrixDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlFunctionMatrixMult;
result->matrix_func = func;
return (NLMatrix)result;
}
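/*
 * Illustrative sketch (guarded by #if 0): wrapping a matrix-free operator.
 * Any callback with the NLMatrixFunc signature can stand in for an explicit
 * matrix; scale_by_two and its dimension 3 are hypothetical. The resulting
 * NLMatrix can be used wherever a matrix is expected.
 */
#if 0
static void scale_by_two(const double* x, double* y) {
    NLuint i;
    for(i=0; i<3; ++i) {
        y[i] = 2.0 * x[i]; /* y = 2 I x */
    }
}
/* NLMatrix Op = nlMatrixNewFromFunction(3, 3, scale_by_two); */
#endif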
NLMatrixFunc nlMatrixGetFunction(NLMatrix M) {
if(M == NULL) {
return NULL;
}
if(M->type != NL_MATRIX_FUNCTION) {
return NULL;
}
return ((NLFunctionMatrix*)M)->matrix_func;
}
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
NLMatrixFunc matrix_func;
NLMatrix M;
NLboolean owns_M;
NLMatrix N;
NLboolean owns_N;
NLdouble* work;
} NLMatrixProduct;
static void nlMatrixProductDestroy(NLMatrixProduct* P) {
NL_DELETE_ARRAY(P->work);
if(P->owns_M) {
nlDeleteMatrix(P->M); P->M = NULL;
}
if(P->owns_N) {
nlDeleteMatrix(P->N); P->N = NULL;
}
}
static void nlMatrixProductMult(
NLMatrixProduct* P, const NLdouble* x, NLdouble* y
) {
nlMultMatrixVector(P->N, x, P->work);
nlMultMatrixVector(P->M, P->work, y);
}
NLMatrix nlMatrixNewFromProduct(
NLMatrix M, NLboolean owns_M, NLMatrix N, NLboolean owns_N
) {
NLMatrixProduct* result = NL_NEW(NLMatrixProduct);
nl_assert(M->n == N->m);
result->m = M->m;
result->n = N->n;
result->type = NL_MATRIX_OTHER;
result->work = NL_NEW_ARRAY(NLdouble,N->m);
result->destroy_func = (NLDestroyMatrixFunc)nlMatrixProductDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlMatrixProductMult;
result->M = M;
result->owns_M = owns_M;
result->N = N;
result->owns_N = owns_N;
return (NLMatrix)result;
}
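/*
 * Usage note: the product is applied right-to-left, y = M (N x), with the
 * intermediate vector N x stored in 'work' (of size N->m). The ownership
 * flags decide whether the factors are destroyed together with the
 * product matrix.
 */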
/******* extracted from nl_context.c *******/
NLContextStruct* nlCurrentContext = NULL;
NLContext nlNewContext() {
NLContextStruct* result = NL_NEW(NLContextStruct);
result->state = NL_STATE_INITIAL;
result->solver = NL_SOLVER_DEFAULT;
result->max_iterations = 100;
result->threshold = 1e-6;
result->omega = 1.5;
result->row_scaling = 1.0;
result->inner_iterations = 5;
result->solver_func = nlDefaultSolver;
result->progress_func = NULL;
result->verbose = NL_FALSE;
result->nb_systems = 1;
result->matrix_mode = NL_STIFFNESS_MATRIX;
nlMakeCurrent(result);
return result;
}
void nlDeleteContext(NLContext context_in) {
NLContextStruct* context = (NLContextStruct*)(context_in);
if(nlCurrentContext == context) {
nlCurrentContext = NULL;
}
nlDeleteMatrix(context->M);
context->M = NULL;
nlDeleteMatrix(context->P);
context->P = NULL;
nlDeleteMatrix(context->B);
context->B = NULL;
nlRowColumnDestroy(&context->af);
nlRowColumnDestroy(&context->al);
NL_DELETE_ARRAY(context->variable_value);
NL_DELETE_ARRAY(context->variable_buffer);
NL_DELETE_ARRAY(context->variable_is_locked);
NL_DELETE_ARRAY(context->variable_index);
NL_DELETE_ARRAY(context->x);
NL_DELETE_ARRAY(context->b);
NL_DELETE_ARRAY(context->right_hand_side);
NL_DELETE_ARRAY(context->eigen_value);
#ifdef NL_PARANOID
NL_CLEAR(NLContextStruct, context);
#endif
NL_DELETE(context);
}
void nlMakeCurrent(NLContext context) {
nlCurrentContext = (NLContextStruct*)(context);
}
NLContext nlGetCurrent() {
return nlCurrentContext;
}
/* Finite state automaton */
void nlCheckState(NLenum state) {
nl_assert(nlCurrentContext->state == state);
}
void nlTransition(NLenum from_state, NLenum to_state) {
nlCheckState(from_state);
nlCurrentContext->state = to_state;
}
/* Preconditioner setup and default solver */
static void nlSetupPreconditioner() {
/* Check compatibility between solver and preconditioner */
if(
nlCurrentContext->solver == NL_BICGSTAB &&
nlCurrentContext->preconditioner == NL_PRECOND_SSOR
) {
nlWarning(
"nlSolve",
"cannot use SSOR preconditioner with non-symmetric matrix, "
"switching to Jacobi"
);
nlCurrentContext->preconditioner = NL_PRECOND_JACOBI;
}
if(
nlCurrentContext->solver == NL_GMRES &&
nlCurrentContext->preconditioner != NL_PRECOND_NONE
) {
nlWarning("nlSolve", "Preconditioner not implemented yet for GMRES");
nlCurrentContext->preconditioner = NL_PRECOND_NONE;
}
if(
nlCurrentContext->solver == NL_SUPERLU_EXT &&
nlCurrentContext->preconditioner != NL_PRECOND_NONE
) {
nlWarning("nlSolve", "Preconditioner not implemented yet for SUPERLU");
nlCurrentContext->preconditioner = NL_PRECOND_NONE;
}
if(
nlCurrentContext->solver == NL_CHOLMOD_EXT &&
nlCurrentContext->preconditioner != NL_PRECOND_NONE
) {
nlWarning("nlSolve", "Preconditioner not implemented yet for CHOLMOD");
nlCurrentContext->preconditioner = NL_PRECOND_NONE;
}
if(
nlCurrentContext->solver == NL_PERM_SUPERLU_EXT &&
nlCurrentContext->preconditioner != NL_PRECOND_NONE
) {
nlWarning(
"nlSolve", "Preconditioner not implemented yet for PERMSUPERLU"
);
nlCurrentContext->preconditioner = NL_PRECOND_NONE;
}
if(
nlCurrentContext->solver == NL_SYMMETRIC_SUPERLU_EXT &&
nlCurrentContext->preconditioner != NL_PRECOND_NONE
) {
nlWarning(
"nlSolve", "Preconditioner not implemented yet for PERMSUPERLU"
);
nlCurrentContext->preconditioner = NL_PRECOND_NONE;
}
nlDeleteMatrix(nlCurrentContext->P);
nlCurrentContext->P = NULL;
switch(nlCurrentContext->preconditioner) {
case NL_PRECOND_NONE:
break;
case NL_PRECOND_JACOBI:
nlCurrentContext->P = nlNewJacobiPreconditioner(nlCurrentContext->M);
break;
case NL_PRECOND_SSOR:
nlCurrentContext->P = nlNewSSORPreconditioner(
nlCurrentContext->M,nlCurrentContext->omega
);
break;
case NL_PRECOND_USER:
break;
default:
nl_assert_not_reached;
}
if(nlCurrentContext->preconditioner != NL_PRECOND_SSOR) {
if(getenv("NL_LOW_MEM") == NULL) {
nlMatrixCompress(&nlCurrentContext->M);
}
}
}
static NLboolean nlSolveDirect() {
NLdouble* b = nlCurrentContext->b;
NLdouble* x = nlCurrentContext->x;
NLuint n = nlCurrentContext->n;
NLuint k;
NLMatrix F = nlMatrixFactorize(
nlCurrentContext->M, nlCurrentContext->solver
);
if(F == NULL) {
return NL_FALSE;
}
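    /* The factorization object behaves like M^{-1}: applying it with
       nlMultMatrixVector() solves M x = b, once per right-hand side. */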
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
nlMultMatrixVector(F, b, x);
b += n;
x += n;
}
nlDeleteMatrix(F);
return NL_TRUE;
}
static NLboolean nlSolveIterative() {
NLboolean use_CUDA = NL_FALSE;
NLdouble* b = nlCurrentContext->b;
NLdouble* x = nlCurrentContext->x;
NLuint n = nlCurrentContext->n;
NLuint k;
NLBlas_t blas = nlHostBlas();
NLMatrix M = nlCurrentContext->M;
NLMatrix P = nlCurrentContext->P;
/*
* For CUDA: it is implemented for
* all iterative solvers except GMRES
* Jacobi preconditioner
*/
if(nlExtensionIsInitialized_CUDA() &&
(nlCurrentContext->solver != NL_GMRES) &&
(nlCurrentContext->preconditioner == NL_PRECOND_NONE ||
nlCurrentContext->preconditioner == NL_PRECOND_JACOBI)
) {
if(nlCurrentContext->verbose) {
nl_printf("Using CUDA\n");
}
use_CUDA = NL_TRUE;
blas = nlCUDABlas();
if(nlCurrentContext->preconditioner == NL_PRECOND_JACOBI) {
P = nlCUDAJacobiPreconditionerNewFromCRSMatrix(M);
}
M = nlCUDAMatrixNewFromCRSMatrix(M);
}
/*
* We do not count CUDA transfers and CUDA matrix construction
* when estimating GFlops
*/
nlCurrentContext->start_time = nlCurrentTime();
nlBlasResetStats(blas);
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
nlSolveSystemIterative(
blas,
M,
P,
b,
x,
nlCurrentContext->solver,
nlCurrentContext->threshold,
nlCurrentContext->max_iterations,
nlCurrentContext->inner_iterations
);
b += n;
x += n;
}
nlCurrentContext->flops += blas->flops;
if(use_CUDA) {
nlDeleteMatrix(M);
nlDeleteMatrix(P);
}
return NL_TRUE;
}
NLboolean nlDefaultSolver() {
NLboolean result = NL_TRUE;
nlSetupPreconditioner();
switch(nlCurrentContext->solver) {
case NL_CG:
case NL_BICGSTAB:
case NL_GMRES: {
result = nlSolveIterative();
} break;
case NL_SUPERLU_EXT:
case NL_PERM_SUPERLU_EXT:
case NL_SYMMETRIC_SUPERLU_EXT:
case NL_CHOLMOD_EXT: {
result = nlSolveDirect();
} break;
default:
nl_assert_not_reached;
}
return result;
}
/******* extracted from nl_blas.c *******/
/*
Many warnings about const double* converted to
double* when calling BLAS functions that do not
have the const qualifier in their prototypes.
*/
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wcast-qual"
#pragma GCC diagnostic ignored "-Wcomma"
#endif
#ifndef NL_FORTRAN_WRAP
#define NL_FORTRAN_WRAP(x) x##_
#endif
#ifdef NL_USE_ATLAS
int NL_FORTRAN_WRAP(xerbla)(char *srname, int *info) {
    nl_fprintf(stderr, "** On entry to %6s, parameter number %2d had an illegal value\n",
srname, *info
);
return 0;
}
#ifndef NL_USE_BLAS
#define NL_USE_BLAS
#endif
#endif
#ifdef NL_USE_SUPERLU
#ifndef NL_USE_BLAS
#define NL_USE_BLAS
/*
* The BLAS included in SuperLU does not have DTPSV,
* we use the DTPSV embedded in OpenNL.
*/
#define NEEDS_DTPSV
#endif
#endif
#ifndef NL_USE_BLAS
#define NEEDS_DTPSV
#endif
/* BLAS routines */
/* copy-pasted from CBLAS (i.e. generated from f2c) */
/*
* lsame
* xerbla
* daxpy
* ddot
* dscal
* dnrm2
* dcopy
* dgemv
* dtpsv
*/
typedef NLint integer ;
typedef NLdouble doublereal ;
typedef NLboolean logical ;
typedef NLint ftnlen ;
#ifndef max
#define max(x,y) ((x) > (y) ? (x) : (y))
#endif
#ifndef NL_USE_BLAS
static int NL_FORTRAN_WRAP(lsame)(const char *ca, const char *cb)
{
/* -- LAPACK auxiliary routine (version 2.0) --
Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
Courant Institute, Argonne National Lab, and Rice University
September 30, 1994
Purpose
=======
LSAME returns .TRUE. if CA is the same letter as CB regardless of case.
Arguments
=========
CA (input) CHARACTER*1
CB (input) CHARACTER*1
CA and CB specify the single characters to be compared.
=====================================================================
*/
/* System generated locals */
int ret_val;
/* Local variables */
int inta, intb, zcode;
ret_val = *(unsigned char *)ca == *(unsigned char *)cb;
if (ret_val) {
return ret_val;
}
/* Now test for equivalence if both characters are alphabetic. */
zcode = 'Z';
/* Use 'Z' rather than 'A' so that ASCII can be detected on Prime
machines, on which ICHAR returns a value with bit 8 set.
ICHAR('A') on Prime machines returns 193 which is the same as
ICHAR('A') on an EBCDIC machine. */
inta = *(unsigned char *)ca;
intb = *(unsigned char *)cb;
if (zcode == 90 || zcode == 122) {
/* ASCII is assumed - ZCODE is the ASCII code of either lower or
upper case 'Z'. */
if (inta >= 97 && inta <= 122) inta += -32;
if (intb >= 97 && intb <= 122) intb += -32;
} else if (zcode == 233 || zcode == 169) {
/* EBCDIC is assumed - ZCODE is the EBCDIC code of either lower or
upper case 'Z'. */
if ((inta >= 129 && inta <= 137) ||
(inta >= 145 && inta <= 153) ||
(inta >= 162 && inta <= 169)
)
inta += 64;
if (
(intb >= 129 && intb <= 137) ||
(intb >= 145 && intb <= 153) ||
(intb >= 162 && intb <= 169)
)
intb += 64;
} else if (zcode == 218 || zcode == 250) {
/* ASCII is assumed, on Prime machines - ZCODE is the ASCII code
plus 128 of either lower or upper case 'Z'. */
if (inta >= 225 && inta <= 250) inta += -32;
if (intb >= 225 && intb <= 250) intb += -32;
}
ret_val = inta == intb;
return ret_val;
} /* lsame_ */
/* Subroutine */ static int NL_FORTRAN_WRAP(xerbla)(const char *srname, int *info)
{
/* -- LAPACK auxiliary routine (version 2.0) --
Univ. of Tennessee, Univ. of California Berkeley, NAG Ltd.,
Courant Institute, Argonne National Lab, and Rice University
September 30, 1994
Purpose
=======
XERBLA is an error handler for the LAPACK routines.
It is called by an LAPACK routine if an input parameter has an
invalid value. A message is printed and execution stops.
Installers may consider modifying the STOP statement in order to
call system-specific exception-handling facilities.
Arguments
=========
SRNAME (input) CHARACTER*6
The name of the routine which called XERBLA.
INFO (input) INT
The position of the invalid parameter in the parameter list
of the calling routine.
=====================================================================
*/
nl_fprintf(stderr, "** On entry to %6s, parameter number %2d had an illegal value\n",
srname, *info);
/* End of XERBLA */
return 0;
} /* xerbla_ */
/* Subroutine */ static int NL_FORTRAN_WRAP(daxpy)(integer *n, doublereal *da, doublereal *dx,
integer *incx, doublereal *dy, integer *incy)
{
/* System generated locals */
integer i__1;
/* Local variables */
static integer i, m, ix, iy, mp1;
/* constant times a vector plus a vector.
uses unrolled loops for increments equal to one.
jack dongarra, linpack, 3/11/78.
modified 12/3/93, array(1) declarations changed to array(*)
Parameter adjustments
Function Body */
#define DY(I) dy[(I)-1]
#define DX(I) dx[(I)-1]
if (*n <= 0) {
return 0;
}
if (*da == 0.) {
return 0;
}
if (*incx == 1 && *incy == 1) {
goto L20;
}
/* code for unequal increments or equal increments
not equal to 1 */
ix = 1;
iy = 1;
if (*incx < 0) {
ix = (-(*n) + 1) * *incx + 1;
}
if (*incy < 0) {
iy = (-(*n) + 1) * *incy + 1;
}
i__1 = *n;
for (i = 1; i <= *n; ++i) {
DY(iy) += *da * DX(ix);
ix += *incx;
iy += *incy;
/* L10: */
}
return 0;
/* code for both increments equal to 1
clean-up loop */
L20:
m = *n % 4;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i = 1; i <= m; ++i) {
DY(i) += *da * DX(i);
/* L30: */
}
if (*n < 4) {
return 0;
}
L40:
mp1 = m + 1;
i__1 = *n;
for (i = mp1; i <= *n; i += 4) {
DY(i) += *da * DX(i);
DY(i + 1) += *da * DX(i + 1);
DY(i + 2) += *da * DX(i + 2);
DY(i + 3) += *da * DX(i + 3);
/* L50: */
}
nl_arg_used(i__1);
return 0;
} /* daxpy_ */
#undef DY
#undef DX
static doublereal NL_FORTRAN_WRAP(ddot)(integer *n, doublereal *dx, integer *incx, doublereal *dy,
integer *incy)
{
/* System generated locals */
integer i__1;
doublereal ret_val;
/* Local variables */
static integer i, m;
static doublereal dtemp;
static integer ix, iy, mp1;
/* forms the dot product of two vectors.
uses unrolled loops for increments equal to one.
jack dongarra, linpack, 3/11/78.
modified 12/3/93, array(1) declarations changed to array(*)
Parameter adjustments
Function Body */
#define DY(I) dy[(I)-1]
#define DX(I) dx[(I)-1]
ret_val = 0.;
dtemp = 0.;
if (*n <= 0) {
return ret_val;
}
if (*incx == 1 && *incy == 1) {
goto L20;
}
/* code for unequal increments or equal increments
not equal to 1 */
ix = 1;
iy = 1;
if (*incx < 0) {
ix = (-(*n) + 1) * *incx + 1;
}
if (*incy < 0) {
iy = (-(*n) + 1) * *incy + 1;
}
i__1 = *n;
for (i = 1; i <= *n; ++i) {
dtemp += DX(ix) * DY(iy);
ix += *incx;
iy += *incy;
/* L10: */
}
ret_val = dtemp;
return ret_val;
/* code for both increments equal to 1
clean-up loop */
L20:
m = *n % 5;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i = 1; i <= m; ++i) {
dtemp += DX(i) * DY(i);
/* L30: */
}
if (*n < 5) {
goto L60;
}
L40:
mp1 = m + 1;
i__1 = *n;
for (i = mp1; i <= *n; i += 5) {
dtemp = dtemp + DX(i) * DY(i) + DX(i + 1) * DY(i + 1) + DX(i + 2) *
DY(i + 2) + DX(i + 3) * DY(i + 3) + DX(i + 4) * DY(i + 4);
/* L50: */
}
L60:
ret_val = dtemp;
nl_arg_used(i__1);
return ret_val;
} /* ddot_ */
#undef DY
#undef DX
/* Subroutine */ static int NL_FORTRAN_WRAP(dscal)(integer *n, doublereal *da, doublereal *dx,
integer *incx)
{
/* System generated locals */
integer i__1, i__2;
/* Local variables */
static integer i, m, nincx, mp1;
/* scales a vector by a constant.
uses unrolled loops for increment equal to one.
jack dongarra, linpack, 3/11/78.
modified 3/93 to return if incx .le. 0.
modified 12/3/93, array(1) declarations changed to array(*)
Parameter adjustments
Function Body */
#ifdef DX
#undef DX
#endif
#define DX(I) dx[(I)-1]
if (*n <= 0 || *incx <= 0) {
return 0;
}
if (*incx == 1) {
goto L20;
}
/* code for increment not equal to 1 */
nincx = *n * *incx;
i__1 = nincx;
i__2 = *incx;
for (i = 1; *incx < 0 ? i >= nincx : i <= nincx; i += *incx) {
DX(i) = *da * DX(i);
/* L10: */
}
return 0;
/* code for increment equal to 1
clean-up loop */
L20:
m = *n % 5;
if (m == 0) {
goto L40;
}
i__2 = m;
for (i = 1; i <= m; ++i) {
DX(i) = *da * DX(i);
/* L30: */
}
if (*n < 5) {
return 0;
}
L40:
mp1 = m + 1;
i__2 = *n;
for (i = mp1; i <= *n; i += 5) {
DX(i) = *da * DX(i);
DX(i + 1) = *da * DX(i + 1);
DX(i + 2) = *da * DX(i + 2);
DX(i + 3) = *da * DX(i + 3);
DX(i + 4) = *da * DX(i + 4);
/* L50: */
}
nl_arg_used(i__1);
nl_arg_used(i__2);
return 0;
} /* dscal_ */
#undef DX
static doublereal NL_FORTRAN_WRAP(dnrm2)(integer *n, doublereal *x, integer *incx)
{
/* System generated locals */
integer i__1, i__2;
doublereal ret_val, d__1;
/* Builtin functions */
/* BL: already declared in the included <math.h>,
we do not need it here. */
/*double sqrt(doublereal); */
/* Local variables */
static doublereal norm, scale, absxi;
static integer ix;
static doublereal ssq;
/* DNRM2 returns the euclidean norm of a vector via the function
name, so that
DNRM2 := sqrt( x'*x )
-- This version written on 25-October-1982.
Modified on 14-October-1993 to inline the call to DLASSQ.
Sven Hammarling, Nag Ltd.
Parameter adjustments
Function Body */
#ifdef X
#undef X
#endif
#define X(I) x[(I)-1]
if (*n < 1 || *incx < 1) {
norm = 0.;
} else if (*n == 1) {
norm = fabs(X(1));
} else {
scale = 0.;
ssq = 1.;
/* The following loop is equivalent to this call to the LAPACK
auxiliary routine:
CALL DLASSQ( N, X, INCX, SCALE, SSQ ) */
i__1 = (*n - 1) * *incx + 1;
i__2 = *incx;
for (ix = 1; *incx < 0 ? ix >= (*n-1)**incx+1 : ix <= (*n-1)**incx+1; ix += *incx) {
if (X(ix) != 0.) {
absxi = (d__1 = X(ix), fabs(d__1));
if (scale < absxi) {
/* Computing 2nd power */
d__1 = scale / absxi;
ssq = ssq * (d__1 * d__1) + 1.;
scale = absxi;
} else {
/* Computing 2nd power */
d__1 = absxi / scale;
ssq += d__1 * d__1;
}
}
/* L10: */
}
norm = scale * sqrt(ssq);
}
ret_val = norm;
nl_arg_used(i__1);
nl_arg_used(i__2);
return ret_val;
/* End of DNRM2. */
} /* dnrm2_ */
#undef X
/* Subroutine */ static int NL_FORTRAN_WRAP(dcopy)(integer *n, doublereal *dx, integer *incx,
doublereal *dy, integer *incy)
{
/* System generated locals */
integer i__1;
/* Local variables */
static integer i, m, ix, iy, mp1;
/* copies a vector, x, to a vector, y.
uses unrolled loops for increments equal to one.
jack dongarra, linpack, 3/11/78.
modified 12/3/93, array(1) declarations changed to array(*)
Parameter adjustments
Function Body */
#define DY(I) dy[(I)-1]
#define DX(I) dx[(I)-1]
if (*n <= 0) {
return 0;
}
if (*incx == 1 && *incy == 1) {
goto L20;
}
/* code for unequal increments or equal increments
not equal to 1 */
ix = 1;
iy = 1;
if (*incx < 0) {
ix = (-(*n) + 1) * *incx + 1;
}
if (*incy < 0) {
iy = (-(*n) + 1) * *incy + 1;
}
i__1 = *n;
for (i = 1; i <= *n; ++i) {
DY(iy) = DX(ix);
ix += *incx;
iy += *incy;
/* L10: */
}
return 0;
/* code for both increments equal to 1
clean-up loop */
L20:
m = *n % 7;
if (m == 0) {
goto L40;
}
i__1 = m;
for (i = 1; i <= m; ++i) {
DY(i) = DX(i);
/* L30: */
}
if (*n < 7) {
return 0;
}
L40:
mp1 = m + 1;
i__1 = *n;
for (i = mp1; i <= *n; i += 7) {
DY(i) = DX(i);
DY(i + 1) = DX(i + 1);
DY(i + 2) = DX(i + 2);
DY(i + 3) = DX(i + 3);
DY(i + 4) = DX(i + 4);
DY(i + 5) = DX(i + 5);
DY(i + 6) = DX(i + 6);
/* L50: */
}
nl_arg_used(i__1);
return 0;
} /* dcopy_ */
#undef DX
#undef DY
/* Subroutine */ static int NL_FORTRAN_WRAP(dgemv)(const char *trans, integer *m, integer *n, doublereal *
alpha, doublereal *a, integer *lda, doublereal *x, integer *incx,
doublereal *beta, doublereal *y, integer *incy)
{
/* System generated locals */
/* integer a_dim1, a_offset ; */
integer i__1, i__2;
/* Local variables */
static integer info;
static doublereal temp;
static integer lenx, leny, i, j;
/* extern logical lsame_(char *, char *); */
static integer ix, iy, jx, jy, kx, ky;
/* extern int xerbla_(char *, integer *); */
/* Purpose
=======
DGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or y := alpha*A'*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Parameters
==========
TRANS - CHARACTER*1.
On entry, TRANS specifies the operation to be performed as
follows:
TRANS = 'N' or 'n' y := alpha*A*x + beta*y.
TRANS = 'T' or 't' y := alpha*A'*x + beta*y.
TRANS = 'C' or 'c' y := alpha*A'*x + beta*y.
Unchanged on exit.
M - INTEGER.
On entry, M specifies the number of rows of the matrix A.
M must be at least zero.
Unchanged on exit.
N - INTEGER.
On entry, N specifies the number of columns of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA - DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A - DOUBLE PRECISION array of DIMENSION ( LDA, n ).
Before entry, the leading m by n part of the array A must
contain the matrix of coefficients.
Unchanged on exit.
LDA - INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, m ).
Unchanged on exit.
X - DOUBLE PRECISION array of DIMENSION at least
( 1 + ( n - 1 )*abs( INCX ) ) when TRANS = 'N' or 'n'
and at least
( 1 + ( m - 1 )*abs( INCX ) ) otherwise.
Before entry, the incremented array X must contain the
vector x.
Unchanged on exit.
INCX - INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA - DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y - DOUBLE PRECISION array of DIMENSION at least
( 1 + ( m - 1 )*abs( INCY ) ) when TRANS = 'N' or 'n'
and at least
( 1 + ( n - 1 )*abs( INCY ) ) otherwise.
Before entry with BETA non-zero, the incremented array Y
must contain the vector y. On exit, Y is overwritten by the
updated vector y.
INCY - INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
Level 2 Blas routine.
-- Written on 22-October-1986.
Jack Dongarra, Argonne National Lab.
Jeremy Du Croz, Nag Central Office.
Sven Hammarling, Nag Central Office.
Richard Hanson, Sandia National Labs.
Test the input parameters.
Parameter adjustments
Function Body */
#define X(I) x[(I)-1]
#define Y(I) y[(I)-1]
#define A(I,J) a[(I)-1 + ((J)-1)* ( *lda)]
info = 0;
if (! NL_FORTRAN_WRAP(lsame)(trans, "N") && ! NL_FORTRAN_WRAP(lsame)(trans, "T") && !
NL_FORTRAN_WRAP(lsame)(trans, "C")) {
info = 1;
} else if (*m < 0) {
info = 2;
} else if (*n < 0) {
info = 3;
} else if (*lda < max(1,*m)) {
info = 6;
} else if (*incx == 0) {
info = 8;
} else if (*incy == 0) {
info = 11;
}
if (info != 0) {
NL_FORTRAN_WRAP(xerbla)("DGEMV ", &info);
return 0;
}
/* Quick return if possible. */
if (*m == 0 || *n == 0 || (*alpha == 0. && *beta == 1.)) {
return 0;
}
/* Set LENX and LENY, the lengths of the vectors x and y, and set
up the start points in X and Y. */
if (NL_FORTRAN_WRAP(lsame)(trans, "N")) {
lenx = *n;
leny = *m;
} else {
lenx = *m;
leny = *n;
}
if (*incx > 0) {
kx = 1;
} else {
kx = 1 - (lenx - 1) * *incx;
}
if (*incy > 0) {
ky = 1;
} else {
ky = 1 - (leny - 1) * *incy;
}
/* Start the operations. In this version the elements of A are
accessed sequentially with one pass through A.
First form y := beta*y. */
if (*beta != 1.) {
if (*incy == 1) {
if (*beta == 0.) {
i__1 = leny;
for (i = 1; i <= leny; ++i) {
Y(i) = 0.;
/* L10: */
}
} else {
i__1 = leny;
for (i = 1; i <= leny; ++i) {
Y(i) = *beta * Y(i);
/* L20: */
}
}
} else {
iy = ky;
if (*beta == 0.) {
i__1 = leny;
for (i = 1; i <= leny; ++i) {
Y(iy) = 0.;
iy += *incy;
/* L30: */
}
} else {
i__1 = leny;
for (i = 1; i <= leny; ++i) {
Y(iy) = *beta * Y(iy);
iy += *incy;
/* L40: */
}
}
}
}
if (*alpha == 0.) {
return 0;
}
if (NL_FORTRAN_WRAP(lsame)(trans, "N")) {
/* Form y := alpha*A*x + y. */
jx = kx;
if (*incy == 1) {
i__1 = *n;
for (j = 1; j <= *n; ++j) {
if (X(jx) != 0.) {
temp = *alpha * X(jx);
i__2 = *m;
for (i = 1; i <= *m; ++i) {
Y(i) += temp * A(i,j);
/* L50: */
}
}
jx += *incx;
/* L60: */
}
} else {
i__1 = *n;
for (j = 1; j <= *n; ++j) {
if (X(jx) != 0.) {
temp = *alpha * X(jx);
iy = ky;
i__2 = *m;
for (i = 1; i <= *m; ++i) {
Y(iy) += temp * A(i,j);
iy += *incy;
/* L70: */
}
}
jx += *incx;
/* L80: */
}
}
} else {
/* Form y := alpha*A'*x + y. */
jy = ky;
if (*incx == 1) {
i__1 = *n;
for (j = 1; j <= *n; ++j) {
temp = 0.;
i__2 = *m;
for (i = 1; i <= *m; ++i) {
temp += A(i,j) * X(i);
/* L90: */
}
Y(jy) += *alpha * temp;
jy += *incy;
/* L100: */
}
} else {
i__1 = *n;
for (j = 1; j <= *n; ++j) {
temp = 0.;
ix = kx;
i__2 = *m;
for (i = 1; i <= *m; ++i) {
temp += A(i,j) * X(ix);
ix += *incx;
/* L110: */
}
Y(jy) += *alpha * temp;
jy += *incy;
/* L120: */
}
}
}
nl_arg_used(i__1);
nl_arg_used(i__2);
return 0;
/* End of DGEMV . */
} /* dgemv_ */
#undef X
#undef Y
#undef A
#else
extern void NL_FORTRAN_WRAP(daxpy)(
int *n, double *alpha, double *x,
int *incx, double *y, int *incy
) ;
extern double NL_FORTRAN_WRAP(dnrm2)( int *n, double *x, int *incx ) ;
extern int NL_FORTRAN_WRAP(dcopy)(int* n, double* dx, int* incx, double* dy, int* incy) ;
extern void NL_FORTRAN_WRAP(dscal)(int* n, double* alpha, double *x, int* incx) ;
#ifndef NEEDS_DTPSV
extern void NL_FORTRAN_WRAP(dtpsv)(
char *uplo, char *trans, char *diag,
int *n, double *AP, double *x, int *incx
) ;
#endif
extern void NL_FORTRAN_WRAP(dgemv)(
char *trans, int *m, int *n,
double *alpha, double *A, int *ldA,
double *x, int *incx,
double *beta, double *y, int *incy
) ;
#endif
#ifdef NEEDS_DTPSV
/* DECK DTPSV */
/* Subroutine */
static int NL_FORTRAN_WRAP(dtpsv)(
const char* uplo,
const char* trans,
const char* diag,
integer* n,
doublereal* ap,
doublereal* x,
integer* incx
) {
/* System generated locals */
integer i__1, i__2;
/* Local variables */
static integer info;
static doublereal temp;
static integer i__, j, k;
/* extern logical lsame_(); */
static integer kk, ix, jx, kx;
/* extern int xerbla_(); */
static logical nounit;
/* ***BEGIN PROLOGUE DTPSV */
/* ***PURPOSE Solve one of the systems of equations. */
/* ***LIBRARY SLATEC (BLAS) */
/* ***CATEGORY D1B4 */
/* ***TYPE DOUBLE PRECISION (STPSV-S, DTPSV-D, CTPSV-C) */
/* ***KEYWORDS LEVEL 2 BLAS, LINEAR ALGEBRA */
/* ***AUTHOR Dongarra, J. J., (ANL) */
/* Du Croz, J., (NAG) */
/* Hammarling, S., (NAG) */
/* Hanson, R. J., (SNLA) */
/* ***DESCRIPTION */
/* DTPSV solves one of the systems of equations */
/* A*x = b, or A'*x = b, */
/* where b and x are n element vectors and A is an n by n unit, or */
/* non-unit, upper or lower triangular matrix, supplied in packed form. */
/* No test for singularity or near-singularity is included in this */
/* routine. Such tests must be performed before calling this routine. */
/* Parameters */
/* ========== */
/* UPLO - CHARACTER*1. */
/* On entry, UPLO specifies whether the matrix is an upper or */
/* lower triangular matrix as follows: */
/* UPLO = 'U' or 'u' A is an upper triangular matrix. */
/* UPLO = 'L' or 'l' A is a lower triangular matrix. */
/* Unchanged on exit. */
/* TRANS - CHARACTER*1. */
/* On entry, TRANS specifies the equations to be solved as */
/* follows: */
/* TRANS = 'N' or 'n' A*x = b. */
/* TRANS = 'T' or 't' A'*x = b. */
/* TRANS = 'C' or 'c' A'*x = b. */
/* Unchanged on exit. */
/* DIAG - CHARACTER*1. */
/* On entry, DIAG specifies whether or not A is unit */
/* triangular as follows: */
/* DIAG = 'U' or 'u' A is assumed to be unit triangular. */
/* DIAG = 'N' or 'n' A is not assumed to be unit */
/* triangular. */
/* Unchanged on exit. */
/* N - INTEGER. */
/* On entry, N specifies the order of the matrix A. */
/* N must be at least zero. */
/* Unchanged on exit. */
/* AP - DOUBLE PRECISION array of DIMENSION at least */
/* ( ( n*( n + 1))/2). */
/* Before entry with UPLO = 'U' or 'u', the array AP must */
/* contain the upper triangular matrix packed sequentially, */
/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
/* AP( 2 ) and AP( 3 ) contain a( 1, 2 ) and a( 2, 2 ) */
/* respectively, and so on. */
/* Before entry with UPLO = 'L' or 'l', the array AP must */
/* contain the lower triangular matrix packed sequentially, */
/* column by column, so that AP( 1 ) contains a( 1, 1 ), */
/* AP( 2 ) and AP( 3 ) contain a( 2, 1 ) and a( 3, 1 ) */
/* respectively, and so on. */
/* Note that when DIAG = 'U' or 'u', the diagonal elements of */
/* A are not referenced, but are assumed to be unity. */
/* Unchanged on exit. */
/* X - DOUBLE PRECISION array of dimension at least */
/* ( 1 + ( n - 1 )*abs( INCX ) ). */
/* Before entry, the incremented array X must contain the n */
/* element right-hand side vector b. On exit, X is overwritten */
/* with the solution vector x. */
/* INCX - INTEGER. */
/* On entry, INCX specifies the increment for the elements of */
/* X. INCX must not be zero. */
/* Unchanged on exit. */
/* ***REFERENCES Dongarra, J. J., Du Croz, J., Hammarling, S., and */
/* Hanson, R. J. An extended set of Fortran basic linear */
/* algebra subprograms. ACM TOMS, Vol. 14, No. 1, */
/* pp. 1-17, March 1988. */
/* ***ROUTINES CALLED LSAME, XERBLA */
/* ***REVISION HISTORY (YYMMDD) */
/* 861022 DATE WRITTEN */
/* 910605 Modified to meet SLATEC prologue standards. Only comment */
/* lines were modified. (BKS) */
/* ***END PROLOGUE DTPSV */
/* .. Scalar Arguments .. */
/* .. Array Arguments .. */
/* .. Parameters .. */
/* .. Local Scalars .. */
/* .. External Functions .. */
/* .. External Subroutines .. */
/* ***FIRST EXECUTABLE STATEMENT DTPSV */
/* Test the input parameters. */
/* Parameter adjustments */
--x;
--ap;
/* Function Body */
info = 0;
if (!NL_FORTRAN_WRAP(lsame)(uplo, "U") &&
!NL_FORTRAN_WRAP(lsame)(uplo, "L")
) {
info = 1;
} else if (
!NL_FORTRAN_WRAP(lsame)(trans, "N") &&
!NL_FORTRAN_WRAP(lsame)(trans, "T") &&
!NL_FORTRAN_WRAP(lsame)(trans, "C")
) {
info = 2;
} else if (
!NL_FORTRAN_WRAP(lsame)(diag, "U") &&
!NL_FORTRAN_WRAP(lsame)(diag, "N")
) {
info = 3;
} else if (*n < 0) {
info = 4;
} else if (*incx == 0) {
info = 7;
}
if (info != 0) {
NL_FORTRAN_WRAP(xerbla)("DTPSV ", &info);
return 0;
}
/* Quick return if possible. */
if (*n == 0) {
return 0;
}
nounit = (logical)(NL_FORTRAN_WRAP(lsame)(diag, "N"));
/* Set up the start point in X if the increment is not unity. This */
/* will be ( N - 1 )*INCX too small for descending loops. */
if (*incx <= 0) {
kx = 1 - (*n - 1) * *incx;
} else if (*incx != 1) {
kx = 1;
}
/* Start the operations. In this version the elements of AP are */
/* accessed sequentially with one pass through AP. */
if (NL_FORTRAN_WRAP(lsame)(trans, "N")) {
/* Form x := inv( A )*x. */
if (NL_FORTRAN_WRAP(lsame)(uplo, "U")) {
kk = *n * (*n + 1) / 2;
if (*incx == 1) {
for (j = *n; j >= 1; --j) {
if (x[j] != 0.) {
if (nounit) {
x[j] /= ap[kk];
}
temp = x[j];
k = kk - 1;
for (i__ = j - 1; i__ >= 1; --i__) {
x[i__] -= temp * ap[k];
--k;
/* L10: */
}
}
kk -= j;
/* L20: */
}
} else {
jx = kx + (*n - 1) * *incx;
for (j = *n; j >= 1; --j) {
if (x[jx] != 0.) {
if (nounit) {
x[jx] /= ap[kk];
}
temp = x[jx];
ix = jx;
i__1 = kk - j + 1;
for (k = kk - 1; k >= i__1; --k) {
ix -= *incx;
x[ix] -= temp * ap[k];
/* L30: */
}
}
jx -= *incx;
kk -= j;
/* L40: */
}
}
} else {
kk = 1;
if (*incx == 1) {
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
if (x[j] != 0.) {
if (nounit) {
x[j] /= ap[kk];
}
temp = x[j];
k = kk + 1;
i__2 = *n;
for (i__ = j + 1; i__ <= i__2; ++i__) {
x[i__] -= temp * ap[k];
++k;
/* L50: */
}
}
kk += *n - j + 1;
/* L60: */
}
} else {
jx = kx;
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
if (x[jx] != 0.) {
if (nounit) {
x[jx] /= ap[kk];
}
temp = x[jx];
ix = jx;
i__2 = kk + *n - j;
for (k = kk + 1; k <= i__2; ++k) {
ix += *incx;
x[ix] -= temp * ap[k];
/* L70: */
}
}
jx += *incx;
kk += *n - j + 1;
/* L80: */
}
}
}
} else {
/* Form x := inv( A' )*x. */
if (NL_FORTRAN_WRAP(lsame)(uplo, "U")) {
kk = 1;
if (*incx == 1) {
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
temp = x[j];
k = kk;
i__2 = j - 1;
for (i__ = 1; i__ <= i__2; ++i__) {
temp -= ap[k] * x[i__];
++k;
/* L90: */
}
if (nounit) {
temp /= ap[kk + j - 1];
}
x[j] = temp;
kk += j;
/* L100: */
}
} else {
jx = kx;
i__1 = *n;
for (j = 1; j <= i__1; ++j) {
temp = x[jx];
ix = kx;
i__2 = kk + j - 2;
for (k = kk; k <= i__2; ++k) {
temp -= ap[k] * x[ix];
ix += *incx;
/* L110: */
}
if (nounit) {
temp /= ap[kk + j - 1];
}
x[jx] = temp;
jx += *incx;
kk += j;
/* L120: */
}
}
} else {
kk = *n * (*n + 1) / 2;
if (*incx == 1) {
for (j = *n; j >= 1; --j) {
temp = x[j];
k = kk;
i__1 = j + 1;
for (i__ = *n; i__ >= i__1; --i__) {
temp -= ap[k] * x[i__];
--k;
/* L130: */
}
if (nounit) {
temp /= ap[kk - *n + j];
}
x[j] = temp;
kk -= *n - j + 1;
/* L140: */
}
} else {
kx += (*n - 1) * *incx;
jx = kx;
for (j = *n; j >= 1; --j) {
temp = x[jx];
ix = kx;
i__1 = kk - (*n - (j + 1));
for (k = kk; k >= i__1; --k) {
temp -= ap[k] * x[ix];
ix -= *incx;
/* L150: */
}
if (nounit) {
temp /= ap[kk - *n + j];
}
x[jx] = temp;
jx -= *incx;
kk -= *n - j + 1;
/* L160: */
}
}
}
}
return 0;
/* End of DTPSV . */
} /* dtpsv_ */
#endif
/* End of BLAS routines */
/* Abstract BLAS interface */
void nlBlasResetStats(NLBlas_t blas) {
blas->start_time = nlCurrentTime();
blas->flops = 0;
blas->used_ram[0] = 0;
blas->used_ram[1] = 0;
blas->max_used_ram[0] = 0;
blas->max_used_ram[1] = 0;
blas->sq_rnorm = 0.0;
blas->sq_bnorm = 0.0;
}
double nlBlasGFlops(NLBlas_t blas) {
    double now = nlCurrentTime();
    double elapsed_time = now - blas->start_time;
    /* Avoid dividing by zero when called right after nlBlasResetStats(). */
    return (elapsed_time == 0.0) ? 0.0
        : (NLdouble)(blas->flops) / (elapsed_time * 1e9);
}
NLulong nlBlasUsedRam(NLBlas_t blas, NLmemoryType type) {
return blas->used_ram[type];
}
NLulong nlBlasMaxUsedRam(NLBlas_t blas, NLmemoryType type) {
return blas->max_used_ram[type];
}
NLboolean nlBlasHasUnifiedMemory(NLBlas_t blas) {
return blas->has_unified_memory;
}
static void* host_blas_malloc(
NLBlas_t blas, NLmemoryType type, size_t size
) {
nl_arg_used(type);
blas->used_ram[type] += (NLulong)size;
blas->max_used_ram[type] = MAX(
blas->max_used_ram[type],blas->used_ram[type]
);
return malloc(size);
}
static void host_blas_free(
NLBlas_t blas, NLmemoryType type, size_t size, void* ptr
) {
nl_arg_used(type);
blas->used_ram[type] -= (NLulong)size;
free(ptr);
}
static void host_blas_memcpy(
NLBlas_t blas,
void* to, NLmemoryType to_type,
void* from, NLmemoryType from_type,
size_t size
) {
nl_arg_used(blas);
nl_arg_used(to_type);
nl_arg_used(from_type);
memcpy(to,from,size);
}
static void host_blas_dcopy(
NLBlas_t blas, int n, const double *x, int incx, double *y, int incy
) {
nl_arg_used(blas);
NL_FORTRAN_WRAP(dcopy)(&n,(double*)x,&incx,y,&incy);
}
static double host_blas_ddot(
NLBlas_t blas, int n, const double *x, int incx, const double *y, int incy
) {
blas->flops += (NLulong)(2*n);
return NL_FORTRAN_WRAP(ddot)(&n,(double*)x,&incx,(double*)y,&incy);
}
static double host_blas_dnrm2(
NLBlas_t blas, int n, const double *x, int incx
) {
blas->flops += (NLulong)(2*n);
return NL_FORTRAN_WRAP(dnrm2)(&n,(double*)x,&incx);
}
static void host_blas_daxpy(
NLBlas_t blas, int n, double a, const double *x, int incx, double *y, int incy
) {
blas->flops += (NLulong)(2*n);
NL_FORTRAN_WRAP(daxpy)(&n,&a,(double*)x,&incx,y,&incy);
}
static void host_blas_dscal(
NLBlas_t blas, int n, double a, double *x, int incx
) {
blas->flops += (NLulong)n;
NL_FORTRAN_WRAP(dscal)(&n,&a,x,&incx);
}
static void host_blas_dgemv(
NLBlas_t blas, MatrixTranspose trans, int m, int n, double alpha,
const double *A, int ldA, const double *x, int incx,
double beta, double *y, int incy
) {
static const char *T[3] = { "N", "T", 0 };
nl_arg_used(blas);
NL_FORTRAN_WRAP(dgemv)(
T[(int)trans],&m,&n,&alpha,(double*)A,&ldA,
(double*)x,&incx,&beta,y,&incy
);
/* TODO: update flops */
}
static void host_blas_dtpsv(
NLBlas_t blas, MatrixTriangle uplo, MatrixTranspose trans,
MatrixUnitTriangular diag, int n, const double *AP,
double *x, int incx
) {
static const char *UL[2] = { "U", "L" };
static const char *T[3] = { "N", "T", 0 };
static const char *D[2] = { "U", "N" };
nl_arg_used(blas);
NL_FORTRAN_WRAP(dtpsv)(
UL[(int)uplo],T[(int)trans],D[(int)diag],&n,(double*)AP,x,&incx
);
/* TODO: update flops */
}
NLBlas_t nlHostBlas() {
static NLboolean initialized = NL_FALSE;
static struct NLBlas blas;
if(!initialized) {
memset(&blas, 0, sizeof(blas));
blas.has_unified_memory = NL_TRUE;
blas.Malloc = host_blas_malloc;
blas.Free = host_blas_free;
blas.Memcpy = host_blas_memcpy;
blas.Dcopy = host_blas_dcopy;
blas.Ddot = host_blas_ddot;
blas.Dnrm2 = host_blas_dnrm2;
blas.Daxpy = host_blas_daxpy;
blas.Dscal = host_blas_dscal;
blas.Dgemv = host_blas_dgemv;
blas.Dtpsv = host_blas_dtpsv;
nlBlasResetStats(&blas);
initialized = NL_TRUE;
}
return &blas;
}
/******* extracted from nl_iterative_solvers.c *******/
/* Solvers */
/*
* The implementation of the solvers is inspired by
* the lsolver library, by Christian Badura, available from:
 * http://www.mathematik.uni-freiburg.de/IAM/Research/projectskr/lin_solver/
*
* About the Conjugate Gradient, details can be found in:
* Ashby, Manteuffel, Saylor
 *     A taxonomy for conjugate gradient methods
* SIAM J Numer Anal 27, 1542-1568 (1990)
*
* This version is completely abstract, the same code can be used for
* CPU/GPU, dense matrix / sparse matrix etc...
* Abstraction is realized through:
* - Abstract blas interface (NLBlas_t), that can implement BLAS
* operations on the CPU or on the GPU.
* - Abstract matrix interface (NLMatrix), that can implement different
* versions of matrix x vector product (CPU/GPU, sparse/dense ...)
*/
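/*
 * Illustrative sketch (in comment form): the same driver runs on any
 * NLBlas_t / NLMatrix pair. Assuming a matrix A and host arrays b and x of
 * size n, a plain CG solve with relative tolerance 1e-8 and at most 1000
 * iterations would read:
 *
 *   nlSolveSystemIterative(nlHostBlas(), A, NULL, b, x, NL_CG, 1e-8, 1000, 0);
 *
 * Passing a preconditioner matrix instead of NULL selects the
 * preconditioned variants below.
 */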
static NLuint nlSolveSystem_CG(
NLBlas_t blas,
NLMatrix M, NLdouble* b, NLdouble* x,
double eps, NLuint max_iter
) {
NLint N = (NLint)M->m;
NLdouble *g = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *r = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *p = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLuint its=0;
NLdouble t, tau, sig, rho, gam;
NLdouble b_square=blas->Ddot(blas,N,b,1,b,1);
NLdouble err=eps*eps*b_square;
NLdouble curr_err;
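    /*
     * Variable roles in this CG variant: g is the residual b - M x,
     * r is the search direction, and p = M r. Convergence is tested on
     * ||g||^2 against eps^2 * ||b||^2, i.e. a relative residual criterion.
     */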
nlMultMatrixVector(M,x,g);
blas->Daxpy(blas,N,-1.,b,1,g,1);
blas->Dscal(blas,N,-1.,g,1);
blas->Dcopy(blas,N,g,1,r,1);
curr_err = blas->Ddot(blas,N,g,1,g,1);
while ( curr_err >err && its < max_iter) {
if(nlCurrentContext != NULL) {
if(nlCurrentContext->progress_func != NULL) {
nlCurrentContext->progress_func(its, max_iter, curr_err, err);
}
if(nlCurrentContext->verbose && !(its % 100)) {
nl_printf ( "%d : %.10e -- %.10e\n", its, curr_err, err );
}
}
nlMultMatrixVector(M,r,p);
rho=blas->Ddot(blas,N,p,1,p,1);
sig=blas->Ddot(blas,N,r,1,p,1);
tau=blas->Ddot(blas,N,g,1,r,1);
t=tau/sig;
blas->Daxpy(blas,N,t,r,1,x,1);
blas->Daxpy(blas,N,-t,p,1,g,1);
gam=(t*t*rho-tau)/tau;
blas->Dscal(blas,N,gam,r,1);
blas->Daxpy(blas,N,1.,g,1,r,1);
++its;
curr_err = blas->Ddot(blas,N,g,1,g,1);
}
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, g);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, r);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, p);
blas->sq_bnorm = b_square;
blas->sq_rnorm = curr_err;
return its;
}
static NLuint nlSolveSystem_PRE_CG(
NLBlas_t blas,
NLMatrix M, NLMatrix P, NLdouble* b, NLdouble* x,
double eps, NLuint max_iter
) {
NLint N = (NLint)M->n;
NLdouble* r = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble* d = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble* h = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *Ad = h;
NLuint its=0;
NLdouble rh, alpha, beta;
NLdouble b_square = blas->Ddot(blas,N,b,1,b,1);
NLdouble err=eps*eps*b_square;
NLdouble curr_err;
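    /*
     * Preconditioned CG: r is the residual M x - b (note the opposite sign
     * of g in the plain CG above, hence the -alpha updates of x), d is the
     * search direction, h = P r the preconditioned residual, and Ad aliases
     * h's buffer to hold M d between the two uses of that storage.
     */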
nlMultMatrixVector(M,x,r);
blas->Daxpy(blas,N,-1.,b,1,r,1);
nlMultMatrixVector(P,r,d);
blas->Dcopy(blas,N,d,1,h,1);
rh=blas->Ddot(blas,N,r,1,h,1);
curr_err = blas->Ddot(blas,N,r,1,r,1);
while ( curr_err >err && its < max_iter) {
if(nlCurrentContext != NULL) {
if(nlCurrentContext->progress_func != NULL) {
nlCurrentContext->progress_func(its, max_iter, curr_err, err);
}
if( nlCurrentContext->verbose && !(its % 100)) {
nl_printf ( "%d : %.10e -- %.10e\n", its, curr_err, err );
}
}
nlMultMatrixVector(M,d,Ad);
alpha=rh/blas->Ddot(blas,N,d,1,Ad,1);
blas->Daxpy(blas,N,-alpha,d,1,x,1);
blas->Daxpy(blas,N,-alpha,Ad,1,r,1);
nlMultMatrixVector(P,r,h);
beta=1./rh;
rh=blas->Ddot(blas,N,r,1,h,1);
beta*=rh;
blas->Dscal(blas,N,beta,d,1);
blas->Daxpy(blas,N,1.,h,1,d,1);
++its;
curr_err = blas->Ddot(blas,N,r,1,r,1);
}
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, r);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, d);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, h);
blas->sq_bnorm = b_square;
blas->sq_rnorm = curr_err;
return its;
}
static NLuint nlSolveSystem_BICGSTAB(
NLBlas_t blas,
NLMatrix M, NLdouble* b, NLdouble* x,
double eps, NLuint max_iter
) {
NLint N = (NLint)M->n;
NLdouble *rT = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *d = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *h = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *u = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *Ad = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *t = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *s = h;
NLdouble rTh, rTAd, rTr, alpha, beta, omega, st, tt;
NLuint its=0;
NLdouble b_square = blas->Ddot(blas,N,b,1,b,1);
NLdouble err=eps*eps*b_square;
NLdouble *r = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
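    /*
     * BICGSTAB: rT is the fixed shadow residual, d the search direction,
     * s (aliasing h's buffer) the intermediate residual, and omega = st/tt
     * the stabilization parameter; the 1e-40 guards below avoid breakdown
     * divisions. The vector u is updated but its value is never consumed.
     */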
nlMultMatrixVector(M,x,r);
blas->Daxpy(blas,N,-1.,b,1,r,1);
blas->Dcopy(blas,N,r,1,d,1);
blas->Dcopy(blas,N,d,1,h,1);
blas->Dcopy(blas,N,h,1,rT,1);
nl_assert( blas->Ddot(blas,N,rT,1,rT,1)>1e-40 );
rTh=blas->Ddot(blas,N,rT,1,h,1);
rTr=blas->Ddot(blas,N,r,1,r,1);
while ( rTr>err && its < max_iter) {
if(nlCurrentContext != NULL) {
if(nlCurrentContext->progress_func != NULL) {
nlCurrentContext->progress_func(its, max_iter, rTr, err);
}
if( (nlCurrentContext->verbose) && !(its % 100)) {
nl_printf ( "%d : %.10e -- %.10e\n", its, rTr, err );
}
}
nlMultMatrixVector(M,d,Ad);
rTAd=blas->Ddot(blas,N,rT,1,Ad,1);
nl_assert( fabs(rTAd)>1e-40 );
alpha=rTh/rTAd;
blas->Daxpy(blas,N,-alpha,Ad,1,r,1);
blas->Dcopy(blas,N,h,1,s,1);
blas->Daxpy(blas,N,-alpha,Ad,1,s,1);
nlMultMatrixVector(M,s,t);
blas->Daxpy(blas,N,1.,t,1,u,1);
blas->Dscal(blas,N,alpha,u,1);
st=blas->Ddot(blas,N,s,1,t,1);
tt=blas->Ddot(blas,N,t,1,t,1);
if ( fabs(st)<1e-40 || fabs(tt)<1e-40 ) {
omega = 0.;
} else {
omega = st/tt;
}
blas->Daxpy(blas,N,-omega,t,1,r,1);
blas->Daxpy(blas,N,-alpha,d,1,x,1);
blas->Daxpy(blas,N,-omega,s,1,x,1);
blas->Dcopy(blas,N,s,1,h,1);
blas->Daxpy(blas,N,-omega,t,1,h,1);
beta=(alpha/omega)/rTh;
rTh=blas->Ddot(blas,N,rT,1,h,1);
beta*=rTh;
blas->Dscal(blas,N,beta,d,1);
blas->Daxpy(blas,N,1.,h,1,d,1);
blas->Daxpy(blas,N,-beta*omega,Ad,1,d,1);
rTr=blas->Ddot(blas,N,r,1,r,1);
++its;
}
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, r);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, rT);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, d);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, h);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, u);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, Ad);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, t);
blas->sq_bnorm = b_square;
blas->sq_rnorm = rTr;
return its;
}
static NLuint nlSolveSystem_PRE_BICGSTAB(
NLBlas_t blas,
NLMatrix M, NLMatrix P, NLdouble* b, NLdouble* x,
double eps, NLuint max_iter
) {
NLint N = (NLint)M->n;
NLdouble *rT = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *d = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *h = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *u = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *Sd = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *t = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *aux = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
NLdouble *s = h;
NLdouble rTh, rTSd, rTr, alpha, beta, omega, st, tt;
NLuint its=0;
NLdouble b_square = blas->Ddot(blas,N,b,1,b,1);
NLdouble err = eps*eps*b_square;
NLdouble *r = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, N);
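    /* Same recurrence as nlSolveSystem_BICGSTAB above, with every matrix
       application followed by the preconditioner P (Sd = P M d, t = P M s). */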
nlMultMatrixVector(M,x,r);
blas->Daxpy(blas,N,-1.,b,1,r,1);
nlMultMatrixVector(P,r,d);
blas->Dcopy(blas,N,d,1,h,1);
blas->Dcopy(blas,N,h,1,rT,1);
nl_assert( blas->Ddot(blas,N,rT,1,rT,1)>1e-40 );
rTh=blas->Ddot(blas,N,rT,1,h,1);
rTr=blas->Ddot(blas,N,r,1,r,1);
while ( rTr>err && its < max_iter) {
if(nlCurrentContext != NULL) {
if(nlCurrentContext->progress_func != NULL) {
nlCurrentContext->progress_func(its, max_iter, rTr, err);
}
if( (nlCurrentContext->verbose) && !(its % 100)) {
nl_printf ( "%d : %.10e -- %.10e\n", its, rTr, err );
}
}
nlMultMatrixVector(M,d,aux);
nlMultMatrixVector(P,aux,Sd);
rTSd=blas->Ddot(blas,N,rT,1,Sd,1);
nl_assert( fabs(rTSd)>1e-40 );
alpha=rTh/rTSd;
blas->Daxpy(blas,N,-alpha,aux,1,r,1);
blas->Dcopy(blas,N,h,1,s,1);
blas->Daxpy(blas,N,-alpha,Sd,1,s,1);
nlMultMatrixVector(M,s,aux);
nlMultMatrixVector(P,aux,t);
blas->Daxpy(blas,N,1.,t,1,u,1);
blas->Dscal(blas,N,alpha,u,1);
st=blas->Ddot(blas,N,s,1,t,1);
tt=blas->Ddot(blas,N,t,1,t,1);
if ( fabs(st)<1e-40 || fabs(tt)<1e-40 ) {
omega = 0.;
} else {
omega = st/tt;
}
blas->Daxpy(blas,N,-omega,aux,1,r,1);
blas->Daxpy(blas,N,-alpha,d,1,x,1);
blas->Daxpy(blas,N,-omega,s,1,x,1);
blas->Dcopy(blas,N,s,1,h,1);
blas->Daxpy(blas,N,-omega,t,1,h,1);
beta=(alpha/omega)/rTh;
rTh=blas->Ddot(blas,N,rT,1,h,1);
beta*=rTh;
blas->Dscal(blas,N,beta,d,1);
blas->Daxpy(blas,N,1.,h,1,d,1);
blas->Daxpy(blas,N,-beta*omega,Sd,1,d,1);
rTr=blas->Ddot(blas,N,r,1,r,1);
++its;
}
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, r);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, rT);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, d);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, h);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, u);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, Sd);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, t);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, N, aux);
blas->sq_bnorm = b_square;
blas->sq_rnorm = rTr;
return its;
}
/*
* Note: this one cannot be executed on device (GPU)
* because it directly manipulates the vectors.
*/
static NLuint nlSolveSystem_GMRES(
NLBlas_t blas,
NLMatrix M, NLdouble* b, NLdouble* x,
double eps, NLuint max_iter, NLuint inner_iter
) {
NLint n = (NLint)M->n;
NLint m = (NLint)inner_iter;
typedef NLdouble *NLdoubleP;
NLdouble *V = NL_NEW_ARRAY(NLdouble, n*(m+1) );
NLdouble *U = NL_NEW_ARRAY(NLdouble, m*(m+1)/2 );
NLdouble *r = NL_NEW_ARRAY(NLdouble, n );
NLdouble *y = NL_NEW_ARRAY(NLdouble, m+1 );
NLdouble *c = NL_NEW_ARRAY(NLdouble, m );
NLdouble *s = NL_NEW_ARRAY(NLdouble, m );
NLdouble **v = NL_NEW_ARRAY(NLdoubleP, m+1 );
NLint i, j, io, uij, u0j;
NLint its = -1;
NLdouble beta, h, rd, dd, nrm2b;
/*
* The way it is written, this routine will not
* work on the GPU since it directly modifies the
* vectors.
*/
nl_assert(nlBlasHasUnifiedMemory(blas));
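    /*
     * Restarted GMRES(m): each outer iteration rebuilds a Krylov basis of
     * at most m = inner_iter vectors (the columns of V), while U stores the
     * packed upper triangular matrix of the Givens-rotated Hessenberg
     * system, c and s the rotation coefficients, and y the rotated
     * right-hand side; |y[j]| is the current residual norm.
     */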
for ( i=0; i<=m; ++i ){
v[i]=V+i*n;
}
nrm2b=blas->Dnrm2(blas,n,b,1);
io=0;
do { /* outer loop */
++io;
nlMultMatrixVector(M,x,r);
blas->Daxpy(blas,n,-1.,b,1,r,1);
beta=blas->Dnrm2(blas,n,r,1);
blas->Dcopy(blas,n,r,1,v[0],1);
blas->Dscal(blas,n,1./beta,v[0],1);
y[0]=beta;
j=0;
uij=0;
do { /* inner loop: j=0,...,m-1 */
u0j=uij;
nlMultMatrixVector(M,v[j],v[j+1]);
blas->Dgemv(
blas,Transpose,n,j+1,1.,V,n,v[j+1],1,0.,U+u0j,1
);
blas->Dgemv(
blas,NoTranspose,n,j+1,-1.,V,n,U+u0j,1,1.,v[j+1],1
);
h=blas->Dnrm2(blas,n,v[j+1],1);
blas->Dscal(blas,n,1./h,v[j+1],1);
for (i=0; i<j; ++i ) { /* rotiere neue Spalte */
double tmp = c[i]*U[uij]-s[i]*U[uij+1];
U[uij+1] = s[i]*U[uij]+c[i]*U[uij+1];
U[uij] = tmp;
++uij;
}
            { /* compute the new Givens rotation */
rd = U[uij];
dd = sqrt(rd*rd+h*h);
c[j] = rd/dd;
s[j] = -h/dd;
U[uij] = dd;
++uij;
}
            { /* rotate the right-hand side y (previously: y[j+1]=0) */
y[j+1] = s[j]*y[j];
y[j] = c[j]*y[j];
}
++j;
} while (
j<m && fabs(y[j])>=eps*nrm2b
);
        { /* minimize with respect to y */
blas->Dtpsv(
blas,
UpperTriangle,
NoTranspose,
NotUnitTriangular,
j,U,y,1
);
/* correct X */
blas->Dgemv(blas,NoTranspose,n,j,-1.,V,n,y,1,1.,x,1);
}
} while ( fabs(y[j])>=eps*nrm2b && (m*(io-1)+j) < (NLint)max_iter);
/* Count the inner iterations */
its = m*(io-1)+j;
blas->sq_bnorm = nrm2b*nrm2b;
blas->sq_rnorm = y[j]*y[j];
    /* These arrays were allocated with NL_NEW_ARRAY (host memory, of
       differing sizes), so they are released with NL_DELETE_ARRAY rather
       than with the device-memory vector deallocator. */
    NL_DELETE_ARRAY(V);
    NL_DELETE_ARRAY(U);
    NL_DELETE_ARRAY(r);
    NL_DELETE_ARRAY(y);
    NL_DELETE_ARRAY(c);
    NL_DELETE_ARRAY(s);
    NL_DELETE_ARRAY(v);
return (NLuint)its;
}
/* Main driver routine */
NLuint nlSolveSystemIterative(
NLBlas_t blas,
NLMatrix M, NLMatrix P, NLdouble* b_in, NLdouble* x_in,
NLenum solver,
double eps, NLuint max_iter, NLuint inner_iter
) {
NLuint N = M->n;
NLuint result=0;
NLdouble rnorm=0.0;
NLdouble bnorm=0.0;
double* b = b_in;
double* x = x_in;
nl_assert(M->m == M->n);
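    /*
     * If the BLAS context does not share memory with the host (e.g. the
     * CUDA backend), mirror b and x in device memory, and copy the
     * solution back to x_in once the solver has finished.
     */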
if(!nlBlasHasUnifiedMemory(blas)) {
b = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, (int)M->n);
blas->Memcpy(
blas,
b, NL_DEVICE_MEMORY,
b_in, NL_HOST_MEMORY, (size_t)N*sizeof(double)
);
x = NL_NEW_VECTOR(blas, NL_DEVICE_MEMORY, (int)M->n);
blas->Memcpy(
blas,
x, NL_DEVICE_MEMORY,
x_in, NL_HOST_MEMORY, (size_t)N*sizeof(double)
);
}
switch(solver) {
case NL_CG:
if(P == NULL) {
result = nlSolveSystem_CG(blas,M,b,x,eps,max_iter);
} else {
result = nlSolveSystem_PRE_CG(blas,M,P,b,x,eps,max_iter);
}
break;
case NL_BICGSTAB:
if(P == NULL) {
result = nlSolveSystem_BICGSTAB(blas,M,b,x,eps,max_iter);
} else {
result = nlSolveSystem_PRE_BICGSTAB(blas,M,P,b,x,eps,max_iter);
}
break;
case NL_GMRES:
result = nlSolveSystem_GMRES(blas,M,b,x,eps,max_iter,inner_iter);
break;
default:
nl_assert_not_reached;
}
/* Get residual norm and rhs norm from BLAS context */
if(nlCurrentContext != NULL) {
bnorm = sqrt(blas->sq_bnorm);
rnorm = sqrt(blas->sq_rnorm);
if(bnorm == 0.0) {
nlCurrentContext->error = rnorm;
if(nlCurrentContext->verbose) {
nl_printf("in OpenNL : ||Ax-b|| = %e\n",nlCurrentContext->error);
}
} else {
nlCurrentContext->error = rnorm/bnorm;
if(nlCurrentContext->verbose) {
nl_printf("in OpenNL : ||Ax-b||/||b|| = %e\n",
nlCurrentContext->error
);
}
}
        nlCurrentContext->used_iterations = result;
    }
if(!nlBlasHasUnifiedMemory(blas)) {
blas->Memcpy(
blas,
x_in, NL_HOST_MEMORY, x, NL_DEVICE_MEMORY, (size_t)N*sizeof(double)
);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, (int)M->n, x);
NL_DELETE_VECTOR(blas, NL_DEVICE_MEMORY, (int)M->n, b);
}
return result;
}
/******* extracted from nl_preconditioners.c *******/
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
NLdouble* diag_inv;
} NLJacobiPreconditioner;
static void nlJacobiPreconditionerDestroy(NLJacobiPreconditioner* M) {
NL_DELETE_ARRAY(M->diag_inv);
}
static void nlJacobiPreconditionerMult(
NLJacobiPreconditioner* M, const double* x, double* y
) {
NLuint i;
for(i=0; i<M->n; ++i) {
y[i] = x[i] * M->diag_inv[i];
}
nlHostBlas()->flops += (NLulong)(M->n);
}
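/*
 * The Jacobi preconditioner stores the inverse of the diagonal of M and
 * applies it componentwise; zero diagonal entries are mapped to 1.0 so
 * that the construction never divides by zero.
 */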
NLMatrix nlNewJacobiPreconditioner(NLMatrix M_in) {
NLSparseMatrix* M = NULL;
NLJacobiPreconditioner* result = NULL;
NLuint i;
nl_assert(M_in->type == NL_MATRIX_SPARSE_DYNAMIC);
nl_assert(M_in->m == M_in->n);
M = (NLSparseMatrix*)M_in;
result = NL_NEW(NLJacobiPreconditioner);
result->m = M->m;
result->n = M->n;
result->type = NL_MATRIX_OTHER;
result->destroy_func = (NLDestroyMatrixFunc)nlJacobiPreconditionerDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlJacobiPreconditionerMult;
result->diag_inv = NL_NEW_ARRAY(double, M->n);
for(i=0; i<M->n; ++i) {
result->diag_inv[i] = (M->diag[i] == 0.0) ? 1.0 : 1.0/M->diag[i];
}
return (NLMatrix)result;
}
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
NLSparseMatrix* M;
double omega;
NLdouble* work;
} NLSSORPreconditioner;
static void nlSSORPreconditionerDestroy(NLSSORPreconditioner* M) {
NL_DELETE_ARRAY(M->work);
}
static void nlSparseMatrixMultLowerInverse(
NLSparseMatrix* A, const NLdouble* x, NLdouble* y, double omega
) {
NLuint n = A->n;
NLdouble* diag = A->diag;
NLuint i;
NLuint ij;
NLCoeff* c = NULL;
NLdouble S;
nl_assert(A->storage & NL_MATRIX_STORE_SYMMETRIC);
nl_assert(A->storage & NL_MATRIX_STORE_ROWS);
for(i=0; i<n; i++) {
NLRowColumn* Ri = &(A->row[i]);
S = 0;
for(ij=0; ij < Ri->size; ij++) {
c = &(Ri->coeff[ij]);
nl_parano_assert(c->index <= i);
if(c->index != i) {
S += c->value * y[c->index];
}
}
nlHostBlas()->flops += (NLulong)(2*Ri->size);
y[i] = (x[i] - S) * omega / diag[i];
}
nlHostBlas()->flops += (NLulong)(n*3);
}
static void nlSparseMatrixMultUpperInverse(
NLSparseMatrix* A, const NLdouble* x, NLdouble* y, NLdouble omega
) {
NLuint n = A->n;
NLdouble* diag = A->diag;
NLint i;
NLuint ij;
NLCoeff* c = NULL;
NLdouble S;
nl_assert(A->storage & NL_MATRIX_STORE_SYMMETRIC);
nl_assert(A->storage & NL_MATRIX_STORE_COLUMNS);
for(i=(NLint)(n-1); i>=0; i--) {
NLRowColumn* Ci = &(A->column[i]);
S = 0;
for(ij=0; ij < Ci->size; ij++) {
c = &(Ci->coeff[ij]);
nl_parano_assert(c->index >= i);
if((NLint)(c->index) != i) {
S += c->value * y[c->index];
}
}
nlHostBlas()->flops += (NLulong)(2*Ci->size);
y[i] = (x[i] - S) * omega / diag[i];
}
nlHostBlas()->flops += (NLulong)(n*3);
}
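/*
 * Applies the inverse of the SSOR preconditioner
 *   M = 1/(omega*(2-omega)) (D + omega*L) D^{-1} (D + omega*L)^t
 * as y = (2-omega) (D/omega + L^t)^{-1} (D/omega) (D/omega + L)^{-1} x,
 * i.e. a forward solve, a diagonal scaling, then a backward solve.
 */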
static void nlSSORPreconditionerMult(
NLSSORPreconditioner* P, const double* x, double* y
) {
NLdouble* diag = P->M->diag;
NLuint i;
nlSparseMatrixMultLowerInverse(
P->M, x, P->work, P->omega
);
for(i=0; i<P->n; i++) {
P->work[i] *= (diag[i] / P->omega);
}
nlHostBlas()->flops += (NLulong)(P->n);
nlSparseMatrixMultUpperInverse(
P->M, P->work, y, P->omega
);
nlHostBlas()->Dscal(nlHostBlas(),(NLint)P->n, 2.0 - P->omega, y, 1);
}
NLMatrix nlNewSSORPreconditioner(NLMatrix M_in, double omega) {
NLSparseMatrix* M = NULL;
NLSSORPreconditioner* result = NULL;
nl_assert(M_in->type == NL_MATRIX_SPARSE_DYNAMIC);
nl_assert(M_in->m == M_in->n);
M = (NLSparseMatrix*)M_in;
result = NL_NEW(NLSSORPreconditioner);
result->m = M->m;
result->n = M->n;
result->type = NL_MATRIX_OTHER;
result->destroy_func = (NLDestroyMatrixFunc)nlSSORPreconditionerDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlSSORPreconditionerMult;
result->M = M;
result->work = NL_NEW_ARRAY(NLdouble, result->n);
result->omega = omega;
return (NLMatrix)result;
}
/******* extracted from nl_superlu.c *******/
#ifdef NL_OS_UNIX
# ifdef NL_OS_APPLE
# define SUPERLU_LIB_NAME "libsuperlu_5.dylib"
# else
# define SUPERLU_LIB_NAME "libsuperlu.so"
# endif
#else
# define SUPERLU_LIB_NAME "libsuperlu.xxx"
#endif
typedef enum {
SLU_NC, /* column-wise, no supernode */
SLU_NCP, /* column-wise, column-permuted, no supernode
(The consecutive columns of nonzeros, after permutation,
may not be stored contiguously.) */
    SLU_NR, /* row-wise, no supernode */
SLU_SC, /* column-wise, supernode */
SLU_SCP, /* supernode, column-wise, permuted */
SLU_SR, /* row-wise, supernode */
SLU_DN, /* Fortran style column-wise storage for dense matrix */
SLU_NR_loc /* distributed compressed row format */
} Stype_t;
typedef enum {
SLU_S, /* single */
SLU_D, /* double */
SLU_C, /* single complex */
SLU_Z /* double complex */
} Dtype_t;
typedef enum {
SLU_GE, /* general */
SLU_TRLU, /* lower triangular, unit diagonal */
SLU_TRUU, /* upper triangular, unit diagonal */
SLU_TRL, /* lower triangular */
SLU_TRU, /* upper triangular */
SLU_SYL, /* symmetric, store lower half */
SLU_SYU, /* symmetric, store upper half */
SLU_HEL, /* Hermitian, store lower half */
SLU_HEU /* Hermitian, store upper half */
} Mtype_t;
typedef int int_t;
typedef struct {
int_t nnz; /* number of nonzeros in the matrix */
    void *nzval; /* pointer to array of nonzero values, packed by row */
int_t *colind; /* pointer to array of columns indices of the nonzeros */
int_t *rowptr; /* pointer to array of beginning of rows in nzval[]
and colind[] */
/* Note:
Zero-based indexing is used;
rowptr[] has nrow+1 entries, the last one pointing
beyond the last row, so that rowptr[nrow] = nnz. */
} NRformat;
typedef struct {
Stype_t Stype; /* Storage type: interprets the storage structure
pointed to by *Store. */
Dtype_t Dtype; /* Data type. */
Mtype_t Mtype; /* Matrix type: describes the mathematical property of
the matrix. */
int_t nrow; /* number of rows */
int_t ncol; /* number of columns */
void *Store; /* pointer to the actual storage of the matrix */
} SuperMatrix;
/* Stype == SLU_DN */
typedef struct {
int_t lda; /* leading dimension */
void *nzval; /* array of size lda*ncol to represent a dense matrix */
} DNformat;
typedef enum {NO, YES} yes_no_t;
typedef enum {DOFACT, SamePattern, SamePattern_SameRowPerm, FACTORED} fact_t;
typedef enum {NOROWPERM, LargeDiag, MY_PERMR} rowperm_t;
typedef enum {NATURAL, MMD_ATA, MMD_AT_PLUS_A, COLAMD,
METIS_AT_PLUS_A, PARMETIS, ZOLTAN, MY_PERMC} colperm_t;
typedef enum {NOTRANS, TRANS, CONJ} trans_t;
typedef enum {NOEQUIL, ROW, COL, BOTH} DiagScale_t;
typedef enum {NOREFINE, SLU_SINGLE=1, SLU_DOUBLE, SLU_EXTRA} IterRefine_t;
typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} MemType;
typedef enum {HEAD, TAIL} stack_end_t;
typedef enum {SYSTEM, USER} LU_space_t;
typedef enum {ONE_NORM, TWO_NORM, INF_NORM} norm_t;
typedef enum {SILU, SMILU_1, SMILU_2, SMILU_3} milu_t;
typedef struct {
fact_t Fact;
yes_no_t Equil;
colperm_t ColPerm;
trans_t Trans;
IterRefine_t IterRefine;
double DiagPivotThresh;
yes_no_t SymmetricMode;
yes_no_t PivotGrowth;
yes_no_t ConditionNumber;
rowperm_t RowPerm;
int ILU_DropRule;
double ILU_DropTol; /* threshold for dropping */
double ILU_FillFactor; /* gamma in the secondary dropping */
norm_t ILU_Norm; /* infinity-norm, 1-norm, or 2-norm */
double ILU_FillTol; /* threshold for zero pivot perturbation */
milu_t ILU_MILU;
double ILU_MILU_Dim; /* Dimension of PDE (if available) */
yes_no_t ParSymbFact;
yes_no_t ReplaceTinyPivot; /* used in SuperLU_DIST */
yes_no_t SolveInitialized;
yes_no_t RefineInitialized;
yes_no_t PrintStat;
int nnzL, nnzU; /* used to store nnzs for now */
int num_lookaheads; /* num of levels in look-ahead */
yes_no_t lookahead_etree; /* use etree computed from the
serial symbolic factorization */
yes_no_t SymPattern; /* symmetric factorization */
} superlu_options_t;
typedef void* superlu_options_ptr;
typedef float flops_t;
typedef unsigned char Logical;
typedef struct {
int *panel_histo; /* histogram of panel size distribution */
double *utime; /* running time at various phases */
flops_t *ops; /* operation count at various phases */
int TinyPivots; /* number of tiny pivots */
int RefineSteps; /* number of iterative refinement steps */
int expansions; /* number of memory expansions (SuperLU4) */
} SuperLUStat_t;
/*! \brief Headers for 4 types of dynamically managed memory */
typedef struct e_node {
int size; /* length of the memory that has been used */
void *mem; /* pointer to the new malloc'd store */
} ExpHeader;
typedef struct {
int size;
int used;
int top1; /* grow upward, relative to &array[0] */
int top2; /* grow downward */
void *array;
} LU_stack_t;
typedef struct {
int *xsup; /* supernode and column mapping */
int *supno;
int *lsub; /* compressed L subscripts */
int *xlsub;
void *lusup; /* L supernodes */
int *xlusup;
void *ucol; /* U columns */
int *usub;
int *xusub;
int nzlmax; /* current max size of lsub */
int nzumax; /* " " " ucol */
int nzlumax; /* " " " lusup */
int n; /* number of columns in the matrix */
LU_space_t MemModel; /* 0 - system malloc'd; 1 - user provided */
int num_expansions;
ExpHeader *expanders; /* Array of pointers to 4 types of memory */
LU_stack_t stack; /* use user supplied memory */
} GlobalLU_t;
typedef void (*FUNPTR_set_default_options)(superlu_options_ptr options);
typedef void (*FUNPTR_ilu_set_default_options)(superlu_options_ptr options);
typedef void (*FUNPTR_StatInit)(SuperLUStat_t *);
typedef void (*FUNPTR_StatFree)(SuperLUStat_t *);
typedef void (*FUNPTR_dCreate_CompCol_Matrix)(
SuperMatrix *, int, int, int, const double *,
const int *, const int *, Stype_t, Dtype_t, Mtype_t);
typedef void (*FUNPTR_dCreate_Dense_Matrix)(
SuperMatrix *, int, int, const double *, int,
Stype_t, Dtype_t, Mtype_t);
typedef void (*FUNPTR_Destroy_SuperNode_Matrix)(SuperMatrix *);
typedef void (*FUNPTR_Destroy_CompCol_Matrix)(SuperMatrix *);
typedef void (*FUNPTR_Destroy_CompCol_Permuted)(SuperMatrix *);
typedef void (*FUNPTR_Destroy_SuperMatrix_Store)(SuperMatrix *);
typedef void (*FUNPTR_dgssv)(
superlu_options_ptr, SuperMatrix *, int *, int *, SuperMatrix *,
SuperMatrix *, SuperMatrix *, SuperLUStat_t *, int *
);
typedef void (*FUNPTR_dgstrs)(
trans_t, SuperMatrix *, SuperMatrix *, int *, int *,
SuperMatrix *, SuperLUStat_t*, int *
);
typedef void (*FUNPTR_get_perm_c)(int, SuperMatrix *, int *);
typedef void (*FUNPTR_sp_preorder)(
superlu_options_t *, SuperMatrix*, int*, int*, SuperMatrix*
);
typedef int (*FUNPTR_sp_ienv)(int);
typedef int (*FUNPTR_input_error)(const char *, int *);
typedef void (*FUNPTR_dgstrf) (superlu_options_t *options, SuperMatrix *A,
int relax, int panel_size, int *etree, void *work, int lwork,
int *perm_c, int *perm_r, SuperMatrix *L, SuperMatrix *U,
GlobalLU_t *Glu, /* persistent to facilitate multiple factorizations */
SuperLUStat_t *stat, int *info
);
typedef struct {
FUNPTR_set_default_options set_default_options;
FUNPTR_ilu_set_default_options ilu_set_default_options;
FUNPTR_StatInit StatInit;
FUNPTR_StatFree StatFree;
FUNPTR_dCreate_CompCol_Matrix dCreate_CompCol_Matrix;
FUNPTR_dCreate_Dense_Matrix dCreate_Dense_Matrix;
FUNPTR_Destroy_SuperNode_Matrix Destroy_SuperNode_Matrix;
FUNPTR_Destroy_CompCol_Matrix Destroy_CompCol_Matrix;
FUNPTR_Destroy_CompCol_Permuted Destroy_CompCol_Permuted;
FUNPTR_Destroy_SuperMatrix_Store Destroy_SuperMatrix_Store;
FUNPTR_dgssv dgssv;
FUNPTR_dgstrs dgstrs;
FUNPTR_get_perm_c get_perm_c;
FUNPTR_sp_preorder sp_preorder;
FUNPTR_sp_ienv sp_ienv;
FUNPTR_dgstrf dgstrf;
FUNPTR_input_error input_error;
NLdll DLL_handle;
} SuperLUContext;
static SuperLUContext* SuperLU() {
static SuperLUContext context;
static NLboolean init = NL_FALSE;
if(!init) {
init = NL_TRUE;
memset(&context, 0, sizeof(context));
}
return &context;
}
NLboolean nlExtensionIsInitialized_SUPERLU() {
return
SuperLU()->DLL_handle != NULL &&
SuperLU()->set_default_options != NULL &&
SuperLU()->ilu_set_default_options != NULL &&
SuperLU()->StatInit != NULL &&
SuperLU()->StatFree != NULL &&
SuperLU()->dCreate_CompCol_Matrix != NULL &&
SuperLU()->dCreate_Dense_Matrix != NULL &&
SuperLU()->Destroy_SuperNode_Matrix != NULL &&
SuperLU()->Destroy_CompCol_Matrix != NULL &&
SuperLU()->Destroy_CompCol_Permuted != NULL &&
SuperLU()->Destroy_SuperMatrix_Store != NULL &&
SuperLU()->dgssv != NULL &&
SuperLU()->dgstrs != NULL &&
SuperLU()->get_perm_c != NULL &&
SuperLU()->sp_preorder != NULL &&
SuperLU()->sp_ienv != NULL &&
SuperLU()->dgstrf != NULL &&
SuperLU()->input_error != NULL;
}
static void nlTerminateExtension_SUPERLU(void) {
if(SuperLU()->DLL_handle != NULL) {
nlCloseDLL(SuperLU()->DLL_handle);
SuperLU()->DLL_handle = NULL;
}
}
#define find_superlu_func(name) \
if( \
( \
SuperLU()->name = \
(FUNPTR_##name)nlFindFunction(SuperLU()->DLL_handle,#name) \
) == NULL \
) { \
nlError("nlInitExtension_SUPERLU","function not found"); \
nlError("nlInitExtension_SUPERLU",#name); \
return NL_FALSE; \
}
NLboolean nlInitExtension_SUPERLU(void) {
NLenum flags = NL_LINK_NOW | NL_LINK_USE_FALLBACK;
if(nlCurrentContext == NULL || !nlCurrentContext->verbose) {
flags |= NL_LINK_QUIET;
}
if(SuperLU()->DLL_handle != NULL) {
return nlExtensionIsInitialized_SUPERLU();
}
SuperLU()->DLL_handle = nlOpenDLL(SUPERLU_LIB_NAME, flags);
if(SuperLU()->DLL_handle == NULL) {
return NL_FALSE;
}
find_superlu_func(set_default_options);
find_superlu_func(ilu_set_default_options);
find_superlu_func(StatInit);
find_superlu_func(StatFree);
find_superlu_func(dCreate_CompCol_Matrix);
find_superlu_func(dCreate_Dense_Matrix);
find_superlu_func(Destroy_SuperNode_Matrix);
find_superlu_func(Destroy_CompCol_Matrix);
find_superlu_func(Destroy_CompCol_Permuted);
find_superlu_func(Destroy_SuperMatrix_Store);
find_superlu_func(dgssv);
find_superlu_func(dgstrs);
find_superlu_func(get_perm_c);
find_superlu_func(sp_preorder);
find_superlu_func(sp_ienv);
find_superlu_func(dgstrf);
find_superlu_func(input_error);
atexit(nlTerminateExtension_SUPERLU);
return NL_TRUE;
}
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
SuperMatrix L;
SuperMatrix U;
int* perm_r;
int* perm_c;
trans_t trans;
} NLSuperLUFactorizedMatrix;
static void nlSuperLUFactorizedMatrixDestroy(NLSuperLUFactorizedMatrix* M) {
SuperLU()->Destroy_SuperNode_Matrix(&M->L);
SuperLU()->Destroy_CompCol_Matrix(&M->U);
NL_DELETE_ARRAY(M->perm_r);
NL_DELETE_ARRAY(M->perm_c);
}
static void nlSuperLUFactorizedMatrixMult(
NLSuperLUFactorizedMatrix* M, const double* x, double* y
) {
SuperMatrix B;
SuperLUStat_t stat;
int info = 0;
NLuint i;
/* Create vector */
SuperLU()->dCreate_Dense_Matrix(
&B, (int)(M->n), 1, y, (int)(M->n),
SLU_DN, /* Fortran-type column-wise storage */
SLU_D, /* doubles */
SLU_GE /* general */
);
    /* copy rhs onto y (the SuperLU triangular solve expects it there) */
for(i = 0; i < M->n; i++){
y[i] = x[i];
}
/* Call SuperLU triangular solve */
SuperLU()->StatInit(&stat) ;
SuperLU()->dgstrs(
M->trans, &M->L, &M->U, M->perm_c, M->perm_r, &B, &stat, &info
);
SuperLU()->StatFree(&stat) ;
/* Only the "store" structure needs to be
* deallocated (the array has been allocated
* by client code).
*/
SuperLU()->Destroy_SuperMatrix_Store(&B) ;
}
/*
* Copied from SUPERLU/dgssv.c, removed call to linear solve.
*/
static void dgssv_factorize_only(
superlu_options_t *options, SuperMatrix *A, int *perm_c, int *perm_r,
SuperMatrix *L, SuperMatrix *U,
SuperLUStat_t *stat, int *info, trans_t *trans
) {
SuperMatrix *AA = NULL;
/* A in SLU_NC format used by the factorization routine.*/
SuperMatrix AC; /* Matrix postmultiplied by Pc */
int lwork = 0, *etree, i;
GlobalLU_t Glu; /* Not needed on return. */
/* Set default values for some parameters */
int panel_size; /* panel size */
int relax; /* no of columns in a relaxed snodes */
int permc_spec;
nl_assert(A->Stype == SLU_NR || A->Stype == SLU_NC);
*trans = NOTRANS;
if ( options->Fact != DOFACT ) *info = -1;
else if ( A->nrow != A->ncol || A->nrow < 0 ||
(A->Stype != SLU_NC && A->Stype != SLU_NR) ||
A->Dtype != SLU_D || A->Mtype != SLU_GE )
*info = -2;
if ( *info != 0 ) {
i = -(*info);
SuperLU()->input_error("SUPERLU/OpenNL dgssv_factorize_only", &i);
return;
}
/* Convert A to SLU_NC format when necessary. */
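    /*
     * Reinterpreting the row-wise arrays (nzval, colind, rowptr) as
     * column-wise storage yields the transpose of A at zero cost; trans
     * is then set to TRANS so that subsequent solves still operate on A
     * rather than on A^t.
     */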
if ( A->Stype == SLU_NR ) {
NRformat *Astore = (NRformat*)A->Store;
AA = NL_NEW(SuperMatrix);
SuperLU()->dCreate_CompCol_Matrix(
AA, A->ncol, A->nrow, Astore->nnz,
(double*)Astore->nzval, Astore->colind, Astore->rowptr,
SLU_NC, A->Dtype, A->Mtype
);
*trans = TRANS;
} else {
if ( A->Stype == SLU_NC ) AA = A;
}
nl_assert(AA != NULL);
/*
* Get column permutation vector perm_c[], according to permc_spec:
* permc_spec = NATURAL: natural ordering
* permc_spec = MMD_AT_PLUS_A: minimum degree on structure of A'+A
* permc_spec = MMD_ATA: minimum degree on structure of A'*A
* permc_spec = COLAMD: approximate minimum degree column ordering
* permc_spec = MY_PERMC: the ordering already supplied in perm_c[]
*/
permc_spec = options->ColPerm;
if ( permc_spec != MY_PERMC && options->Fact == DOFACT )
SuperLU()->get_perm_c(permc_spec, AA, perm_c);
etree = NL_NEW_ARRAY(int,A->ncol);
SuperLU()->sp_preorder(options, AA, perm_c, etree, &AC);
panel_size = SuperLU()->sp_ienv(1);
relax = SuperLU()->sp_ienv(2);
SuperLU()->dgstrf(options, &AC, relax, panel_size, etree,
NULL, lwork, perm_c, perm_r, L, U, &Glu, stat, info);
NL_DELETE_ARRAY(etree);
SuperLU()->Destroy_CompCol_Permuted(&AC);
if ( A->Stype == SLU_NR ) {
SuperLU()->Destroy_SuperMatrix_Store(AA);
NL_DELETE(AA);
}
}
NLMatrix nlMatrixFactorize_SUPERLU(
NLMatrix M, NLenum solver
) {
NLSuperLUFactorizedMatrix* LU = NULL;
NLCRSMatrix* CRS = NULL;
SuperMatrix superM;
NLuint n = M->n;
superlu_options_t options;
SuperLUStat_t stat;
NLint info = 0; /* status code */
nl_assert(M->m == M->n);
if(M->type == NL_MATRIX_CRS) {
CRS = (NLCRSMatrix*)M;
} else if(M->type == NL_MATRIX_SPARSE_DYNAMIC) {
CRS = (NLCRSMatrix*)nlCRSMatrixNewFromSparseMatrix((NLSparseMatrix*)M);
}
nl_assert(!(CRS->symmetric_storage));
LU = NL_NEW(NLSuperLUFactorizedMatrix);
LU->m = M->m;
LU->n = M->n;
LU->type = NL_MATRIX_OTHER;
LU->destroy_func = (NLDestroyMatrixFunc)(nlSuperLUFactorizedMatrixDestroy);
LU->mult_func = (NLMultMatrixVectorFunc)(nlSuperLUFactorizedMatrixMult);
LU->perm_c = NL_NEW_ARRAY(int, n);
LU->perm_r = NL_NEW_ARRAY(int, n);
SuperLU()->dCreate_CompCol_Matrix(
&superM, (int)n, (int)n, (int)nlCRSMatrixNNZ(CRS),
CRS->val, (int*)CRS->colind, (int*)CRS->rowptr,
        SLU_NR, /* row-wise, no supernode */
SLU_D, /* doubles */
CRS->symmetric_storage ? SLU_SYL : SLU_GE
);
SuperLU()->set_default_options(&options);
switch(solver) {
case NL_SUPERLU_EXT: {
options.ColPerm = NATURAL;
} break;
case NL_PERM_SUPERLU_EXT: {
options.ColPerm = COLAMD;
} break;
case NL_SYMMETRIC_SUPERLU_EXT: {
options.ColPerm = MMD_AT_PLUS_A;
options.SymmetricMode = YES;
} break;
default:
nl_assert_not_reached;
}
SuperLU()->StatInit(&stat);
dgssv_factorize_only(
&options, &superM, LU->perm_c, LU->perm_r,
&LU->L, &LU->U, &stat, &info, &LU->trans
);
SuperLU()->StatFree(&stat);
/*
* Only the "store" structure needs to be deallocated
* (the arrays have been allocated by us, they are in CRS).
*/
SuperLU()->Destroy_SuperMatrix_Store(&superM);
if((NLMatrix)CRS != M) {
nlDeleteMatrix((NLMatrix)CRS);
}
if(info != 0) {
NL_DELETE(LU);
LU = NULL;
}
return (NLMatrix)LU;
}
/******* extracted from nl_cholmod.c *******/
#ifdef NL_OS_UNIX
# ifdef NL_OS_APPLE
# define CHOLMOD_LIB_NAME "libcholmod.dylib"
# else
# define CHOLMOD_LIB_NAME "libcholmod.so"
# endif
#else
# define CHOLMOD_LIB_NAME "libcholmod.xxx"
#endif
/* Excerpt from cholmod_core.h */
/* A dense matrix in column-oriented form. It has no itype since it contains
* no integers. Entry in row i and column j is located in x [i+j*d].
*/
typedef struct cholmod_dense_struct {
size_t nrow ; /* the matrix is nrow-by-ncol */
size_t ncol ;
size_t nzmax ; /* maximum number of entries in the matrix */
size_t d ; /* leading dimension (d >= nrow must hold) */
void *x ; /* size nzmax or 2*nzmax, if present */
void *z ; /* size nzmax, if present */
int xtype ; /* pattern, real, complex, or zomplex */
int dtype ; /* x and z double or float */
} cholmod_dense ;
/* A sparse matrix stored in compressed-column form. */
typedef struct cholmod_sparse_struct
{
size_t nrow ; /* the matrix is nrow-by-ncol */
size_t ncol ;
size_t nzmax ; /* maximum number of entries in the matrix */
/* pointers to int or SuiteSparse_long: */
void *p ; /* p [0..ncol], the column pointers */
void *i ; /* i [0..nzmax-1], the row indices */
/* for unpacked matrices only: */
void *nz ; /* nz [0..ncol-1], the # of nonzeros in each col. In
* packed form, the nonzero pattern of column j is in
* A->i [A->p [j] ... A->p [j+1]-1]. In unpacked form, column j is in
* A->i [A->p [j] ... A->p [j]+A->nz[j]-1] instead. In both cases, the
* numerical values (if present) are in the corresponding locations in
* the array x (or z if A->xtype is CHOLMOD_ZOMPLEX). */
/* pointers to double or float: */
void *x ; /* size nzmax or 2*nzmax, if present */
void *z ; /* size nzmax, if present */
int stype ; /* Describes what parts of the matrix are considered:
*
* 0: matrix is "unsymmetric": use both upper and lower triangular parts
* (the matrix may actually be symmetric in pattern and value, but
* both parts are explicitly stored and used). May be square or
* rectangular.
* >0: matrix is square and symmetric, use upper triangular part.
* Entries in the lower triangular part are ignored.
* <0: matrix is square and symmetric, use lower triangular part.
* Entries in the upper triangular part are ignored.
*
* Note that stype>0 and stype<0 are different for cholmod_sparse and
* cholmod_triplet. See the cholmod_triplet data structure for more
* details.
*/
int itype ; /* CHOLMOD_INT: p, i, and nz are int.
* CHOLMOD_INTLONG: p is SuiteSparse_long,
* i and nz are int.
* CHOLMOD_LONG: p, i, and nz are SuiteSparse_long */
int xtype ; /* pattern, real, complex, or zomplex */
int dtype ; /* x and z are double or float */
int sorted ; /* TRUE if columns are sorted, FALSE otherwise */
int packed ; /* TRUE if packed (nz ignored), FALSE if unpacked
* (nz is required) */
} cholmod_sparse ;
typedef void* cholmod_common_ptr;
typedef cholmod_dense* cholmod_dense_ptr;
typedef cholmod_sparse* cholmod_sparse_ptr;
typedef void* cholmod_factor_ptr;
typedef enum cholmod_xtype_enum {
CHOLMOD_PATTERN =0,
CHOLMOD_REAL =1,
CHOLMOD_COMPLEX =2,
CHOLMOD_ZOMPLEX =3
} cholmod_xtype;
typedef enum cholmod_solve_type_enum {
CHOLMOD_A =0,
CHOLMOD_LDLt =1,
CHOLMOD_LD =2,
CHOLMOD_DLt =3,
CHOLMOD_L =4,
CHOLMOD_Lt =5,
CHOLMOD_D =6,
CHOLMOD_P =7,
CHOLMOD_Pt =8
} cholmod_solve_type;
typedef int cholmod_stype;
typedef void (*FUNPTR_cholmod_start)(cholmod_common_ptr);
typedef cholmod_sparse_ptr (*FUNPTR_cholmod_allocate_sparse)(
size_t m, size_t n, size_t nnz, int sorted,
int packed, int stype, int xtype, cholmod_common_ptr
);
typedef cholmod_dense_ptr (*FUNPTR_cholmod_allocate_dense)(
size_t m, size_t n, size_t d, int xtype, cholmod_common_ptr
);
typedef cholmod_factor_ptr (*FUNPTR_cholmod_analyze)(
cholmod_sparse_ptr A, cholmod_common_ptr
);
typedef int (*FUNPTR_cholmod_factorize)(
cholmod_sparse_ptr A, cholmod_factor_ptr L, cholmod_common_ptr
);
typedef cholmod_dense_ptr (*FUNPTR_cholmod_solve)(
int solve_type, cholmod_factor_ptr, cholmod_dense_ptr, cholmod_common_ptr
);
typedef void (*FUNPTR_cholmod_free_factor)(
cholmod_factor_ptr*, cholmod_common_ptr
);
typedef void (*FUNPTR_cholmod_free_dense)(
cholmod_dense_ptr*, cholmod_common_ptr
);
typedef void (*FUNPTR_cholmod_free_sparse)(
cholmod_sparse_ptr*, cholmod_common_ptr
);
typedef void (*FUNPTR_cholmod_finish)(cholmod_common_ptr);
typedef struct {
char cholmod_common[16384];
FUNPTR_cholmod_start cholmod_start;
FUNPTR_cholmod_allocate_sparse cholmod_allocate_sparse;
FUNPTR_cholmod_allocate_dense cholmod_allocate_dense;
FUNPTR_cholmod_analyze cholmod_analyze;
FUNPTR_cholmod_factorize cholmod_factorize;
FUNPTR_cholmod_solve cholmod_solve;
FUNPTR_cholmod_free_factor cholmod_free_factor;
FUNPTR_cholmod_free_sparse cholmod_free_sparse;
FUNPTR_cholmod_free_dense cholmod_free_dense;
FUNPTR_cholmod_finish cholmod_finish;
NLdll DLL_handle;
} CHOLMODContext;
static CHOLMODContext* CHOLMOD() {
static CHOLMODContext context;
static NLboolean init = NL_FALSE;
if(!init) {
init = NL_TRUE;
memset(&context, 0, sizeof(context));
}
return &context;
}
NLboolean nlExtensionIsInitialized_CHOLMOD() {
return
CHOLMOD()->DLL_handle != NULL &&
CHOLMOD()->cholmod_start != NULL &&
CHOLMOD()->cholmod_allocate_sparse != NULL &&
CHOLMOD()->cholmod_allocate_dense != NULL &&
CHOLMOD()->cholmod_analyze != NULL &&
CHOLMOD()->cholmod_factorize != NULL &&
CHOLMOD()->cholmod_solve != NULL &&
CHOLMOD()->cholmod_free_factor != NULL &&
CHOLMOD()->cholmod_free_sparse != NULL &&
CHOLMOD()->cholmod_free_dense != NULL &&
CHOLMOD()->cholmod_finish != NULL ;
}
#define find_cholmod_func(name) \
if( \
( \
CHOLMOD()->name = \
(FUNPTR_##name)nlFindFunction(CHOLMOD()->DLL_handle,#name) \
) == NULL \
) { \
nlError("nlInitExtension_CHOLMOD","function not found"); \
return NL_FALSE; \
}
static void nlTerminateExtension_CHOLMOD(void) {
if(CHOLMOD()->DLL_handle != NULL) {
CHOLMOD()->cholmod_finish(&CHOLMOD()->cholmod_common);
nlCloseDLL(CHOLMOD()->DLL_handle);
CHOLMOD()->DLL_handle = NULL;
}
}
NLboolean nlInitExtension_CHOLMOD(void) {
NLenum flags = NL_LINK_NOW | NL_LINK_USE_FALLBACK;
if(nlCurrentContext == NULL || !nlCurrentContext->verbose) {
flags |= NL_LINK_QUIET;
}
if(CHOLMOD()->DLL_handle != NULL) {
return nlExtensionIsInitialized_CHOLMOD();
}
/*
* MKL has a built-in CHOLMOD that conflicts with
* the CHOLMOD used by OpenNL (to be fixed). For now
* we simply output a warning message and deactivate the
* CHOLMOD extension if the MKL extension was initialized
* before.
*/
if(NLMultMatrixVector_MKL != NULL) {
nl_fprintf(
stderr,
"CHOLMOD extension incompatible with MKL (deactivating)"
);
return NL_FALSE;
}
CHOLMOD()->DLL_handle = nlOpenDLL(CHOLMOD_LIB_NAME,flags);
if(CHOLMOD()->DLL_handle == NULL) {
return NL_FALSE;
}
find_cholmod_func(cholmod_start);
find_cholmod_func(cholmod_allocate_sparse);
find_cholmod_func(cholmod_allocate_dense);
find_cholmod_func(cholmod_analyze);
find_cholmod_func(cholmod_factorize);
find_cholmod_func(cholmod_solve);
find_cholmod_func(cholmod_free_factor);
find_cholmod_func(cholmod_free_sparse);
find_cholmod_func(cholmod_free_dense);
find_cholmod_func(cholmod_finish);
CHOLMOD()->cholmod_start(&CHOLMOD()->cholmod_common);
atexit(nlTerminateExtension_CHOLMOD);
return NL_TRUE;
}
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
cholmod_factor_ptr L;
} NLCholmodFactorizedMatrix;
static void nlCholmodFactorizedMatrixDestroy(NLCholmodFactorizedMatrix* M) {
CHOLMOD()->cholmod_free_factor(&M->L, &CHOLMOD()->cholmod_common);
}
static void nlCholmodFactorizedMatrixMult(
NLCholmodFactorizedMatrix* M, const double* x, double* y
) {
/*
     * TODO: see whether CHOLMOD can use user-allocated vectors
* (and avoid copy)
*/
cholmod_dense_ptr X=CHOLMOD()->cholmod_allocate_dense(
M->n, 1, M->n, CHOLMOD_REAL, &CHOLMOD()->cholmod_common
);
cholmod_dense_ptr Y=NULL;
memcpy(X->x, x, M->n*sizeof(double));
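    /* CHOLMOD_A solves A y = x using the complete factorization */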
Y = CHOLMOD()->cholmod_solve(
CHOLMOD_A, M->L, X, &CHOLMOD()->cholmod_common
);
memcpy(y, Y->x, M->n*sizeof(double));
CHOLMOD()->cholmod_free_dense(&X, &CHOLMOD()->cholmod_common);
CHOLMOD()->cholmod_free_dense(&Y, &CHOLMOD()->cholmod_common);
}
NLMatrix nlMatrixFactorize_CHOLMOD(
NLMatrix M, NLenum solver
) {
NLCholmodFactorizedMatrix* LLt = NULL;
NLCRSMatrix* CRS = NULL;
cholmod_sparse_ptr cM= NULL;
NLuint nnz, cur, i, j, jj;
int* rowptr = NULL;
int* colind = NULL;
double* val = NULL;
NLuint n = M->n;
nl_assert(solver == NL_CHOLMOD_EXT);
nl_assert(M->m == M->n);
if(M->type == NL_MATRIX_CRS) {
CRS = (NLCRSMatrix*)M;
} else if(M->type == NL_MATRIX_SPARSE_DYNAMIC) {
/*
* Note: since we convert once again into symmetric storage,
* we could also directly read the NLSparseMatrix there instead
* of copying once more...
*/
CRS = (NLCRSMatrix*)nlCRSMatrixNewFromSparseMatrix((NLSparseMatrix*)M);
}
LLt = NL_NEW(NLCholmodFactorizedMatrix);
LLt->m = M->m;
LLt->n = M->n;
LLt->type = NL_MATRIX_OTHER;
LLt->destroy_func = (NLDestroyMatrixFunc)(nlCholmodFactorizedMatrixDestroy);
LLt->mult_func = (NLMultMatrixVectorFunc)(nlCholmodFactorizedMatrixMult);
/*
* Compute required nnz, if matrix is not already with symmetric storage,
* ignore entries in the upper triangular part.
*/
nnz=0;
for(i=0; i<n; ++i) {
for(jj=CRS->rowptr[i]; jj<CRS->rowptr[i+1]; ++jj) {
j=CRS->colind[jj];
if(j <= i) {
++nnz;
}
}
}
/*
     * Copy the CRS matrix into the CHOLMOD matrix (ignoring the upper triangular part)
*/
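    /*
     * Note: CHOLMOD is column-oriented while CRS is row-oriented, so
     * feeding rowptr/colind as the column pointers p and row indices i
     * actually stores A^t; the lower triangle kept here thus becomes an
     * upper triangle, which is why stype is set to 1 below. Since A is
     * symmetric, the factorization is unaffected.
     */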
cM = CHOLMOD()->cholmod_allocate_sparse(
n, n, nnz, /* Dimensions and number of non-zeros */
NL_FALSE, /* Sorted = false */
NL_TRUE, /* Packed = true */
1, /* stype (-1 = lower triangular, 1 = upper triangular) */
CHOLMOD_REAL, /* Entries are real numbers */
&CHOLMOD()->cholmod_common
);
rowptr = (int*)cM->p;
colind = (int*)cM->i;
val = (double*)cM->x;
cur = 0;
for(i=0; i<n; ++i) {
rowptr[i] = (int)cur;
for(jj=CRS->rowptr[i]; jj<CRS->rowptr[i+1]; ++jj) {
j = CRS->colind[jj];
if(j <= i) {
val[cur] = CRS->val[jj];
colind[cur] = (int)j;
++cur;
}
}
}
rowptr[n] = (int)cur;
nl_assert(cur==nnz);
LLt->L = CHOLMOD()->cholmod_analyze(cM, &CHOLMOD()->cholmod_common);
if(!CHOLMOD()->cholmod_factorize(cM, LLt->L, &CHOLMOD()->cholmod_common)) {
CHOLMOD()->cholmod_free_factor(&LLt->L, &CHOLMOD()->cholmod_common);
NL_DELETE(LLt);
}
CHOLMOD()->cholmod_free_sparse(&cM, &CHOLMOD()->cholmod_common);
if((NLMatrix)CRS != M) {
nlDeleteMatrix((NLMatrix)CRS);
}
return (NLMatrix)(LLt);
}
/******* extracted from nl_arpack.c *******/
#ifdef NL_OS_UNIX
# ifdef NL_OS_APPLE
# define ARPACK_LIB_NAME "libarpack.dylib"
# else
# define ARPACK_LIB_NAME "libarpack.so"
# endif
#else
# define ARPACK_LIB_NAME "libarpack.dll"
#endif
typedef int ARint;
typedef int ARlogical;
/* double precision symmetric routines */
typedef void (*FUNPTR_dsaupd)(
ARint *ido, char *bmat, ARint *n, char *which,
ARint *nev, double *tol, double *resid,
ARint *ncv, double *V, ARint *ldv,
ARint *iparam, ARint *ipntr, double *workd,
double *workl, ARint *lworkl, ARint *info
);
typedef void (*FUNPTR_dseupd)(
ARlogical *rvec, char *HowMny, ARlogical *select,
double *d, double *Z, ARint *ldz,
double *sigma, char *bmat, ARint *n,
char *which, ARint *nev, double *tol,
double *resid, ARint *ncv, double *V,
ARint *ldv, ARint *iparam, ARint *ipntr,
double *workd, double *workl,
ARint *lworkl, ARint *info
);
/* double precision nonsymmetric routines */
typedef void (*FUNPTR_dnaupd)(
ARint *ido, char *bmat, ARint *n, char *which,
ARint *nev, double *tol, double *resid,
ARint *ncv, double *V, ARint *ldv,
ARint *iparam, ARint *ipntr, double *workd,
double *workl, ARint *lworkl, ARint *info
);
typedef void (*FUNPTR_dneupd)(
ARlogical *rvec, char *HowMny, ARlogical *select,
double *dr, double *di, double *Z,
ARint *ldz, double *sigmar,
double *sigmai, double *workev,
char *bmat, ARint *n, char *which,
ARint *nev, double *tol, double *resid,
ARint *ncv, double *V, ARint *ldv,
ARint *iparam, ARint *ipntr,
double *workd, double *workl,
ARint *lworkl, ARint *info
);
typedef struct {
FUNPTR_dsaupd dsaupd;
FUNPTR_dseupd dseupd;
FUNPTR_dnaupd dnaupd;
FUNPTR_dneupd dneupd;
NLdll DLL_handle;
} ARPACKContext;
static ARPACKContext* ARPACK() {
static ARPACKContext context;
static NLboolean init = NL_FALSE;
if(!init) {
init = NL_TRUE;
memset(&context, 0, sizeof(context));
}
return &context;
}
NLboolean nlExtensionIsInitialized_ARPACK() {
return
ARPACK()->DLL_handle != NULL &&
ARPACK()->dsaupd != NULL &&
ARPACK()->dseupd != NULL &&
ARPACK()->dnaupd != NULL &&
ARPACK()->dneupd != NULL;
}
static void nlTerminateExtension_ARPACK(void) {
if(ARPACK()->DLL_handle != NULL) {
nlCloseDLL(ARPACK()->DLL_handle);
ARPACK()->DLL_handle = NULL;
}
}
static char* u(const char* str) {
static char buff[1000];
sprintf(buff, "%s_", str);
return buff;
}
#define find_arpack_func(name) \
if( \
( \
ARPACK()->name = \
(FUNPTR_##name)nlFindFunction(ARPACK()->DLL_handle,u(#name)) \
) == NULL \
) { \
nlError("nlInitExtension_ARPACK","function not found"); \
nlError("nlInitExtension_ARPACK",u(#name)); \
return NL_FALSE; \
}
NLboolean nlInitExtension_ARPACK(void) {
NLenum flags = NL_LINK_NOW | NL_LINK_USE_FALLBACK;
if(nlCurrentContext == NULL || !nlCurrentContext->verbose) {
flags |= NL_LINK_QUIET;
}
if(ARPACK()->DLL_handle != NULL) {
return nlExtensionIsInitialized_ARPACK();
}
ARPACK()->DLL_handle = nlOpenDLL(ARPACK_LIB_NAME, flags);
if(ARPACK()->DLL_handle == NULL) {
return NL_FALSE;
}
find_arpack_func(dsaupd);
find_arpack_func(dseupd);
find_arpack_func(dnaupd);
find_arpack_func(dneupd);
atexit(nlTerminateExtension_ARPACK);
return NL_TRUE;
}
static NLMatrix create_OP(NLboolean symmetric) {
NLuint n = nlCurrentContext->M->n;
NLuint i;
NLMatrix result = NULL;
if(nlCurrentContext->eigen_shift != 0.0) {
/*
* A = M
*/
NLSparseMatrix* A = NL_NEW(NLSparseMatrix);
nlSparseMatrixConstruct(A, n, n, NL_MATRIX_STORE_ROWS);
nlSparseMatrixAddMatrix(A, 1.0, nlCurrentContext->M);
if(nlCurrentContext->B == NULL) {
/*
* A = A - shift * Id
*/
for(i=0; i<n; ++i) {
nlSparseMatrixAdd(A, i, i, -nlCurrentContext->eigen_shift);
}
} else {
/*
* A = A - shift * B
*/
nlSparseMatrixAddMatrix(
A, -nlCurrentContext->eigen_shift, nlCurrentContext->B
);
}
/*
* OP = A^{-1}
*/
if(nlCurrentContext->verbose) {
nl_printf("Factorizing matrix...\n");
}
result = nlMatrixFactorize(
(NLMatrix)A,
symmetric ? NL_SYMMETRIC_SUPERLU_EXT : NL_PERM_SUPERLU_EXT
);
if(nlCurrentContext->verbose) {
if(result == NULL) {
nl_printf("Could not factorize matrix\n");
} else {
nl_printf("Matrix factorized\n");
}
}
nlDeleteMatrix((NLMatrix)A);
} else {
/*
* OP = M^{-1}
*/
if(nlCurrentContext->verbose) {
nl_printf("Factorizing matrix...\n");
}
result = nlMatrixFactorize(
nlCurrentContext->M,
symmetric ? NL_SYMMETRIC_SUPERLU_EXT : NL_PERM_SUPERLU_EXT
);
if(nlCurrentContext->verbose) {
if(result == NULL) {
nl_printf("Could not factorize matrix\n");
} else {
nl_printf("Matrix factorized\n");
}
}
}
if(result == NULL) {
return NULL;
}
if(nlCurrentContext->B != NULL) {
/*
* OP = OP * B
*/
result = nlMatrixNewFromProduct(
result, NL_TRUE, /* mem. ownership transferred */
nlCurrentContext->B, NL_FALSE /* mem. ownership kept by context */
);
}
return result;
}
static int eigencompare(const void* pi, const void* pj) {
NLuint i = *(const NLuint*)pi;
NLuint j = *(const NLuint*)pj;
double vali = fabs(nlCurrentContext->temp_eigen_value[i]);
double valj = fabs(nlCurrentContext->temp_eigen_value[j]);
if(vali == valj) {
return 0;
}
return vali < valj ? -1 : 1;
}
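/*
 * Eigen solver driver. Since OP = (M - shift*B)^{-1} * B, or
 * (M - shift*I)^{-1} when B is NULL (see create_OP), asking ARPACK for
 * the eigenvalues of largest magnitude ("LM") of OP yields the
 * eigenvalues of the original problem that are closest to the shift
 * (shift-invert spectral transformation).
 */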
void nlEigenSolve_ARPACK(void) {
NLboolean symmetric =
nlCurrentContext->symmetric && (nlCurrentContext->B == NULL);
int n = (int)nlCurrentContext->M->n; /* Dimension of the matrix */
int nev = /* Number of eigenvectors requested */
(int)nlCurrentContext->nb_systems;
NLMatrix OP = create_OP(symmetric);
int ncv = (int)(nev * 2.5); /* Length of Arnoldi factorization */
/* Rule of thumb in ARPACK documentation: ncv > 2 * nev */
int* iparam = NULL;
int* ipntr = NULL;
NLdouble* resid = NULL;
NLdouble* workev = NULL;
NLdouble* workd = NULL;
NLdouble* workl = NULL;
NLdouble* v = NULL;
NLdouble* d = NULL;
ARlogical* select = NULL;
ARlogical rvec = 1;
double sigmar = 0.0;
double sigmai = 0.0;
int ierr;
int i,k,kk;
int ldv = (int)n;
char* bmat = (char*)"I"; /*Standard problem */
char* which = (char*)"LM"; /*Largest eigenvalues, but we invert->smallest */
char* howmny = (char*)"A"; /*which eigens should be computed: all */
double tol = nlCurrentContext->threshold;
int ido = 0; /* reverse communication variable (which operation ?) */
int info = 1; /* start with initial value of resid */
int lworkl; /* size of work array */
NLboolean converged = NL_FALSE;
NLdouble value;
int index;
int* sorted; /* indirection array for sorting eigenpairs */
if(OP == NULL) {
nlError("nlEigenSolve_ARPACK","Could not factorize matrix");
return;
}
if(ncv > n) {
ncv = n;
}
if(nev > n) {
nev = n;
}
if(nev + 2 > ncv) {
nev = ncv - 2;
}
if(symmetric) {
lworkl = ncv * (ncv + 8) ;
} else {
lworkl = 3*ncv*ncv + 6*ncv ;
}
iparam = NL_NEW_ARRAY(int, 11);
ipntr = NL_NEW_ARRAY(int, 14);
iparam[1-1] = 1; /* ARPACK chooses the shifts */
iparam[3-1] = (int)nlCurrentContext->max_iterations;
    iparam[7-1] = 1; /* Normal mode (we do not use ARPACK's shift-invert
                        mode (3) since we do our own shift-invert) */
workev = NL_NEW_ARRAY(NLdouble, 3*ncv);
workd = NL_NEW_ARRAY(NLdouble, 3*n);
resid = NL_NEW_ARRAY(NLdouble, n);
for(i=0; i<n; ++i) {
resid[i] = 1.0; /* (double)i / (double)n; */
}
v = NL_NEW_ARRAY(NLdouble, ldv*ncv);
if(symmetric) {
d = NL_NEW_ARRAY(NLdouble, 2*ncv);
} else {
d = NL_NEW_ARRAY(NLdouble, 3*ncv);
}
workl = NL_NEW_ARRAY(NLdouble, lworkl);
if(nlCurrentContext->verbose) {
if(symmetric) {
nl_printf("calling dsaupd()\n");
} else {
nl_printf("calling dnaupd()\n");
}
}
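    /*
     * ARPACK reverse-communication loop: dsaupd()/dnaupd() return with
     * ido == 1 whenever they need the product y = OP * x; the operands
     * are located in workd at the (FORTRAN, 1-based) offsets ipntr[0]
     * and ipntr[1].
     */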
while(!converged) {
/*
if(nlCurrentContext->verbose) {
fprintf(stderr, ".");
fflush(stderr);
}
*/
if(symmetric) {
ARPACK()->dsaupd(
&ido, bmat, &n, which, &nev, &tol, resid, &ncv,
v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info
);
} else {
ARPACK()->dnaupd(
&ido, bmat, &n, which, &nev, &tol, resid, &ncv,
v, &ldv, iparam, ipntr, workd, workl, &lworkl, &info
);
}
if(ido == 1) {
nlMultMatrixVector(
OP,
workd+ipntr[1-1]-1, /*The "-1"'s are for FORTRAN-to-C conversion */
workd+ipntr[2-1]-1 /*to keep the same indices as in ARPACK doc */
);
} else {
converged = NL_TRUE;
}
}
if(info < 0) {
if(symmetric) {
nl_fprintf(stderr, "\nError with dsaupd(): %d\n", info);
} else {
nl_fprintf(stderr, "\nError with dnaupd(): %d\n", info);
}
} else {
if(nlCurrentContext->verbose) {
fprintf(stderr, "\nconverged\n");
}
select = NL_NEW_ARRAY(ARlogical, ncv);
for(i=0; i<ncv; ++i) {
select[i] = 1;
}
if(nlCurrentContext->verbose) {
if(symmetric) {
nl_printf("calling dseupd()\n");
} else {
nl_printf("calling dneupd()\n");
}
}
if(symmetric) {
ARPACK()->dseupd(
&rvec, howmny, select, d, v,
&ldv, &sigmar, bmat, &n, which, &nev,
&tol, resid, &ncv, v, &ldv,
iparam, ipntr, workd,
workl, &lworkl, &ierr
);
} else {
ARPACK()->dneupd(
&rvec, howmny, select, d, d+ncv,
v, &ldv,
&sigmar, &sigmai, workev, bmat, &n,
which, &nev, &tol,
resid, &ncv, v, &ldv, iparam,
ipntr, workd, workl, &lworkl, &ierr
) ;
}
if(nlCurrentContext->verbose) {
if(ierr != 0) {
if(symmetric) {
nl_fprintf(stderr, "Error with dseupd(): %d\n", ierr);
} else {
nl_fprintf(stderr, "Error with dneupd(): %d\n", ierr);
}
} else {
if(symmetric) {
nl_printf("dseupd() OK, nconv= %d\n", iparam[3-1]);
} else {
nl_printf("dneupd() OK, nconv= %d\n", iparam[3-1]);
}
}
}
NL_DELETE_ARRAY(select);
}
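    /*
     * Undo the spectral transformation: ARPACK returned eigenvalues nu
     * of OP = (M - shift*B)^{-1} * B, so the eigenvalues of the original
     * problem are lambda = 1/nu + shift (with a guard against nu ~ 0).
     */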
for(i=0; i<nev; ++i) {
d[i] = (fabs(d[i]) < 1e-30) ? 1e30 : 1.0 / d[i] ;
d[i] += nlCurrentContext->eigen_shift ;
}
    /* Make it visible to the eigencompare() function */
nlCurrentContext->temp_eigen_value = d;
sorted = NL_NEW_ARRAY(int, nev);
for(i=0; i<nev; ++i) {
sorted[i] = i;
}
qsort(sorted, (size_t)nev, sizeof(NLuint), eigencompare);
nlCurrentContext->temp_eigen_value = NULL;
for(k=0; k<nev; ++k) {
kk = sorted[k];
nlCurrentContext->eigen_value[k] = d[kk];
for(i=0; i<(int)nlCurrentContext->nb_variables; ++i) {
if(!nlCurrentContext->variable_is_locked[i]) {
index = (int)nlCurrentContext->variable_index[i];
nl_assert(index < n);
value = v[kk*n+index];
NL_BUFFER_ITEM(
nlCurrentContext->variable_buffer[k],(NLuint)i
) = value;
}
}
}
NL_DELETE_ARRAY(sorted);
NL_DELETE_ARRAY(workl);
NL_DELETE_ARRAY(d);
NL_DELETE_ARRAY(v);
NL_DELETE_ARRAY(resid);
NL_DELETE_ARRAY(workd);
NL_DELETE_ARRAY(workev);
nlDeleteMatrix(OP);
NL_DELETE_ARRAY(iparam);
NL_DELETE_ARRAY(ipntr);
}
/******* extracted from nl_mkl.c *******/
typedef unsigned int MKL_INT;
typedef void (*FUNPTR_mkl_cspblas_dcsrgemv)(
const char *transa, const MKL_INT *m, const double *a,
const MKL_INT *ia, const MKL_INT *ja, const double *x, double *y
);
typedef void (*FUNPTR_mkl_cspblas_dcsrsymv)(
const char *transa, const MKL_INT *m, const double *a,
const MKL_INT *ia, const MKL_INT *ja, const double *x, double *y
);
typedef struct {
NLdll DLL_mkl_intel_lp64;
NLdll DLL_mkl_intel_thread;
NLdll DLL_mkl_core;
NLdll DLL_iomp5;
FUNPTR_mkl_cspblas_dcsrgemv mkl_cspblas_dcsrgemv;
FUNPTR_mkl_cspblas_dcsrsymv mkl_cspblas_dcsrsymv;
} MKLContext;
static MKLContext* MKL() {
static MKLContext context;
static NLboolean init = NL_FALSE;
if(!init) {
init = NL_TRUE;
memset(&context, 0, sizeof(context));
}
return &context;
}
NLboolean nlExtensionIsInitialized_MKL() {
if(
MKL()->DLL_iomp5 == NULL ||
MKL()->DLL_mkl_core == NULL ||
MKL()->DLL_mkl_intel_thread == NULL ||
MKL()->DLL_mkl_intel_lp64 == NULL ||
MKL()->mkl_cspblas_dcsrgemv == NULL ||
MKL()->mkl_cspblas_dcsrsymv == NULL
) {
return NL_FALSE;
}
return NL_TRUE;
}
#define find_mkl_func(name) \
if( \
( \
MKL()->name = \
(FUNPTR_##name)nlFindFunction( \
MKL()->DLL_mkl_intel_lp64,#name \
) \
) == NULL \
) { \
nlError("nlInitExtension_MKL","function not found"); \
return NL_FALSE; \
}
static void nlTerminateExtension_MKL(void) {
if(!nlExtensionIsInitialized_MKL()) {
return;
}
nlCloseDLL(MKL()->DLL_mkl_intel_lp64);
nlCloseDLL(MKL()->DLL_mkl_intel_thread);
nlCloseDLL(MKL()->DLL_mkl_core);
nlCloseDLL(MKL()->DLL_iomp5);
}
NLMultMatrixVectorFunc NLMultMatrixVector_MKL = NULL;
static void NLMultMatrixVector_MKL_impl(NLMatrix M_in, const double* x, double* y) {
NLCRSMatrix* M = (NLCRSMatrix*)(M_in);
nl_debug_assert(M_in->type == NL_MATRIX_CRS);
if(M->symmetric_storage) {
MKL()->mkl_cspblas_dcsrsymv(
"N", /* No transpose */
&M->m,
M->val,
M->rowptr,
M->colind,
x,
y
);
} else {
MKL()->mkl_cspblas_dcsrgemv(
"N", /* No transpose */
&M->m,
M->val,
M->rowptr,
M->colind,
x,
y
);
}
}
#define INTEL_PREFIX "/opt/intel/"
#define LIB_DIR "lib/intel64/"
#define MKL_PREFIX INTEL_PREFIX "mkl/" LIB_DIR
NLboolean nlInitExtension_MKL(void) {
NLenum flags = NL_LINK_LAZY | NL_LINK_GLOBAL;
if(nlCurrentContext == NULL || !nlCurrentContext->verbose) {
flags |= NL_LINK_QUIET;
}
if(MKL()->DLL_mkl_intel_lp64 != NULL) {
return nlExtensionIsInitialized_MKL();
}
MKL()->DLL_iomp5 = nlOpenDLL(
INTEL_PREFIX LIB_DIR "libiomp5.so",
flags
);
MKL()->DLL_mkl_core = nlOpenDLL(
MKL_PREFIX "libmkl_core.so",
flags
);
MKL()->DLL_mkl_intel_thread = nlOpenDLL(
MKL_PREFIX "libmkl_intel_thread.so",
flags
);
MKL()->DLL_mkl_intel_lp64 = nlOpenDLL(
MKL_PREFIX "libmkl_intel_lp64.so",
flags
);
if(
MKL()->DLL_iomp5 == NULL ||
MKL()->DLL_mkl_core == NULL ||
MKL()->DLL_mkl_intel_thread == NULL ||
MKL()->DLL_mkl_intel_lp64 == NULL
) {
return NL_FALSE;
}
find_mkl_func(mkl_cspblas_dcsrgemv);
find_mkl_func(mkl_cspblas_dcsrsymv);
if(nlExtensionIsInitialized_MKL()) {
NLMultMatrixVector_MKL = NLMultMatrixVector_MKL_impl;
}
atexit(nlTerminateExtension_MKL);
return NL_TRUE;
}
/******* extracted from nl_cuda.c *******/
/* CUDA structures and functions */
/* Repeated here so that one can compile OpenNL without */
/* requiring CUDA to be installed in the system. */
struct cudaDeviceProp {
char name[256];
size_t totalGlobalMem;
size_t sharedMemPerBlock;
int regsPerBlock;
int warpSize;
size_t memPitch;
int maxThreadsPerBlock;
int maxThreadsDim[3];
int maxGridSize[3];
int clockRate;
size_t totalConstMem;
int major;
int minor;
size_t textureAlignment;
size_t texturePitchAlignment;
int deviceOverlap;
int multiProcessorCount;
int kernelExecTimeoutEnabled;
int integrated;
int canMapHostMemory;
int computeMode;
int maxTexture1D;
int maxTexture1DMipmap;
int maxTexture1DLinear;
int maxTexture2D[2];
int maxTexture2DMipmap[2];
int maxTexture2DLinear[3];
int maxTexture2DGather[2];
int maxTexture3D[3];
int maxTexture3DAlt[3];
int maxTextureCubemap;
int maxTexture1DLayered[2];
int maxTexture2DLayered[3];
int maxTextureCubemapLayered[2];
int maxSurface1D;
int maxSurface2D[2];
int maxSurface3D[3];
int maxSurface1DLayered[2];
int maxSurface2DLayered[3];
int maxSurfaceCubemap;
int maxSurfaceCubemapLayered[2];
size_t surfaceAlignment;
int concurrentKernels;
int ECCEnabled;
int pciBusID;
int pciDeviceID;
int pciDomainID;
int tccDriver;
int asyncEngineCount;
int unifiedAddressing;
int memoryClockRate;
int memoryBusWidth;
int l2CacheSize;
int maxThreadsPerMultiProcessor;
int streamPrioritiesSupported;
int globalL1CacheSupported;
int localL1CacheSupported;
size_t sharedMemPerMultiprocessor;
int regsPerMultiprocessor;
int managedMemSupported;
int isMultiGpuBoard;
int multiGpuBoardGroupID;
int singleToDoublePrecisionPerfRatio;
int pageableMemoryAccess;
int concurrentManagedAccess;
char padding[1024]; /* More room for future evolutions */
};
enum cudaComputeMode {
cudaComputeModeDefault = 0,
cudaComputeModeExclusive = 1,
cudaComputeModeProhibited = 2,
cudaComputeModeExclusiveProcess = 3
};
enum cudaMemcpyKind {
cudaMemcpyHostToHost = 0,
cudaMemcpyHostToDevice = 1,
cudaMemcpyDeviceToHost = 2,
cudaMemcpyDeviceToDevice = 3,
cudaMemcpyDefault = 4
};
typedef int cudaError_t;
typedef cudaError_t (*FUNPTR_cudaGetDeviceCount)(int* device_count);
typedef cudaError_t (*FUNPTR_cudaGetDeviceProperties)(
struct cudaDeviceProp *props, int device
);
typedef cudaError_t (*FUNPTR_cudaDeviceReset)(void);
typedef cudaError_t (*FUNPTR_cudaMalloc)(void **devPtr, size_t size);
typedef cudaError_t (*FUNPTR_cudaFree)(void* devPtr);
typedef cudaError_t (*FUNPTR_cudaMemcpy)(
void *dst, const void *src, size_t count, enum cudaMemcpyKind kind
);
#define find_cuda_func(name) \
if( \
( \
CUDA()->name = \
(FUNPTR_##name)nlFindFunction( \
CUDA()->DLL_cudart,#name \
) \
) == NULL \
) { \
nlError("nlInitExtension_CUDA: function not found", #name); \
return NL_FALSE; \
}
/* CUBLAS structures and functions */
struct cublasContext;
typedef struct cublasContext *cublasHandle_t;
typedef int cublasStatus_t;
typedef enum {
CUBLAS_SIDE_LEFT =0,
CUBLAS_SIDE_RIGHT=1
} cublasSideMode_t;
typedef enum {
CUBLAS_FILL_MODE_LOWER=0,
CUBLAS_FILL_MODE_UPPER=1
} cublasFillMode_t;
typedef enum {
CUBLAS_OP_N=0,
CUBLAS_OP_T=1,
CUBLAS_OP_C=2
} cublasOperation_t;
typedef enum {
CUBLAS_DIAG_NON_UNIT=0,
CUBLAS_DIAG_UNIT=1
} cublasDiagType_t;
typedef cublasStatus_t (*FUNPTR_cublasCreate)(cublasHandle_t* handle);
typedef cublasStatus_t (*FUNPTR_cublasDestroy)(cublasHandle_t handle);
typedef cublasStatus_t (*FUNPTR_cublasGetVersion)(
cublasHandle_t handle, int* version
);
typedef cublasStatus_t (*FUNPTR_cublasDdot)(
cublasHandle_t handle, int n,
const double *x, int incx,
const double *y, int incy,
double *result
);
typedef cublasStatus_t (*FUNPTR_cublasDcopy)(
cublasHandle_t handle, int n,
const double *x, int incx,
const double *y, int incy
);
typedef cublasStatus_t (*FUNPTR_cublasDaxpy)(
cublasHandle_t handle, int n,
const double* alpha,
const double *x, int incx,
const double *y, int incy
);
typedef cublasStatus_t (*FUNPTR_cublasDscal)(
cublasHandle_t handle, int n,
const double* alpha,
const double *x, int incx
);
typedef cublasStatus_t (*FUNPTR_cublasDnrm2)(
cublasHandle_t handle, int n,
const double *x, int incx,
double* result
);
typedef cublasStatus_t (*FUNPTR_cublasDdgmm)(
cublasHandle_t handle, cublasSideMode_t mode,
int m, int n,
const double* A, int lda,
const double* x, int incx,
double* C, int ldc
);
typedef cublasStatus_t (*FUNPTR_cublasDgemv)(
cublasHandle_t handle,
cublasOperation_t trans,
int m,
int n,
const double *alpha,
const double *A,
int lda,
const double *x,
int incx,
const double *beta,
double *y,
int incy
);
typedef cublasStatus_t (*FUNPTR_cublasDtpsv)(
cublasHandle_t handle, cublasFillMode_t uplo,
cublasOperation_t trans, cublasDiagType_t diag,
int n, const double *AP,
double* x, int incx
);
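/*
 * In the cuBLAS v2 ABI most entry points are exported with a "_v2"
 * suffix: find_cublas_func() appends it, while find_cublas_func_v1()
 * looks the symbol up under its plain name, for the few functions that
 * are exported without the suffix.
 */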
#define find_cublas_func(name) \
if( \
( \
CUDA()->name = \
(FUNPTR_##name)nlFindFunction( \
CUDA()->DLL_cublas,#name "_v2" \
) \
) == NULL \
) { \
nlError("nlInitExtension_CUDA: function not found", #name); \
return NL_FALSE; \
}
#define find_cublas_func_v1(name) \
if( \
( \
CUDA()->name = \
(FUNPTR_##name)nlFindFunction( \
CUDA()->DLL_cublas,#name \
) \
) == NULL \
) { \
nlError("nlInitExtension_CUDA: function not found", #name); \
return NL_FALSE; \
}
/* CUSPARSE structures and functions */
struct cusparseContext;
typedef struct cusparseContext *cusparseHandle_t;
typedef int cusparseStatus_t;
struct cusparseMatDescr;
typedef struct cusparseMatDescr *cusparseMatDescr_t;
typedef enum {
CUSPARSE_MATRIX_TYPE_GENERAL = 0,
CUSPARSE_MATRIX_TYPE_SYMMETRIC = 1,
CUSPARSE_MATRIX_TYPE_HERMITIAN = 2,
CUSPARSE_MATRIX_TYPE_TRIANGULAR = 3
} cusparseMatrixType_t;
typedef enum {
CUSPARSE_INDEX_BASE_ZERO = 0,
CUSPARSE_INDEX_BASE_ONE = 1
} cusparseIndexBase_t;
typedef enum {
CUSPARSE_OPERATION_NON_TRANSPOSE = 0,
CUSPARSE_OPERATION_TRANSPOSE = 1,
CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE = 2
} cusparseOperation_t;
struct cusparseHybMat;
typedef struct cusparseHybMat *cusparseHybMat_t;
typedef enum {
CUSPARSE_HYB_PARTITION_AUTO = 0,
CUSPARSE_HYB_PARTITION_USER = 1,
CUSPARSE_HYB_PARTITION_MAX = 2
} cusparseHybPartition_t;
typedef cusparseStatus_t (*FUNPTR_cusparseCreate)(cusparseHandle_t* handle);
typedef cusparseStatus_t (*FUNPTR_cusparseDestroy)(cusparseHandle_t handle);
typedef cusparseStatus_t (*FUNPTR_cusparseGetVersion)(
cusparseHandle_t handle, int* version
);
typedef cusparseStatus_t (*FUNPTR_cusparseCreateMatDescr)(
cusparseMatDescr_t* descr
);
typedef cusparseStatus_t (*FUNPTR_cusparseDestroyMatDescr)(
cusparseMatDescr_t descr
);
typedef cusparseStatus_t (*FUNPTR_cusparseSetMatType)(
cusparseMatDescr_t descr, cusparseMatrixType_t mtype
);
typedef cusparseStatus_t (*FUNPTR_cusparseSetMatIndexBase)(
cusparseMatDescr_t descr, cusparseIndexBase_t ibase
);
typedef cusparseStatus_t (*FUNPTR_cusparseDcsrmv)(
cusparseHandle_t handle, cusparseOperation_t transA,
int m, int n, int nnz,
const double *alpha, const cusparseMatDescr_t descrA,
const double *csrSortedValA, const int *csrSortedRowPtrA,
const int *csrSortedColIndA, const double *x,
const double *beta, double *y
);
typedef cusparseStatus_t (*FUNPTR_cusparseCreateHybMat)(
cusparseHybMat_t *hybA
);
typedef cusparseStatus_t (*FUNPTR_cusparseDestroyHybMat)(
cusparseHybMat_t hybA
);
typedef cusparseStatus_t (*FUNPTR_cusparseDcsr2hyb)(
cusparseHandle_t handle,
int m,
int n,
const cusparseMatDescr_t descrA,
const double *csrSortedValA,
const int *csrSortedRowPtrA,
const int *csrSortedColIndA,
cusparseHybMat_t hybA,
int userEllWidth,
cusparseHybPartition_t partitionType
);
typedef cusparseStatus_t (*FUNPTR_cusparseDhybmv)(
cusparseHandle_t handle,
cusparseOperation_t transA,
const double *alpha,
const cusparseMatDescr_t descrA,
const cusparseHybMat_t hybA,
const double *x,
const double *beta,
double *y
);
#define find_cusparse_func(name) \
if( \
( \
CUDA()->name = \
(FUNPTR_##name)nlFindFunction( \
CUDA()->DLL_cusparse,#name \
) \
) == NULL \
) { \
nlError("nlInitExtension_CUDA : function not found", #name); \
return NL_FALSE; \
}
typedef struct {
NLdll DLL_cudart;
FUNPTR_cudaGetDeviceCount cudaGetDeviceCount;
FUNPTR_cudaGetDeviceProperties cudaGetDeviceProperties;
FUNPTR_cudaDeviceReset cudaDeviceReset;
FUNPTR_cudaMalloc cudaMalloc;
FUNPTR_cudaFree cudaFree;
FUNPTR_cudaMemcpy cudaMemcpy;
NLdll DLL_cublas;
cublasHandle_t HNDL_cublas;
FUNPTR_cublasCreate cublasCreate;
FUNPTR_cublasDestroy cublasDestroy;
FUNPTR_cublasGetVersion cublasGetVersion;
FUNPTR_cublasDdot cublasDdot;
FUNPTR_cublasDcopy cublasDcopy;
FUNPTR_cublasDaxpy cublasDaxpy;
FUNPTR_cublasDscal cublasDscal;
FUNPTR_cublasDnrm2 cublasDnrm2;
FUNPTR_cublasDdgmm cublasDdgmm;
FUNPTR_cublasDgemv cublasDgemv;
FUNPTR_cublasDtpsv cublasDtpsv;
NLdll DLL_cusparse;
cusparseHandle_t HNDL_cusparse;
FUNPTR_cusparseCreate cusparseCreate;
FUNPTR_cusparseDestroy cusparseDestroy;
FUNPTR_cusparseGetVersion cusparseGetVersion;
FUNPTR_cusparseCreateMatDescr cusparseCreateMatDescr;
FUNPTR_cusparseDestroyMatDescr cusparseDestroyMatDescr;
FUNPTR_cusparseSetMatType cusparseSetMatType;
FUNPTR_cusparseSetMatIndexBase cusparseSetMatIndexBase;
FUNPTR_cusparseDcsrmv cusparseDcsrmv;
FUNPTR_cusparseCreateHybMat cusparseCreateHybMat;
FUNPTR_cusparseDestroyHybMat cusparseDestroyHybMat;
FUNPTR_cusparseDcsr2hyb cusparseDcsr2hyb;
FUNPTR_cusparseDhybmv cusparseDhybmv;
int devID;
} CUDAContext;
static CUDAContext* CUDA() {
static CUDAContext context;
static NLboolean init = NL_FALSE;
if(!init) {
init = NL_TRUE;
memset(&context, 0, sizeof(context));
}
return &context;
}
NLboolean nlExtensionIsInitialized_CUDA() {
if(
CUDA()->DLL_cudart == NULL ||
CUDA()->cudaGetDeviceCount == NULL ||
CUDA()->cudaGetDeviceProperties == NULL ||
CUDA()->cudaDeviceReset == NULL ||
CUDA()->cudaMalloc == NULL ||
CUDA()->cudaFree == NULL ||
CUDA()->cudaMemcpy == NULL ||
CUDA()->DLL_cublas == NULL ||
CUDA()->HNDL_cublas == NULL ||
CUDA()->cublasCreate == NULL ||
CUDA()->cublasDestroy == NULL ||
CUDA()->cublasGetVersion == NULL ||
CUDA()->cublasDdot == NULL ||
CUDA()->cublasDcopy == NULL ||
CUDA()->cublasDaxpy == NULL ||
CUDA()->cublasDscal == NULL ||
CUDA()->cublasDnrm2 == NULL ||
CUDA()->cublasDdgmm == NULL ||
CUDA()->DLL_cusparse == NULL ||
CUDA()->HNDL_cusparse == NULL ||
CUDA()->cusparseCreate == NULL ||
CUDA()->cusparseDestroy == NULL ||
CUDA()->cusparseGetVersion == NULL ||
CUDA()->cusparseCreateMatDescr == NULL ||
CUDA()->cusparseDestroyMatDescr == NULL ||
CUDA()->cusparseSetMatType == NULL ||
CUDA()->cusparseSetMatIndexBase == NULL ||
CUDA()->cusparseDcsrmv == NULL ||
CUDA()->cusparseCreateHybMat == NULL ||
CUDA()->cusparseDestroyHybMat == NULL ||
CUDA()->cusparseDcsr2hyb == NULL ||
CUDA()->cusparseDhybmv == NULL
) {
return NL_FALSE;
}
return NL_TRUE;
}
static void nlTerminateExtension_CUDA(void) {
if(!nlExtensionIsInitialized_CUDA()) {
return;
}
CUDA()->cusparseDestroy(CUDA()->HNDL_cusparse);
nlCloseDLL(CUDA()->DLL_cusparse);
CUDA()->cublasDestroy(CUDA()->HNDL_cublas);
nlCloseDLL(CUDA()->DLL_cublas);
CUDA()->cudaDeviceReset();
nlCloseDLL(CUDA()->DLL_cudart);
}
static int ConvertSMVer2Cores(int major, int minor) {
    /* Defines for GPU Architecture types (using the SM version
       to determine the # of cores per SM) */
typedef struct {
int SM; /* 0xMm (hexadecimal notation),
M = SM Major version,
and m = SM minor version */
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] = {
{ 0x10, 8 }, /* Tesla Generation (SM 1.0) G80 class */
{ 0x11, 8 }, /* Tesla Generation (SM 1.1) G8x class */
{ 0x12, 8 }, /* Tesla Generation (SM 1.2) G9x class */
{ 0x13, 8 }, /* Tesla Generation (SM 1.3) GT200 class */
{ 0x20, 32 }, /* Fermi Generation (SM 2.0) GF100 class */
{ 0x21, 48 }, /* Fermi Generation (SM 2.1) GF10x class */
{ 0x30, 192}, /* Kepler Generation (SM 3.0) GK10x class */
{ 0x35, 192}, /* Kepler Generation (SM 3.5) GK11x class */
{ 0x50, 128}, /* Maxwell Generation (SM 5.0) GM10x class
(yes, #cores smaller than with 3.x) */
{ 0x52, 128}, /* Maxwell Generation (SM 5.2) GM20x class */
{ 0x60, 64 }, /* Pascal Generation (SM 6.0) GP100,GP102
(yes, 64, but GP100 has superfast double precision) */
{ 0x61, 128}, /* Pascal Generation (SM 6.1) GP104 class
(but FP64 runs as 1/32 FP32 speed) */
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1) {
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) {
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
    /* If we don't find the value, default to the last known
       architecture so that we can keep running */
nl_printf(
"MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n",
major, minor, nGpuArchCoresPerSM[8].Cores
);
return nGpuArchCoresPerSM[8].Cores;
}
static int getBestDeviceID() {
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
int compute_perf = 0;
struct cudaDeviceProp deviceProp;
CUDA()->cudaGetDeviceCount(&device_count);
/* Find the best major SM Architecture GPU device */
while (current_device < device_count) {
CUDA()->cudaGetDeviceProperties(&deviceProp, current_device);
/* If this GPU is not running on Compute Mode prohibited,
then we can add it to the list */
if (deviceProp.computeMode != cudaComputeModeProhibited) {
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
}
current_device++;
}
/* Find the best CUDA capable GPU device */
current_device = 0;
while (current_device < device_count) {
CUDA()->cudaGetDeviceProperties(&deviceProp, current_device);
/* If this GPU is not running on Compute Mode prohibited,
then we can add it to the list */
if (deviceProp.computeMode != cudaComputeModeProhibited) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = ConvertSMVer2Cores(
deviceProp.major, deviceProp.minor
);
}
compute_perf =
deviceProp.multiProcessorCount *
sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf) {
/* If we find GPU with SM major > 2, search only these */
if (best_SM_arch > 2) {
/* If our device==best_SM_arch, choose this, or else pass */
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
}
++current_device;
}
return max_perf_device;
}
#ifdef NL_OS_UNIX
# define LIBPREFIX "lib"
# ifdef NL_OS_APPLE
# define LIBEXTENSION ".dylib"
# else
# define LIBEXTENSION ".so"
# endif
#else
# define LIBPREFIX
# define LIBEXTENSION ".dll"
#endif
NLboolean nlInitExtension_CUDA(void) {
struct cudaDeviceProp deviceProp;
int cublas_version;
int cusparse_version;
NLenum flags = NL_LINK_LAZY | NL_LINK_GLOBAL;
if(nlCurrentContext == NULL || !nlCurrentContext->verbose) {
flags |= NL_LINK_QUIET;
}
if(nlExtensionIsInitialized_CUDA()) {
return NL_TRUE;
}
CUDA()->DLL_cudart = nlOpenDLL(
LIBPREFIX "cudart" LIBEXTENSION, flags
);
find_cuda_func(cudaGetDeviceCount);
find_cuda_func(cudaGetDeviceProperties);
find_cuda_func(cudaDeviceReset);
find_cuda_func(cudaMalloc);
find_cuda_func(cudaFree);
find_cuda_func(cudaMemcpy);
CUDA()->devID = getBestDeviceID();
if(CUDA()->cudaGetDeviceProperties(&deviceProp, CUDA()->devID)) {
nl_fprintf(stderr,"OpenNL CUDA: could not find a CUDA device\n");
return NL_FALSE;
}
nl_printf("OpenNL CUDA: Device ID = %d\n", CUDA()->devID);
nl_printf("OpenNL CUDA: Device name=%s\n", deviceProp.name);
nl_printf(
"OpenNL CUDA: Device has %d Multi-Processors, "
"%d cores per Multi-Processor, SM %d.%d compute capabilities\n",
deviceProp.multiProcessorCount,
ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
deviceProp.major, deviceProp.minor
);
nl_printf(
"OpenNL CUDA: %d kB shared mem. per block, %d per MP\n",
(int)(deviceProp.sharedMemPerBlock / 1024),
(int)(deviceProp.sharedMemPerMultiprocessor / 1024)
);
nl_printf(
"OpenNL CUDA: %d regs. per block, %d per MP\n",
deviceProp.regsPerBlock,
deviceProp.regsPerMultiprocessor
);
nl_printf(
"OpenNL CUDA: warpsize=%d\n",
deviceProp.warpSize
);
if ((deviceProp.major * 0x10 + deviceProp.minor) < 0x11) {
nl_fprintf(stderr, "OpenNL CUDA requires a minimum CUDA compute 1.1 capability\n");
CUDA()->cudaDeviceReset();
return NL_FALSE;
}
CUDA()->DLL_cublas = nlOpenDLL(
LIBPREFIX "cublas" LIBEXTENSION, flags
);
find_cublas_func(cublasCreate);
find_cublas_func(cublasDestroy);
find_cublas_func(cublasGetVersion);
find_cublas_func(cublasDdot);
find_cublas_func(cublasDaxpy);
find_cublas_func(cublasDcopy);
find_cublas_func(cublasDscal);
find_cublas_func(cublasDnrm2);
find_cublas_func(cublasDgemv);
find_cublas_func(cublasDtpsv);
find_cublas_func_v1(cublasDdgmm);
if(CUDA()->cublasCreate(&CUDA()->HNDL_cublas)) {
return NL_FALSE;
}
if(CUDA()->cublasGetVersion(CUDA()->HNDL_cublas, &cublas_version)) {
return NL_FALSE;
}
nl_printf("OpenNL CUDA: cublas version = %d\n", cublas_version);
CUDA()->DLL_cusparse = nlOpenDLL(
LIBPREFIX "cusparse" LIBEXTENSION, flags
);
find_cusparse_func(cusparseCreate);
find_cusparse_func(cusparseDestroy);
find_cusparse_func(cusparseGetVersion);
find_cusparse_func(cusparseCreateMatDescr);
find_cusparse_func(cusparseDestroyMatDescr);
find_cusparse_func(cusparseSetMatType);
find_cusparse_func(cusparseSetMatIndexBase);
find_cusparse_func(cusparseDcsrmv);
find_cusparse_func(cusparseCreateHybMat);
find_cusparse_func(cusparseDestroyHybMat);
find_cusparse_func(cusparseDcsr2hyb);
find_cusparse_func(cusparseDhybmv);
if(CUDA()->cusparseCreate(&CUDA()->HNDL_cusparse)) {
return NL_FALSE;
}
if(CUDA()->cusparseGetVersion(CUDA()->HNDL_cusparse, &cusparse_version)) {
return NL_FALSE;
}
nl_printf("OpenNL CUDA: cusparse version = %d\n", cusparse_version);
if(!nlExtensionIsInitialized_CUDA()) {
return NL_FALSE;
}
atexit(nlTerminateExtension_CUDA);
return NL_TRUE;
}
static void nlCUDACheckImpl(int status, int line) {
if(status != 0) {
nl_fprintf(stderr,"nl_cuda.c:%d fatal error %d\n",line, status);
CUDA()->cudaDeviceReset();
exit(-1);
}
}
#define nlCUDACheck(status) nlCUDACheckImpl(status, __LINE__)
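/* Example: nlCUDACheck(CUDA()->cudaMalloc(&ptr, size)); any nonzero CUDA
* status aborts the process after printing the offending line number. */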
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
cusparseMatDescr_t descr;
NLuint nnz;
int* colind;
int* rowptr;
double* val;
cusparseHybMat_t hyb;
} NLCUDASparseMatrix;
static void nlCRSMatrixCUDADestroyCRS(NLCUDASparseMatrix* Mcuda) {
if(Mcuda->colind != NULL) {
nlCUDACheck(CUDA()->cudaFree(Mcuda->colind));
Mcuda->colind = NULL;
}
if(Mcuda->rowptr != NULL) {
nlCUDACheck(CUDA()->cudaFree(Mcuda->rowptr));
Mcuda->rowptr = NULL;
}
if(Mcuda->val != NULL) {
nlCUDACheck(CUDA()->cudaFree(Mcuda->val));
Mcuda->val = NULL;
}
}
static void nlCRSMatrixCUDADestroy(NLCUDASparseMatrix* Mcuda) {
if(Mcuda->hyb != NULL) {
nlCUDACheck(CUDA()->cusparseDestroyHybMat(Mcuda->hyb));
}
nlCRSMatrixCUDADestroyCRS(Mcuda);
nlCUDACheck(CUDA()->cusparseDestroyMatDescr(Mcuda->descr));
memset(Mcuda, 0, sizeof(*Mcuda));
}
static void nlCRSMatrixCUDAMult(
NLCUDASparseMatrix* Mcuda, const double* x, double* y
) {
const double one = 1;
const double zero = 0;
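/*
* Both cusparse kernels below compute y = alpha*A*x + beta*y;
* with alpha = 1 and beta = 0 this reduces to the plain product y = A*x.
*/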
if(Mcuda->hyb != NULL) {
nlCUDACheck(
CUDA()->cusparseDhybmv(
CUDA()->HNDL_cusparse,
CUSPARSE_OPERATION_NON_TRANSPOSE,
&one,
Mcuda->descr,
Mcuda->hyb,
x,
&zero,
y
)
);
} else {
nlCUDACheck(
CUDA()->cusparseDcsrmv(
CUDA()->HNDL_cusparse,
CUSPARSE_OPERATION_NON_TRANSPOSE,
(int)Mcuda->m,
(int)Mcuda->n,
(int)Mcuda->nnz,
&one,
Mcuda->descr,
Mcuda->val,
Mcuda->rowptr,
Mcuda->colind,
x,
&zero,
y
)
);
}
nlCUDABlas()->flops += (NLulong)(2*Mcuda->nnz);
}
NLMatrix nlCUDAMatrixNewFromCRSMatrix(NLMatrix M_in) {
NLCUDASparseMatrix* Mcuda = NL_NEW(NLCUDASparseMatrix);
NLCRSMatrix* M = (NLCRSMatrix*)(M_in);
size_t colind_sz, rowptr_sz, val_sz;
nl_assert(M_in->type == NL_MATRIX_CRS);
nlCUDACheck(CUDA()->cusparseCreateMatDescr(&Mcuda->descr));
if(M->symmetric_storage) {
nlCUDACheck(CUDA()->cusparseSetMatType(
Mcuda->descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC)
);
} else {
nlCUDACheck(CUDA()->cusparseSetMatType(
Mcuda->descr, CUSPARSE_MATRIX_TYPE_GENERAL)
);
}
nlCUDACheck(CUDA()->cusparseSetMatIndexBase(
Mcuda->descr, CUSPARSE_INDEX_BASE_ZERO)
);
Mcuda->m = M->m;
Mcuda->n = M->n;
Mcuda->nnz = nlCRSMatrixNNZ(M);
colind_sz = (size_t)Mcuda->nnz*sizeof(int);
rowptr_sz = (size_t)(Mcuda->m+1)*sizeof(int);
val_sz = (size_t)Mcuda->nnz*sizeof(double);
nlCUDACheck(CUDA()->cudaMalloc((void**)&Mcuda->colind,colind_sz));
nlCUDACheck(CUDA()->cudaMalloc((void**)&Mcuda->rowptr,rowptr_sz));
nlCUDACheck(CUDA()->cudaMalloc((void**)&Mcuda->val,val_sz));
nlCUDACheck(CUDA()->cudaMemcpy(
Mcuda->colind, M->colind, colind_sz, cudaMemcpyHostToDevice)
);
nlCUDACheck(CUDA()->cudaMemcpy(
Mcuda->rowptr, M->rowptr, rowptr_sz, cudaMemcpyHostToDevice)
);
nlCUDACheck(CUDA()->cudaMemcpy(
Mcuda->val, M->val, val_sz, cudaMemcpyHostToDevice)
);
Mcuda->hyb=NULL;
if(!M->symmetric_storage) {
nlCUDACheck(CUDA()->cusparseCreateHybMat(&Mcuda->hyb));
nlCUDACheck(CUDA()->cusparseDcsr2hyb(
CUDA()->HNDL_cusparse,
(int)M->m,
(int)M->n,
Mcuda->descr,
Mcuda->val,
Mcuda->rowptr,
Mcuda->colind,
Mcuda->hyb,
0,
CUSPARSE_HYB_PARTITION_AUTO
));
/* We no longer need the CRS part */
nlCRSMatrixCUDADestroyCRS(Mcuda);
}
Mcuda->type=NL_MATRIX_OTHER;
Mcuda->destroy_func=(NLDestroyMatrixFunc)nlCRSMatrixCUDADestroy;
Mcuda->mult_func=(NLMultMatrixVectorFunc)nlCRSMatrixCUDAMult;
return (NLMatrix)Mcuda;
}
typedef struct {
NLuint m;
NLuint n;
NLenum type;
NLDestroyMatrixFunc destroy_func;
NLMultMatrixVectorFunc mult_func;
double* val;
} NLDiagonalMatrixCUDA;
static void nlDiagonalMatrixCUDADestroy(NLDiagonalMatrixCUDA* Mcuda) {
nlCUDACheck(CUDA()->cudaFree(Mcuda->val));
memset(Mcuda, 0, sizeof(*Mcuda));
}
static void nlDiagonalMatrixCUDAMult(
NLDiagonalMatrixCUDA* Mcuda, const double* x, double* y
) {
int N = (int)Mcuda->n;
/*
* vector x vector component-wise product implemented
* using diagonal matrix x matrix function.
*/
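/*
* With CUBLAS_SIDE_LEFT, cublasDdgmm computes C = diag(X) * A; here A is
* the input vector x viewed as an N x 1 matrix and X is Mcuda->val, so
* the call below yields y[i] = val[i] * x[i].
*/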
nlCUDACheck(CUDA()->cublasDdgmm(
CUDA()->HNDL_cublas, CUBLAS_SIDE_LEFT,
N, 1,
x, N,
Mcuda->val, 1,
y, N
));
nlCUDABlas()->flops += (NLulong)N;
}
static NLMatrix nlDiagonalMatrixCUDANew(const double* diag, NLuint n) {
NLDiagonalMatrixCUDA* Mcuda = NL_NEW(NLDiagonalMatrixCUDA);
Mcuda->m = n;
Mcuda->n = n;
Mcuda->type = NL_MATRIX_OTHER;
nlCUDACheck(CUDA()->cudaMalloc(
(void**)&Mcuda->val, n*sizeof(double))
);
nlCUDACheck(CUDA()->cudaMemcpy(
Mcuda->val, diag, n*sizeof(double), cudaMemcpyHostToDevice)
);
Mcuda->destroy_func=(NLDestroyMatrixFunc)nlDiagonalMatrixCUDADestroy;
Mcuda->mult_func=(NLMultMatrixVectorFunc)nlDiagonalMatrixCUDAMult;
return (NLMatrix)Mcuda;
}
NLMatrix nlCUDAJacobiPreconditionerNewFromCRSMatrix(NLMatrix M_in) {
NLuint N = M_in->n;
NLuint i,jj;
double* diag = NULL;
NLMatrix result = NULL;
NLCRSMatrix* M = (NLCRSMatrix*)(M_in);
nl_assert(M_in->type == NL_MATRIX_CRS);
diag = NL_NEW_ARRAY(double,N);
for(i=0; i<N; ++i) {
for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
if(M->colind[jj] == i) {
diag[i] = M->val[jj];
}
}
}
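/* Invert the diagonal; a zero entry maps to 1 so that the preconditioner
acts as the identity on the corresponding row. */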
for(i=0; i<N; ++i) {
diag[i] = ((diag[i] == 0.0) ? 1.0 : 1.0 / diag[i]);
}
result = nlDiagonalMatrixCUDANew(diag, N);
NL_DELETE_ARRAY(diag);
return result;
}
static void* cuda_blas_malloc(
NLBlas_t blas, NLmemoryType type, size_t size
) {
void* result = NULL;
blas->used_ram[type] += (NLulong)size;
blas->max_used_ram[type] = MAX(
blas->max_used_ram[type],blas->used_ram[type]
);
if(type == NL_HOST_MEMORY) {
result = malloc(size);
} else {
nlCUDACheck(CUDA()->cudaMalloc(&result,size));
}
return result;
}
static void cuda_blas_free(
NLBlas_t blas, NLmemoryType type, size_t size, void* ptr
) {
blas->used_ram[type] -= (NLulong)size;
if(type == NL_HOST_MEMORY) {
free(ptr);
} else {
nlCUDACheck(CUDA()->cudaFree(ptr));
}
}
static void cuda_blas_memcpy(
NLBlas_t blas,
void* to, NLmemoryType to_type,
void* from, NLmemoryType from_type,
size_t size
) {
enum cudaMemcpyKind kind = cudaMemcpyDefault;
nl_arg_used(blas);
if(from_type == NL_HOST_MEMORY) {
if(to_type == NL_HOST_MEMORY) {
kind = cudaMemcpyHostToHost;
} else {
kind = cudaMemcpyHostToDevice;
}
} else {
if(to_type == NL_HOST_MEMORY) {
kind = cudaMemcpyDeviceToHost;
} else {
kind = cudaMemcpyDeviceToDevice;
}
}
nlCUDACheck(CUDA()->cudaMemcpy(to, from, size, kind));
}
static void cuda_blas_dcopy(
NLBlas_t blas, int n, const double *x, int incx, double *y, int incy
) {
nl_arg_used(blas);
CUDA()->cublasDcopy(CUDA()->HNDL_cublas,n,x,incx,y,incy);
}
static double cuda_blas_ddot(
NLBlas_t blas, int n, const double *x, int incx, const double *y, int incy
) {
double result = 0.0;
blas->flops += (NLulong)(2*n);
CUDA()->cublasDdot(CUDA()->HNDL_cublas,n,x,incx,y,incy,&result);
return result;
}
static double cuda_blas_dnrm2(
NLBlas_t blas, int n, const double *x, int incx
) {
double result = 0.0;
blas->flops += (NLulong)(2*n);
CUDA()->cublasDnrm2(CUDA()->HNDL_cublas,n,x,incx,&result);
return result;
}
static void cuda_blas_daxpy(
NLBlas_t blas, int n,
double a, const double *x, int incx, double *y, int incy
) {
blas->flops += (NLulong)(2*n);
CUDA()->cublasDaxpy(CUDA()->HNDL_cublas,n,&a,x,incx,y,incy);
}
static void cuda_blas_dscal(
NLBlas_t blas, int n, double a, double *x, int incx
) {
blas->flops += (NLulong)n;
CUDA()->cublasDscal(CUDA()->HNDL_cublas,n,&a,x,incx);
}
static void cuda_blas_dgemv(
NLBlas_t blas, MatrixTranspose trans, int m, int n, double alpha,
const double *A, int ldA, const double *x, int incx,
double beta, double *y, int incy
) {
nl_arg_used(blas);
/* TODO: update FLOPS */
CUDA()->cublasDgemv(
CUDA()->HNDL_cublas, (cublasOperation_t)trans,
m, n, &alpha, A, ldA, x, incx, &beta, y, incy
);
}
static void cuda_blas_dtpsv(
NLBlas_t blas, MatrixTriangle uplo, MatrixTranspose trans,
MatrixUnitTriangular diag, int n, const double *AP,
double *x, int incx
) {
nl_arg_used(blas);
/* TODO: update FLOPS */
CUDA()->cublasDtpsv(
CUDA()->HNDL_cublas,
(cublasFillMode_t)uplo,
(cublasOperation_t)trans,
(cublasDiagType_t)diag, n,
AP, x, incx
);
}
NLBlas_t nlCUDABlas() {
static NLboolean initialized = NL_FALSE;
static struct NLBlas blas;
if(!initialized) {
memset(&blas, 0, sizeof(blas));
blas.has_unified_memory = NL_FALSE;
blas.Malloc = cuda_blas_malloc;
blas.Free = cuda_blas_free;
blas.Memcpy = cuda_blas_memcpy;
blas.Dcopy = cuda_blas_dcopy;
blas.Ddot = cuda_blas_ddot;
blas.Dnrm2 = cuda_blas_dnrm2;
blas.Daxpy = cuda_blas_daxpy;
blas.Dscal = cuda_blas_dscal;
blas.Dgemv = cuda_blas_dgemv;
blas.Dtpsv = cuda_blas_dtpsv;
nlBlasResetStats(&blas);
initialized = NL_TRUE;
}
return &blas;
}
/******* extracted from nl_api.c *******/
static NLSparseMatrix* nlGetCurrentSparseMatrix() {
NLSparseMatrix* result = NULL;
switch(nlCurrentContext->matrix_mode) {
case NL_STIFFNESS_MATRIX: {
nl_assert(nlCurrentContext->M != NULL);
nl_assert(nlCurrentContext->M->type == NL_MATRIX_SPARSE_DYNAMIC);
result = (NLSparseMatrix*)(nlCurrentContext->M);
} break;
case NL_MASS_MATRIX: {
nl_assert(nlCurrentContext->B != NULL);
nl_assert(nlCurrentContext->B->type == NL_MATRIX_SPARSE_DYNAMIC);
result = (NLSparseMatrix*)(nlCurrentContext->B);
} break;
default:
nl_assert_not_reached;
}
return result;
}
NLboolean nlInitExtension(const char* extension) {
if(!strcmp(extension, "SUPERLU")) {
return nlInitExtension_SUPERLU();
} else if(!strcmp(extension, "CHOLMOD")) {
return nlInitExtension_CHOLMOD();
} else if(!strcmp(extension, "ARPACK")) {
/*
* SUPERLU is needed by OpenNL's ARPACK driver
* (factorizes the matrix for the shift-invert spectral
* transform).
*/
return nlInitExtension_SUPERLU() && nlInitExtension_ARPACK();
} else if(!strcmp(extension, "MKL")) {
return nlInitExtension_MKL();
} else if(!strcmp(extension, "CUDA")) {
return nlInitExtension_CUDA();
}
return NL_FALSE;
}
NLboolean nlExtensionIsInitialized(const char* extension) {
if(!strcmp(extension, "SUPERLU")) {
return nlExtensionIsInitialized_SUPERLU();
} else if(!strcmp(extension, "CHOLMOD")) {
return nlExtensionIsInitialized_CHOLMOD();
} else if(!strcmp(extension, "ARPACK")) {
/*
* SUPERLU is needed by OpenNL's ARPACK driver
* (factorizes the matrix for the shift-invert spectral
* transform).
*/
return nlExtensionIsInitialized_SUPERLU() && nlExtensionIsInitialized_ARPACK();
} else if(!strcmp(extension, "MKL")) {
return nlExtensionIsInitialized_MKL();
} else if(!strcmp(extension, "CUDA")) {
return nlExtensionIsInitialized_CUDA();
}
return NL_FALSE;
}
void nlInitialize(int argc, char** argv) {
int i=0;
char* ptr=NULL;
char extension[255];
/* Find all the arguments with the form:
* nl:<extension>=true|false
* and try to activate the corresponding extensions.
*/
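/* For example, launching a program as "myprog nl:CUDA=true" triggers
nlInitExtension("CUDA") (program name hypothetical). */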
for(i=1; i<argc; ++i) {
ptr = strstr(argv[i],"=true");
if(!strncmp(argv[i], "nl:", 3) &&
(strlen(argv[i]) > 3) &&
(ptr != NULL)) {
strncpy(extension, argv[i]+3, (size_t)(ptr-argv[i]-3));
extension[(size_t)(ptr-argv[i]-3)] = '\0';
if(nlInitExtension(extension)) {
nl_fprintf(stdout,"OpenNL %s: initialized\n", extension);
} else {
nl_fprintf(stderr,"OpenNL %s: could not initialize\n", extension);
}
}
}
}
/* Get/Set parameters */
void nlSolverParameterd(NLenum pname, NLdouble param) {
nlCheckState(NL_STATE_INITIAL);
switch(pname) {
case NL_THRESHOLD: {
nl_assert(param >= 0);
nlCurrentContext->threshold = (NLdouble)param;
nlCurrentContext->threshold_defined = NL_TRUE;
} break;
case NL_OMEGA: {
nl_range_assert(param,1.0,2.0);
nlCurrentContext->omega = (NLdouble)param;
} break;
default: {
nlError("nlSolverParameterd","Invalid parameter");
nl_assert_not_reached;
}
}
}
void nlSolverParameteri(NLenum pname, NLint param) {
nlCheckState(NL_STATE_INITIAL);
switch(pname) {
case NL_SOLVER: {
nlCurrentContext->solver = (NLenum)param;
} break;
case NL_NB_VARIABLES: {
nl_assert(param > 0);
nlCurrentContext->nb_variables = (NLuint)param;
} break;
case NL_NB_SYSTEMS: {
nl_assert(param > 0);
nlCurrentContext->nb_systems = (NLuint)param;
} break;
case NL_LEAST_SQUARES: {
nlCurrentContext->least_squares = (NLboolean)param;
} break;
case NL_MAX_ITERATIONS: {
nl_assert(param > 0);
nlCurrentContext->max_iterations = (NLuint)param;
nlCurrentContext->max_iterations_defined = NL_TRUE;
} break;
case NL_SYMMETRIC: {
nlCurrentContext->symmetric = (NLboolean)param;
} break;
case NL_INNER_ITERATIONS: {
nl_assert(param > 0);
nlCurrentContext->inner_iterations = (NLuint)param;
} break;
case NL_PRECONDITIONER: {
nlCurrentContext->preconditioner = (NLuint)param;
nlCurrentContext->preconditioner_defined = NL_TRUE;
} break;
default: {
nlError("nlSolverParameteri","Invalid parameter");
nl_assert_not_reached;
}
}
}
void nlGetBooleanv(NLenum pname, NLboolean* params) {
switch(pname) {
case NL_LEAST_SQUARES: {
*params = nlCurrentContext->least_squares;
} break;
case NL_SYMMETRIC: {
*params = nlCurrentContext->symmetric;
} break;
default: {
nlError("nlGetBooleanv","Invalid parameter");
nl_assert_not_reached;
}
}
}
void nlGetDoublev(NLenum pname, NLdouble* params) {
switch(pname) {
case NL_THRESHOLD: {
*params = nlCurrentContext->threshold;
} break;
case NL_OMEGA: {
*params = nlCurrentContext->omega;
} break;
case NL_ERROR: {
*params = nlCurrentContext->error;
} break;
case NL_ELAPSED_TIME: {
*params = nlCurrentContext->elapsed_time;
} break;
case NL_GFLOPS: {
if(nlCurrentContext->elapsed_time == 0) {
*params = 0.0;
} else {
*params = (NLdouble)(nlCurrentContext->flops) /
(nlCurrentContext->elapsed_time * 1e9);
}
} break;
default: {
nlError("nlGetDoublev","Invalid parameter");
nl_assert_not_reached;
}
}
}
void nlGetIntegerv(NLenum pname, NLint* params) {
switch(pname) {
case NL_SOLVER: {
*params = (NLint)(nlCurrentContext->solver);
} break;
case NL_NB_VARIABLES: {
*params = (NLint)(nlCurrentContext->nb_variables);
} break;
case NL_NB_SYSTEMS: {
*params = (NLint)(nlCurrentContext->nb_systems);
} break;
case NL_LEAST_SQUARES: {
*params = (NLint)(nlCurrentContext->least_squares);
} break;
case NL_MAX_ITERATIONS: {
*params = (NLint)(nlCurrentContext->max_iterations);
} break;
case NL_SYMMETRIC: {
*params = (NLint)(nlCurrentContext->symmetric);
} break;
case NL_USED_ITERATIONS: {
*params = (NLint)(nlCurrentContext->used_iterations);
} break;
case NL_PRECONDITIONER: {
*params = (NLint)(nlCurrentContext->preconditioner);
} break;
case NL_NNZ: {
*params = (NLint)(nlMatrixNNZ(nlCurrentContext->M));
} break;
default: {
nlError("nlGetIntegerv","Invalid parameter");
nl_assert_not_reached;
}
}
}
/* Enable / Disable */
void nlEnable(NLenum pname) {
switch(pname) {
case NL_NORMALIZE_ROWS: {
nl_assert(nlCurrentContext->state != NL_STATE_ROW);
nlCurrentContext->normalize_rows = NL_TRUE;
} break;
case NL_VERBOSE: {
nlCurrentContext->verbose = NL_TRUE;
} break;
case NL_VARIABLES_BUFFER: {
nlCurrentContext->user_variable_buffers = NL_TRUE;
} break;
default: {
nlError("nlEnable","Invalid parameter");
nl_assert_not_reached;
}
}
}
void nlDisable(NLenum pname) {
switch(pname) {
case NL_NORMALIZE_ROWS: {
nl_assert(nlCurrentContext->state != NL_STATE_ROW);
nlCurrentContext->normalize_rows = NL_FALSE;
} break;
case NL_VERBOSE: {
nlCurrentContext->verbose = NL_FALSE;
} break;
case NL_VARIABLES_BUFFER: {
nlCurrentContext->user_variable_buffers = NL_FALSE;
} break;
default: {
nlError("nlDisable","Invalid parameter");
nl_assert_not_reached;
}
}
}
NLboolean nlIsEnabled(NLenum pname) {
NLboolean result = NL_FALSE;
switch(pname) {
case NL_NORMALIZE_ROWS: {
result = nlCurrentContext->normalize_rows;
} break;
case NL_VERBOSE: {
result = nlCurrentContext->verbose;
} break;
case NL_VARIABLES_BUFFER: {
result = nlCurrentContext->user_variable_buffers;
} break;
default: {
nlError("nlIsEnables","Invalid parameter");
nl_assert_not_reached;
}
}
return result;
}
/* NL functions */
void nlSetFunction(NLenum pname, NLfunc param) {
switch(pname) {
case NL_FUNC_SOLVER:
nlCurrentContext->solver_func = (NLSolverFunc)(param);
nlCurrentContext->solver = NL_SOLVER_USER;
break;
case NL_FUNC_MATRIX:
nlDeleteMatrix(nlCurrentContext->M);
nlCurrentContext->M = nlMatrixNewFromFunction(
nlCurrentContext->n, nlCurrentContext->n,
(NLMatrixFunc)param
);
break;
case NL_FUNC_PRECONDITIONER:
nlDeleteMatrix(nlCurrentContext->P);
nlCurrentContext->P = nlMatrixNewFromFunction(
nlCurrentContext->n, nlCurrentContext->n,
(NLMatrixFunc)param
);
nlCurrentContext->preconditioner = NL_PRECOND_USER;
break;
case NL_FUNC_PROGRESS:
nlCurrentContext->progress_func = (NLProgressFunc)(param);
break;
default:
nlError("nlSetFunction","Invalid parameter");
nl_assert_not_reached;
}
}
void nlGetFunction(NLenum pname, NLfunc* param) {
switch(pname) {
case NL_FUNC_SOLVER:
*param = (NLfunc)(nlCurrentContext->solver_func);
break;
case NL_FUNC_MATRIX:
*param = (NLfunc)(nlMatrixGetFunction(nlCurrentContext->M));
break;
case NL_FUNC_PRECONDITIONER:
*param = (NLfunc)(nlMatrixGetFunction(nlCurrentContext->P));
break;
default:
nlError("nlGetFunction","Invalid parameter");
nl_assert_not_reached;
}
}
/* Get/Set Lock/Unlock variables */
void nlSetVariable(NLuint index, NLdouble value) {
nlCheckState(NL_STATE_SYSTEM);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables - 1);
NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[0],index) = value;
}
void nlMultiSetVariable(NLuint index, NLuint system, NLdouble value) {
nlCheckState(NL_STATE_SYSTEM);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables-1);
nl_debug_range_assert(system, 0, nlCurrentContext->nb_systems-1);
NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[system],index) = value;
}
NLdouble nlGetVariable(NLuint index) {
nl_assert(nlCurrentContext->state != NL_STATE_INITIAL);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables - 1);
return NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[0],index);
}
NLdouble nlMultiGetVariable(NLuint index, NLuint system) {
nl_assert(nlCurrentContext->state != NL_STATE_INITIAL);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables-1);
nl_debug_range_assert(system, 0, nlCurrentContext->nb_systems-1);
return NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[system],index);
}
void nlLockVariable(NLuint index) {
nlCheckState(NL_STATE_SYSTEM);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables - 1);
nlCurrentContext->variable_is_locked[index] = NL_TRUE;
}
void nlUnlockVariable(NLuint index) {
nlCheckState(NL_STATE_SYSTEM);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables - 1);
nlCurrentContext->variable_is_locked[index] = NL_FALSE;
}
NLboolean nlVariableIsLocked(NLuint index) {
nl_assert(nlCurrentContext->state != NL_STATE_INITIAL);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables - 1);
return nlCurrentContext->variable_is_locked[index];
}
/* System construction */
static void nlVariablesToVector() {
NLuint n=nlCurrentContext->n;
NLuint k,i,index;
NLdouble value;
nl_assert(nlCurrentContext->x != NULL);
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
for(i=0; i<nlCurrentContext->nb_variables; ++i) {
if(!nlCurrentContext->variable_is_locked[i]) {
index = nlCurrentContext->variable_index[i];
nl_assert(index < nlCurrentContext->n);
value = NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[k],i);
nlCurrentContext->x[index+k*n] = value;
}
}
}
}
static void nlVectorToVariables() {
NLuint n=nlCurrentContext->n;
NLuint k,i,index;
NLdouble value;
nl_assert(nlCurrentContext->x != NULL);
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
for(i=0; i<nlCurrentContext->nb_variables; ++i) {
if(!nlCurrentContext->variable_is_locked[i]) {
index = nlCurrentContext->variable_index[i];
nl_assert(index < nlCurrentContext->n);
value = nlCurrentContext->x[index+k*n];
NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[k],i) = value;
}
}
}
}
static void nlBeginSystem() {
NLuint k;
nlTransition(NL_STATE_INITIAL, NL_STATE_SYSTEM);
nl_assert(nlCurrentContext->nb_variables > 0);
nlCurrentContext->variable_buffer = NL_NEW_ARRAY(
NLBufferBinding, nlCurrentContext->nb_systems
);
if(nlCurrentContext->user_variable_buffers) {
nlCurrentContext->variable_value = NULL;
} else {
nlCurrentContext->variable_value = NL_NEW_ARRAY(
NLdouble,
nlCurrentContext->nb_variables * nlCurrentContext->nb_systems
);
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
nlCurrentContext->variable_buffer[k].base_address =
nlCurrentContext->variable_value +
k * nlCurrentContext->nb_variables;
nlCurrentContext->variable_buffer[k].stride = sizeof(NLdouble);
}
}
nlCurrentContext->variable_is_locked = NL_NEW_ARRAY(
NLboolean, nlCurrentContext->nb_variables
);
nlCurrentContext->variable_index = NL_NEW_ARRAY(
NLuint, nlCurrentContext->nb_variables
);
}
static void nlEndSystem() {
nlTransition(NL_STATE_MATRIX_CONSTRUCTED, NL_STATE_SYSTEM_CONSTRUCTED);
}
static void nlInitializeM() {
NLuint i;
NLuint n = 0;
NLenum storage = NL_MATRIX_STORE_ROWS;
for(i=0; i<nlCurrentContext->nb_variables; i++) {
if(!nlCurrentContext->variable_is_locked[i]) {
nlCurrentContext->variable_index[i] = n;
n++;
} else {
nlCurrentContext->variable_index[i] = (NLuint)~0;
}
}
nlCurrentContext->n = n;
/*
* If the user trusts OpenNL and has left solver as NL_SOLVER_DEFAULT,
* then we setup reasonable parameters for him.
*/
if(nlCurrentContext->solver == NL_SOLVER_DEFAULT) {
if(nlCurrentContext->least_squares || nlCurrentContext->symmetric) {
nlCurrentContext->solver = NL_CG;
if(!nlCurrentContext->preconditioner_defined) {
nlCurrentContext->preconditioner = NL_PRECOND_JACOBI;
}
} else {
nlCurrentContext->solver = NL_BICGSTAB;
}
if(!nlCurrentContext->max_iterations_defined) {
nlCurrentContext->max_iterations = n*5;
}
if(!nlCurrentContext->threshold_defined) {
nlCurrentContext->threshold = 1e-6;
}
}
/* SSOR preconditioner requires rows and columns */
if(nlCurrentContext->preconditioner == NL_PRECOND_SSOR) {
storage = (storage | NL_MATRIX_STORE_COLUMNS);
}
/* a least squares problem results in a symmetric matrix */
if(nlCurrentContext->least_squares) {
nlCurrentContext->symmetric = NL_TRUE;
}
if(
nlCurrentContext->symmetric &&
nlCurrentContext->preconditioner == NL_PRECOND_SSOR
) {
/*
* For now, only used with SSOR preconditioner, because
* for other modes it is either unsupported (SUPERLU) or
* causes performance loss (non-parallel sparse SpMV)
*/
storage = (storage | NL_MATRIX_STORE_SYMMETRIC);
}
nlCurrentContext->M = (NLMatrix)(NL_NEW(NLSparseMatrix));
nlSparseMatrixConstruct(
(NLSparseMatrix*)(nlCurrentContext->M), n, n, storage
);
nlCurrentContext->x = NL_NEW_ARRAY(
NLdouble, n*nlCurrentContext->nb_systems
);
nlCurrentContext->b = NL_NEW_ARRAY(
NLdouble, n*nlCurrentContext->nb_systems
);
nlVariablesToVector();
nlRowColumnConstruct(&nlCurrentContext->af);
nlRowColumnConstruct(&nlCurrentContext->al);
nlCurrentContext->right_hand_side = NL_NEW_ARRAY(
double, nlCurrentContext->nb_systems
);
nlCurrentContext->current_row = 0;
}
static void nlEndMatrix() {
nlTransition(NL_STATE_MATRIX, NL_STATE_MATRIX_CONSTRUCTED);
nlRowColumnClear(&nlCurrentContext->af);
nlRowColumnClear(&nlCurrentContext->al);
if(!nlCurrentContext->least_squares) {
nl_assert(
nlCurrentContext->ij_coefficient_called || (
nlCurrentContext->current_row ==
nlCurrentContext->n
)
);
}
}
static void nlBeginRow() {
nlTransition(NL_STATE_MATRIX, NL_STATE_ROW);
nlRowColumnZero(&nlCurrentContext->af);
nlRowColumnZero(&nlCurrentContext->al);
}
static void nlScaleRow(NLdouble s) {
NLRowColumn* af = &nlCurrentContext->af;
NLRowColumn* al = &nlCurrentContext->al;
NLuint nf = af->size;
NLuint nl = al->size;
NLuint i,k;
for(i=0; i<nf; i++) {
af->coeff[i].value *= s;
}
for(i=0; i<nl; i++) {
al->coeff[i].value *= s;
}
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
nlCurrentContext->right_hand_side[k] *= s;
}
}
static void nlNormalizeRow(NLdouble weight) {
NLRowColumn* af = &nlCurrentContext->af;
NLRowColumn* al = &nlCurrentContext->al;
NLuint nf = af->size;
NLuint nl = al->size;
NLuint i;
NLdouble norm = 0.0;
for(i=0; i<nf; i++) {
norm += af->coeff[i].value * af->coeff[i].value;
}
for(i=0; i<nl; i++) {
norm += al->coeff[i].value * al->coeff[i].value;
}
norm = sqrt(norm);
nlScaleRow(weight / norm);
}
static void nlEndRow() {
NLRowColumn* af = &nlCurrentContext->af;
NLRowColumn* al = &nlCurrentContext->al;
NLSparseMatrix* M = nlGetCurrentSparseMatrix();
NLdouble* b = nlCurrentContext->b;
NLuint nf = af->size;
NLuint nl = al->size;
NLuint n = nlCurrentContext->n;
NLuint current_row = nlCurrentContext->current_row;
NLuint i,j,jj;
NLdouble S;
NLuint k;
nlTransition(NL_STATE_ROW, NL_STATE_MATRIX);
if(nlCurrentContext->normalize_rows) {
nlNormalizeRow(nlCurrentContext->row_scaling);
} else if(nlCurrentContext->row_scaling != 1.0) {
nlScaleRow(nlCurrentContext->row_scaling);
}
/*
* if least_squares : we want to solve
* A'A x = A'b
*/
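/*
* Each row with free coefficients af and locked coefficients al
* contributes the outer product af*af^T to M = A^T A, and
* af * (rhs - al . x_locked) to the right-hand side A^T b,
* which is what the two loops below accumulate.
*/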
if(nlCurrentContext->least_squares) {
for(i=0; i<nf; i++) {
for(j=0; j<nf; j++) {
nlSparseMatrixAdd(
M, af->coeff[i].index, af->coeff[j].index,
af->coeff[i].value * af->coeff[j].value
);
}
}
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
S = -nlCurrentContext->right_hand_side[k];
for(jj=0; jj<nl; ++jj) {
j = al->coeff[jj].index;
S += al->coeff[jj].value *
NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[k],j);
}
for(jj=0; jj<nf; jj++) {
b[ k*n+af->coeff[jj].index ] -= af->coeff[jj].value * S;
}
}
} else {
for(jj=0; jj<nf; ++jj) {
nlSparseMatrixAdd(
M, current_row, af->coeff[jj].index, af->coeff[jj].value
);
}
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
b[k*n+current_row] = nlCurrentContext->right_hand_side[k];
for(jj=0; jj<nl; ++jj) {
j = al->coeff[jj].index;
b[k*n+current_row] -= al->coeff[jj].value *
NL_BUFFER_ITEM(nlCurrentContext->variable_buffer[k],j);
}
}
}
nlCurrentContext->current_row++;
for(k=0; k<nlCurrentContext->nb_systems; ++k) {
nlCurrentContext->right_hand_side[k] = 0.0;
}
nlCurrentContext->row_scaling = 1.0;
}
void nlCoefficient(NLuint index, NLdouble value) {
nlCheckState(NL_STATE_ROW);
nl_debug_range_assert(index, 0, nlCurrentContext->nb_variables - 1);
if(nlCurrentContext->variable_is_locked[index]) {
/*
* Note: in al, indices are NLvariable indices,
* within [0..nb_variables-1]
*/
nlRowColumnAppend(&(nlCurrentContext->al), index, value);
} else {
/*
* Note: in af, indices are system indices,
* within [0..n-1]
*/
nlRowColumnAppend(
&(nlCurrentContext->af),
nlCurrentContext->variable_index[index], value
);
}
}
void nlAddIJCoefficient(NLuint i, NLuint j, NLdouble value) {
NLSparseMatrix* M = nlGetCurrentSparseMatrix();
nlCheckState(NL_STATE_MATRIX);
nl_debug_range_assert(i, 0, nlCurrentContext->nb_variables - 1);
nl_debug_range_assert(j, 0, nlCurrentContext->nb_variables - 1);
#ifdef NL_DEBUG
for(NLuint i=0; i<nlCurrentContext->nb_variables; ++i) {
nl_debug_assert(!nlCurrentContext->variable_is_locked[i]);
}
#endif
nlSparseMatrixAdd(M, i, j, value);
nlCurrentContext->ij_coefficient_called = NL_TRUE;
}
void nlAddIRightHandSide(NLuint i, NLdouble value) {
nlCheckState(NL_STATE_MATRIX);
nl_debug_range_assert(i, 0, nlCurrentContext->nb_variables - 1);
#ifdef NL_DEBUG
for(NLuint i=0; i<nlCurrentContext->nb_variables; ++i) {
nl_debug_assert(!nlCurrentContext->variable_is_locked[i]);
}
#endif
nlCurrentContext->b[i] += value;
nlCurrentContext->ij_coefficient_called = NL_TRUE;
}
void nlMultiAddIRightHandSide(NLuint i, NLuint k, NLdouble value) {
NLuint n = nlCurrentContext->n;
nlCheckState(NL_STATE_MATRIX);
nl_debug_range_assert(i, 0, nlCurrentContext->nb_variables - 1);
nl_debug_range_assert(k, 0, nlCurrentContext->nb_systems - 1);
#ifdef NL_DEBUG
for(NLuint i=0; i<nlCurrentContext->nb_variables; ++i) {
nl_debug_assert(!nlCurrentContext->variable_is_locked[i]);
}
#endif
nlCurrentContext->b[i + k*n] += value;
nlCurrentContext->ij_coefficient_called = NL_TRUE;
}
void nlRightHandSide(NLdouble value) {
nlCurrentContext->right_hand_side[0] = value;
}
void nlMultiRightHandSide(NLuint k, NLdouble value) {
nl_debug_range_assert(k, 0, nlCurrentContext->nb_systems - 1);
nlCurrentContext->right_hand_side[k] = value;
}
void nlRowScaling(NLdouble value) {
nlCheckState(NL_STATE_MATRIX);
nlCurrentContext->row_scaling = value;
}
void nlBegin(NLenum prim) {
switch(prim) {
case NL_SYSTEM: {
nlBeginSystem();
} break;
case NL_MATRIX: {
nlTransition(NL_STATE_SYSTEM, NL_STATE_MATRIX);
if(
nlCurrentContext->matrix_mode == NL_STIFFNESS_MATRIX &&
nlCurrentContext->M == NULL
) {
nlInitializeM();
}
} break;
case NL_ROW: {
nlBeginRow();
} break;
default: {
nl_assert_not_reached;
}
}
}
void nlEnd(NLenum prim) {
switch(prim) {
case NL_SYSTEM: {
nlEndSystem();
} break;
case NL_MATRIX: {
nlEndMatrix();
} break;
case NL_ROW: {
nlEndRow();
} break;
default: {
nl_assert_not_reached;
}
}
}
/* nlSolve() driver routine */
NLboolean nlSolve() {
NLboolean result;
nlCheckState(NL_STATE_SYSTEM_CONSTRUCTED);
nlCurrentContext->start_time = nlCurrentTime();
nlCurrentContext->elapsed_time = 0.0;
nlCurrentContext->flops = 0;
result = nlCurrentContext->solver_func();
nlVectorToVariables();
nlCurrentContext->elapsed_time = nlCurrentTime() - nlCurrentContext->start_time;
nlTransition(NL_STATE_SYSTEM_CONSTRUCTED, NL_STATE_SOLVED);
return result;
}
void nlUpdateRightHandSide(NLdouble* values) {
/*
* If we are in the solved state, get back to the
* constructed state.
*/
nl_assert(nlCurrentContext->nb_systems == 1);
if(nlCurrentContext->state == NL_STATE_SOLVED) {
nlTransition(NL_STATE_SOLVED, NL_STATE_SYSTEM_CONSTRUCTED);
}
nlCheckState(NL_STATE_SYSTEM_CONSTRUCTED);
memcpy(nlCurrentContext->x, values, nlCurrentContext->n * sizeof(double));
}
/* Buffers management */
void nlBindBuffer(
NLenum buffer, NLuint k, void* addr, NLuint stride
) {
nlCheckState(NL_STATE_SYSTEM);
nl_assert(nlIsEnabled(buffer));
nl_assert(buffer == NL_VARIABLES_BUFFER);
nl_assert(k<nlCurrentContext->nb_systems);
if(stride == 0) {
stride = sizeof(NLdouble);
}
nlCurrentContext->variable_buffer[k].base_address = addr;
nlCurrentContext->variable_buffer[k].stride = stride;
}
/* Eigen solver */
void nlMatrixMode(NLenum matrix) {
NLuint n = 0;
NLuint i;
nl_assert(
nlCurrentContext->state == NL_STATE_SYSTEM ||
nlCurrentContext->state == NL_STATE_MATRIX_CONSTRUCTED
);
nlCurrentContext->state = NL_STATE_SYSTEM;
nlCurrentContext->matrix_mode = matrix;
nlCurrentContext->current_row = 0;
nlCurrentContext->ij_coefficient_called = NL_FALSE;
switch(matrix) {
case NL_STIFFNESS_MATRIX: {
/* Stiffness matrix is already constructed. */
} break ;
case NL_MASS_MATRIX: {
if(nlCurrentContext->B == NULL) {
for(i=0; i<nlCurrentContext->nb_variables; ++i) {
if(!nlCurrentContext->variable_is_locked[i]) {
++n;
}
}
nlCurrentContext->B = (NLMatrix)(NL_NEW(NLSparseMatrix));
nlSparseMatrixConstruct(
(NLSparseMatrix*)(nlCurrentContext->B),
n, n, NL_MATRIX_STORE_ROWS
);
}
} break ;
default:
nl_assert_not_reached;
}
}
void nlEigenSolverParameterd(
NLenum pname, NLdouble val
) {
switch(pname) {
case NL_EIGEN_SHIFT: {
nlCurrentContext->eigen_shift = val;
} break;
case NL_EIGEN_THRESHOLD: {
nlSolverParameterd(pname, val);
} break;
default:
nl_assert_not_reached;
}
}
void nlEigenSolverParameteri(
NLenum pname, NLint val
) {
switch(pname) {
case NL_EIGEN_SOLVER: {
nlCurrentContext->eigen_solver = (NLenum)val;
} break;
case NL_SYMMETRIC:
case NL_NB_VARIABLES:
case NL_NB_EIGENS:
case NL_EIGEN_MAX_ITERATIONS: {
nlSolverParameteri(pname, val);
} break;
default:
nl_assert_not_reached;
}
}
void nlEigenSolve() {
if(nlCurrentContext->eigen_value == NULL) {
nlCurrentContext->eigen_value = NL_NEW_ARRAY(
NLdouble,nlCurrentContext->nb_systems
);
}
nlMatrixCompress(&nlCurrentContext->M);
if(nlCurrentContext->B != NULL) {
nlMatrixCompress(&nlCurrentContext->B);
}
switch(nlCurrentContext->eigen_solver) {
case NL_ARPACK_EXT:
nlEigenSolve_ARPACK();
break;
default:
nl_assert_not_reached;
}
}
double nlGetEigenValue(NLuint i) {
nl_debug_assert(i < nlCurrentContext->nb_systems);
return nlCurrentContext->eigen_value[i];
}
|
FullyDistVec.h | /****************************************************************/
/* Parallel Combinatorial BLAS Library (for Graph Computations) */
/* version 1.2 -------------------------------------------------*/
/* date: 10/06/2011 --------------------------------------------*/
/* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/
/****************************************************************/
/*
Copyright (c) 2011, Aydin Buluc
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef _FULLY_DIST_VEC_H_
#define _FULLY_DIST_VEC_H_
#include <iostream>
#include <fstream>
#include <vector>
#include <utility>
#include <iterator>
#include "CombBLAS.h"
#include "CommGrid.h"
#include "FullyDist.h"
#include "Exception.h"
template <class IT, class NT>
class FullyDistSpVec;
template <class IT, class NT, class DER>
class SpParMat;
template <class IT>
class DistEdgeList;
template <class IU, class NU>
class DenseVectorLocalIterator;
// ABAB: As opposed to SpParMat, IT here is used to encode global size and global indices;
// therefore it cannot be 32 bits, in general.
template <class IT, class NT>
class FullyDistVec: public FullyDist<IT,NT, typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type >
{
public:
FullyDistVec ( );
FullyDistVec ( IT globallen, NT initval);
FullyDistVec ( shared_ptr<CommGrid> grid);
FullyDistVec ( shared_ptr<CommGrid> grid, IT globallen, NT initval);
FullyDistVec ( const FullyDistSpVec<IT, NT> & rhs ); // Sparse -> Dense conversion constructor
FullyDistVec ( const vector<NT> & fillarr, shared_ptr<CommGrid> grid ); // initialize a FullyDistVec with a vector from each processor
template <class ITRHS, class NTRHS>
FullyDistVec ( const FullyDistVec<ITRHS, NTRHS>& rhs ); // type converter constructor
class ScalarReadSaveHandler
{
public:
NT getNoNum(IT index) { return static_cast<NT>(1); }
template <typename c, typename t>
NT read(std::basic_istream<c,t>& is, IT index)
{
NT v;
is >> v;
return v;
}
template <typename c, typename t>
void save(std::basic_ostream<c,t>& os, const NT& v, IT index)
{
os << v;
}
};
template <class HANDLER>
ifstream& ReadDistribute (ifstream& infile, int master, HANDLER handler);
ifstream& ReadDistribute (ifstream& infile, int master) { return ReadDistribute(infile, master, ScalarReadSaveHandler()); }
template <class HANDLER>
void SaveGathered(ofstream& outfile, int master, HANDLER handler, bool printProcSplits = false);
void SaveGathered(ofstream& outfile, int master) { SaveGathered(outfile, master, ScalarReadSaveHandler(), false); }
template <class ITRHS, class NTRHS>
FullyDistVec<IT,NT> & operator=(const FullyDistVec< ITRHS,NTRHS > & rhs); // assignment with type conversion
FullyDistVec<IT,NT> & operator=(const FullyDistVec<IT,NT> & rhs); //!< Actual assignment operator
FullyDistVec<IT,NT> & operator=(const FullyDistSpVec<IT,NT> & rhs); //!< FullyDistSpVec->FullyDistVec conversion operator
FullyDistVec<IT,NT> & operator=(const DenseParVec<IT,NT> & rhs); //!< DenseParVec->FullyDistVec conversion operator
FullyDistVec<IT,NT> operator() (const FullyDistVec<IT,IT> & ri) const; //<! subsref
//! like operator=, but instead of making a deep copy it just steals the contents.
//! Useful for places where the "victim" will be destroyed immediately after the call.
FullyDistVec<IT,NT> & stealFrom(FullyDistVec<IT,NT> & victim);
FullyDistVec<IT,NT> & operator+=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator+=(const FullyDistVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistSpVec<IT,NT> & rhs);
FullyDistVec<IT,NT> & operator-=(const FullyDistVec<IT,NT> & rhs);
bool operator==(const FullyDistVec<IT,NT> & rhs) const;
void SetElement (IT indx, NT numx); // element-wise assignment
void SetLocalElement(IT index, NT value) { arr[index] = value; }; // no checks, local index
NT GetElement (IT indx) const; // element-wise fetch
NT operator[](IT indx) const // more c++ like API
{
return GetElement(indx);
}
void Set(const FullyDistSpVec< IT,NT > & rhs);
void iota(IT globalsize, NT first);
void RandPerm(); // randomly permute the vector
FullyDistVec<IT,IT> sort(); // sort and return the permutation
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::LengthUntil;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::TotalLength;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::Owner;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::MyLocLength;
IT LocArrSize() const { return arr.size(); } // = MyLocLength() once arr is resized
template <typename _Predicate>
FullyDistSpVec<IT,NT> Find(_Predicate pred) const; //!< Return the elements for which pred is true
template <typename _Predicate>
FullyDistVec<IT,IT> FindInds(_Predicate pred) const; //!< Return the indices where pred is true
template <typename _Predicate>
IT Count(_Predicate pred) const; //!< Return the number of elements for which pred is true
template <typename _UnaryOperation>
void Apply(_UnaryOperation __unary_op)
{
transform(arr.begin(), arr.end(), arr.begin(), __unary_op);
}
template <typename _BinaryOperation>
void ApplyInd(_BinaryOperation __binary_op)
{
IT offset = LengthUntil();
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(IT i=0; (unsigned)i < arr.size(); ++i)
arr[i] = __binary_op(arr[i], i + offset);
}
template <typename _UnaryOperation, typename IRRELEVANT_NT>
void Apply(_UnaryOperation __unary_op, const FullyDistSpVec<IT,IRRELEVANT_NT>& mask);
// extended callback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, const bool useExtendedBinOp);
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue, const bool useExtendedBinOp);
// plain fallback versions
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op)
{
EWiseApply(other,
EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
true);
}
template <typename _BinaryOperation, typename _BinaryPredicate, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, _BinaryPredicate _do_op, bool applyNulls, NT2 nullValue)
{
EWiseApply(other,
EWiseExtToPlainAdapter<NT, NT, NT2, _BinaryOperation>(__binary_op),
EWiseExtToPlainAdapter<bool, NT, NT2, _BinaryPredicate>(_do_op),
applyNulls, nullValue, true);
}
template <typename T1, typename T2>
class retTrue {
public:
bool operator()(const T1& x, const T2& y)
{
return true;
}
};
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistVec<IT,NT2> & other, _BinaryOperation __binary_op)
{
this->EWiseApply(other, __binary_op, retTrue<NT, NT2>());
}
template <typename _BinaryOperation, class NT2>
void EWiseApply(const FullyDistSpVec<IT,NT2> & other, _BinaryOperation __binary_op, bool applyNulls, NT2 nullValue)
{
this->EWiseApply(other, __binary_op, retTrue<NT, NT2>(), applyNulls, nullValue);
}
void PrintToFile(string prefix)
{
ofstream output;
commGrid->OpenDebugFile(prefix, output);
copy(arr.begin(), arr.end(), ostream_iterator<NT> (output, " "));
output << endl;
output.close();
}
void PrintInfo(string vectorname) const;
void DebugPrint();
shared_ptr<CommGrid> getcommgrid() const { return commGrid; }
template <typename _BinaryOperation>
NT Reduce(_BinaryOperation __binary_op, NT identity); //! Reduce can be used to implement max_element, for instance
template <typename OUT, typename _BinaryOperation, typename _UnaryOperation>
OUT Reduce(_BinaryOperation __binary_op, OUT default_val, _UnaryOperation __unary_op);
void SelectCandidates(double nver);
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::glen;
using FullyDist<IT,NT,typename CombBLAS::disable_if< CombBLAS::is_boolean<NT>::value, NT >::type>::commGrid;
private:
vector< NT > arr;
template <typename _BinaryOperation>
void EWise(const FullyDistVec<IT,NT> & rhs, _BinaryOperation __binary_op);
template <class IU, class NU>
friend class DenseParMat;
template <class IU, class NU, class UDER>
friend class SpParMat;
template <class IU, class NU>
friend class FullyDistVec;
template <class IU, class NU>
friend class FullyDistSpVec;
template <class IU, class NU>
friend class DenseVectorLocalIterator;
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
friend FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>
SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x );
template <typename IU, typename NU1, typename NU2>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero);
template <typename IU, typename NU1, typename NU2, typename _BinaryOperation>
friend FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, typename promote_trait<NU1,NU2>::T_promote zero);
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
friend FullyDistSpVec<IU,RET>
EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp);
template <typename IU>
friend void RenameVertices(DistEdgeList<IU> & DEL);
template <typename IU, typename NU>
friend FullyDistVec<IU,NU> Concatenate ( vector< FullyDistVec<IU,NU> > & vecs);
};
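// Minimal usage sketch (assumes an initialized MPI environment; sizes and
// functors are illustrative only, not part of this header):
//
//   FullyDistVec<int64_t, double> v(1000, 0.0);      // global length 1000, all 0.0
//   v.ApplyInd(std::plus<double>());                 // v[i] = 0.0 + global index i
//   double sum = v.Reduce(std::plus<double>(), 0.0); // global reduction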
#include "FullyDistVec.cpp"
#endif
|
interp2.c | /*
* Academic License - for use in teaching, academic research, and meeting
* course requirements at degree granting institutions only. Not for
* government, commercial, or other organizational use.
*
* interp2.c
*
* Code generation for function 'interp2'
*
*/
/* Include files */
#include "interp2.h"
#include "eml_int_forloop_overflow_check.h"
#include "rt_nonfinite.h"
#include "shadowing_latlon_loop_data.h"
#include "shadowing_latlon_loop_emxutil.h"
#include "shadowing_latlon_loop_types.h"
#include "mwmathutil.h"
/* Variable Definitions */
static emlrtRSInfo gd_emlrtRSI = { 274,/* lineNo */
"interp2_local", /* fcnName */
"D:\\Program Files\\MATLAB\\toolbox\\eml\\lib\\matlab\\polyfun\\interp2.m"/* pathName */
};
static emlrtRTEInfo ig_emlrtRTEI = { 268,/* lineNo */
21, /* colNo */
"interp2", /* fName */
"D:\\Program Files\\MATLAB\\toolbox\\eml\\lib\\matlab\\polyfun\\interp2.m"/* pName */
};
/* Function Definitions */
void interp2_local(const emlrtStack *sp, const emxArray_real_T *V, const
emxArray_real_T *Xq, const emxArray_real_T *Yq,
emxArray_real_T *Vq)
{
jmp_buf * volatile emlrtJBStack;
emlrtStack b_st;
emlrtStack st;
real_T qx1;
real_T qx2;
real_T rx;
real_T ry;
real_T zx1y2;
int32_T ix;
int32_T ixmax;
int32_T iy;
int32_T iymax;
int32_T k;
int32_T ub_loop;
st.prev = sp;
st.tls = sp->tls;
b_st.prev = &st;
b_st.tls = st.tls;
ixmax = Vq->size[0];
Vq->size[0] = Xq->size[0];
emxEnsureCapacity_real_T(sp, Vq, ixmax, &ig_emlrtRTEI);
ixmax = V->size[1] - 1;
iymax = V->size[0] - 1;
st.site = &gd_emlrtRSI;
if ((1 <= Xq->size[0]) && (Xq->size[0] > 2147483646)) {
b_st.site = &x_emlrtRSI;
check_forloop_overflow_error(&b_st);
}
ub_loop = Xq->size[0] - 1;
emlrtEnterParallelRegion(sp, omp_in_parallel());
emlrtPushJmpBuf(sp, &emlrtJBStack);
#pragma omp parallel for \
num_threads(emlrtAllocRegionTLSs(sp->tls, omp_in_parallel(), omp_get_max_threads(), omp_get_num_procs())) \
private(ix,iy,ry,qx1,zx1y2,qx2,rx)
for (k = 0; k <= ub_loop; k++) {
if ((Xq->data[k] >= 1.0) && (Xq->data[k] <= V->size[1]) && (Yq->data[k] >=
1.0) && (Yq->data[k] <= V->size[0])) {
if (Xq->data[k] <= 1.0) {
ix = 1;
} else if (Xq->data[k] <= ixmax) {
ix = (int32_T)muDoubleScalarFloor(Xq->data[k]);
} else {
ix = ixmax;
}
if (Yq->data[k] <= 1.0) {
iy = 1;
} else if (Yq->data[k] <= iymax) {
iy = (int32_T)muDoubleScalarFloor(Yq->data[k]);
} else {
iy = iymax;
}
ry = V->data[(iy + V->size[0] * (ix - 1)) - 1];
qx1 = V->data[(iy + V->size[0] * ix) - 1];
zx1y2 = V->data[iy + V->size[0] * (ix - 1)];
qx2 = V->data[iy + V->size[0] * ix];
if (Xq->data[k] == ix) {
qx1 = ry;
qx2 = zx1y2;
} else {
if (!(Xq->data[k] == (real_T)ix + 1.0)) {
rx = (Xq->data[k] - (real_T)ix) / (((real_T)ix + 1.0) - (real_T)ix);
if (ry == qx1) {
qx1 = ry;
} else {
qx1 = (1.0 - rx) * ry + rx * qx1;
}
if (zx1y2 == qx2) {
qx2 = zx1y2;
} else {
qx2 = (1.0 - rx) * zx1y2 + rx * qx2;
}
}
}
if ((Yq->data[k] == iy) || (qx1 == qx2)) {
Vq->data[k] = qx1;
} else if (Yq->data[k] == (real_T)iy + 1.0) {
Vq->data[k] = qx2;
} else {
ry = (Yq->data[k] - (real_T)iy) / (((real_T)iy + 1.0) - (real_T)iy);
Vq->data[k] = (1.0 - ry) * qx1 + ry * qx2;
}
} else {
Vq->data[k] = rtNaN;
}
}
emlrtPopJmpBuf(sp, &emlrtJBStack);
emlrtExitParallelRegion(sp, omp_in_parallel());
}
/* End of code generation (interp2.c) */
|
parallel-reduction2.c | #include<assert.h>
#include<omp.h>
#include<stdio.h>
int main(void)
{
int i =100, sum=100;
int thread_num;
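/*
* reduction(+:sum): every thread gets a private copy initialized to 0
* (the identity for +); on exit the copies are summed into the original
* sum, so the result is 100 + 100 * thread_num.
*/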
#pragma omp parallel reduction(+:sum)
{
#pragma omp single
{
thread_num = omp_get_num_threads();
}
sum += i;
}
printf("thread num=%d sum =%d\n", thread_num, sum);
assert(sum == (i*thread_num + 100));
sum = 100;
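/*
* private(sum): each thread gets an *uninitialized* private copy whose
* updates are discarded at the end of the region (reading it before
* assignment is technically undefined); the outer sum keeps its value
* 100, which is why the assertion below expects inequality.
*/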
#pragma omp parallel private(sum)
{
#pragma omp single
{
thread_num = omp_get_num_threads();
}
sum += i;
}
printf("thread num=%d sum =%d\n", thread_num, sum);
assert(sum != (i*thread_num + 100));
return 0;
}
|
Fig_10.1_parClaws.c | // sample compile command: "gcc -fopenmp -c Fig_10.1_parClaws.c" to generate the *.o object file
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
// initialization and transform functions
// (we will not show the function bodies)
extern void initMats(int N, float *A, float *T);
extern void transform(int N, int id, int Nthrds, float *A, float *T);
int main(int argc, char**argv)
{
float trace=0;
int i, id, N, Nthrds;
float *A, *T;
// set matrix order N
if (argc == 2)
N = atoi(argv[1]);
else
N = 10;
// allocate space for two N x N matrices and initialize them
T = (float *) malloc(N*N*sizeof(float));
A = (float *) malloc(N*N*sizeof(float));
initMats(N, A, T);
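// if(N>100): run serially for small matrices where threading overhead
//            would dominate; num_threads(4) requests a team of 4 threads.
// default(none): every variable's data-sharing must be listed explicitly.
// reduction(+:trace): per-thread partial traces are combined on exit.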
#pragma omp parallel if(N>100) num_threads(4) default(none) \
shared(A,T,N) private (i,id,Nthrds) reduction(+:trace)
{
id = omp_get_thread_num();
Nthrds = omp_get_num_threads();
transform(N, id, Nthrds, T, A);
// compute trace of A matrix
// i.e., the sum of diagonal elements
#pragma omp for
for (i = 0; i < N; i++)
trace += *(A+i*N+i);
}
printf(" transform complete with trace = \%f\n",trace);
}
|
lis_precon_ilut.c | /* Copyright (C) 2002-2012 The SSI Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE
PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This subroutine is made based on ITSOL.
*
* http://www-users.cs.umn.edu/~saad/software/ITSOL/
*
*/
#ifdef HAVE_CONFIG_H
#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
#include "lis_config_win32.h"
#endif
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef USE_MPI
#include <mpi.h>
#endif
#include "lislib.h"
/* Define ENABLE_BSR to compile the BSR-specific code paths below. */
#undef __FUNC__
#define __FUNC__ "lis_precon_create_ilut"
LIS_INT lis_precon_create_ilut(LIS_SOLVER solver, LIS_PRECON precon)
{
#ifdef ENABLE_BSR
LIS_INT storage,block;
#endif
LIS_INT err;
LIS_MATRIX A,B;
LIS_DEBUG_FUNC_IN;
#ifdef ENABLE_BSR
storage = solver->options[LIS_OPTIONS_STORAGE];
block = solver->options[LIS_OPTIONS_STORAGE_BLOCK];
if( solver->A->matrix_type!=LIS_MATRIX_BSR && storage==LIS_MATRIX_BSR )
{
err = lis_matrix_convert_self(solver);
if( err ) return err;
}
#endif
switch( solver->A->matrix_type )
{
case LIS_MATRIX_CRS:
err = lis_precon_create_ilut_crs(solver,precon);
lis_psolve_xxx[LIS_PRECON_TYPE_ILUT] = lis_psolve_ilut_crs;
lis_psolvet_xxx[LIS_PRECON_TYPE_ILUT] = lis_psolvet_ilut_crs;
break;
#ifdef ENABLE_BSR
case LIS_MATRIX_BSR:
err = lis_precon_create_ilut_bsr(solver,precon);
lis_psolve_xxx[LIS_PRECON_TYPE_ILUT] = lis_psolve_ilut_bsr;
lis_psolvet_xxx[LIS_PRECON_TYPE_ILUT] = lis_psolvet_ilut_bsr;
break;
#endif
default:
A = solver->A;
err = lis_matrix_duplicate(A,&B);
if( err ) return err;
lis_matrix_set_type(B,LIS_MATRIX_CRS);
err = lis_matrix_convert(A,B);
if( err ) return err;
solver->A = B;
err = lis_precon_create_ilut_crs(solver,precon);
lis_psolve_xxx[LIS_PRECON_TYPE_ILUT] = lis_psolve_ilut_crs;
lis_psolvet_xxx[LIS_PRECON_TYPE_ILUT] = lis_psolvet_ilut_crs;
lis_matrix_destroy(B);
solver->A = A;
break;
}
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_precon_create_ilut_crs"
LIS_INT lis_precon_create_ilut_crs(LIS_SOLVER solver, LIS_PRECON precon)
{
#ifdef _OPENMP
LIS_INT err;
LIS_INT i,j,k,ii,jj,kk;
LIS_INT is,ie,my_rank,nprocs;
LIS_INT n,nr,nnz,lfil,len;
LIS_SCALAR gamma,t,tol,toldd,m;
LIS_MATRIX A;
LIS_MATRIX_ILU L,U;
LIS_VECTOR D;
LIS_SCALAR tnorm, tolnorm;
LIS_SCALAR fact,lxu,*wn,*w;
LIS_INT lenu,lenl,col,jpos,jrow,upos,para;
LIS_INT *jbuf,*iw;
LIS_DEBUG_FUNC_IN;
A = solver->A;
n = A->n;
tol = solver->params[LIS_PARAMS_DROP-LIS_OPTIONS_LEN];
m = solver->params[LIS_PARAMS_RATE-LIS_OPTIONS_LEN];
gamma = solver->params[LIS_PARAMS_GAMMA-LIS_OPTIONS_LEN];
lfil = (LIS_INT)((double)A->nnz/(2.0*n))*m;
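/* lfil caps the number of entries kept per row in each factor: the
average nonzeros per row per triangle (nnz/(2n)) times the fill-rate
parameter m. */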
nprocs = omp_get_max_threads();
L = NULL;
U = NULL;
err = lis_matrix_ilu_create(n,1,&L);
if( err ) return err;
err = lis_matrix_ilu_create(n,1,&U);
if( err ) return err;
err = lis_matrix_ilu_setCR(L);
if( err ) return err;
err = lis_matrix_ilu_setCR(U);
if( err ) return err;
err = lis_vector_duplicate(A,&D);
if( err )
{
return err;
}
w = (LIS_SCALAR *)lis_malloc(nprocs*(n+1)*sizeof(LIS_SCALAR),"lis_precon_create_ilut_crs::w");
if( w==NULL )
{
LIS_SETERR_MEM(nprocs*(n+1)*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
wn = (LIS_SCALAR *)lis_malloc(nprocs*n*sizeof(LIS_SCALAR),"lis_precon_create_ilut_crs::wn");
if( wn==NULL )
{
LIS_SETERR_MEM(nprocs*n*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
jbuf = (LIS_INT *)lis_malloc(nprocs*n*sizeof(LIS_INT),"lis_precon_create_ilut_crs::jbuf");
if( jbuf==NULL )
{
LIS_SETERR_MEM(nprocs*n*sizeof(LIS_INT));
return LIS_OUT_OF_MEMORY;
}
iw = (LIS_INT *)lis_malloc(nprocs*n*sizeof(LIS_INT),"lis_precon_create_ilut_crs::iw");
if( iw==NULL )
{
LIS_SETERR_MEM(nprocs*n*sizeof(LIS_INT));
return LIS_OUT_OF_MEMORY;
}
#pragma omp parallel private(is,ie,my_rank,i,j,k,jj,tnorm,tolnorm,len,lenu,lenl,col,t,jpos,jrow,fact,lxu,upos)
{
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
for(i=is;i<ie;i++) iw[my_rank*n+i] = -1;
for(i=is;i<ie;i++)
{
tnorm = 0;
k = 0;
for(j=A->ptr[i];j<A->ptr[i+1];j++)
{
jj = A->index[j];
if( jj<is || jj>=ie ) continue;
tnorm += fabs(A->value[j]);
k++;
}
tnorm = tnorm / (double)k;
tolnorm = tol * tnorm;
lenu = 0;
lenl = 0;
jbuf[my_rank*n+i] = i;
w[my_rank*n+i] = 0;
iw[my_rank*n+i] = i;
for(j=A->ptr[i];j<A->ptr[i+1];j++)
{
col = A->index[j];
if( col<is || col>=ie ) continue;
t = A->value[j];
if( col < i )
{
jbuf[my_rank*n+lenl] = col;
iw[my_rank*n+col] = lenl;
w[my_rank*n+lenl] = t;
lenl++;
}
else if( col == i )
{
w[my_rank*n+i] = t;
}
else
{
lenu++;
jpos = i + lenu;
jbuf[my_rank*n+jpos] = col;
iw[my_rank*n+col] = jpos;
w[my_rank*n+jpos] = t;
}
}
j = -1;
len = 0;
while( ++j < lenl )
{
jrow = jbuf[my_rank*n+j];
jpos = j;
for(k=j+1;k<lenl;k++)
{
if( jbuf[my_rank*n+k]<jrow )
{
jrow = jbuf[my_rank*n+k];
jpos = k;
}
}
if( jpos!=j )
{
col = jbuf[my_rank*n+j];
jbuf[my_rank*n+j] = jbuf[my_rank*n+jpos];
jbuf[my_rank*n+jpos] = col;
iw[my_rank*n+jrow] = j;
iw[my_rank*n+col] = jpos;
t = w[my_rank*n+j];
w[my_rank*n+j] = w[my_rank*n+jpos];
w[my_rank*n+jpos] = t;
}
fact = w[my_rank*n+j] * D->value[jrow];
w[my_rank*n+j] = fact;
iw[my_rank*n+jrow] = -1;
for(k=0;k<U->nnz[jrow];k++)
{
col = U->index[jrow][k];
jpos = iw[my_rank*n+col];
lxu = -fact * U->value[jrow][k];
if( fabs(lxu) < tolnorm && jpos==-1 ) continue;
if( col >= i )
{
if( jpos == -1 )
{
lenu++;
upos = i + lenu;
jbuf[my_rank*n+upos] = col;
iw[my_rank*n+col] = upos;
w[my_rank*n+upos] = lxu;
}
else
{
w[my_rank*n+jpos] += lxu;
}
}
else
{
if( jpos == -1 )
{
jbuf[my_rank*n+lenl] = col;
iw[my_rank*n+col] = lenl;
w[my_rank*n+lenl] = lxu;
lenl++;
}
else
{
w[my_rank*n+jpos] += lxu;
}
}
}
}
iw[my_rank*n+i] = -1;
for(j=0;j<lenu;j++)
{
iw[ my_rank*n+jbuf[my_rank*n+i+j+1] ] = -1;
}
D->value[i] = 1.0 / w[my_rank*n+i];
len = _min(lfil,lenl);
for(j=0;j<lenl;j++)
{
wn[my_rank*n+j] = fabs(w[my_rank*n+j]);
iw[my_rank*n+j] = j;
}
lis_sort_di(0,lenl-1,&wn[my_rank*n],&iw[my_rank*n]);
lis_sort_i(0,len-1,&iw[my_rank*n]);
L->nnz[i] = len;
if( len>0 )
{
L->index[i] = (LIS_INT *)malloc(len*sizeof(LIS_INT));
L->value[i] = (LIS_SCALAR *)malloc(len*sizeof(LIS_SCALAR));
}
for(j=0;j<len;j++)
{
jpos = iw[my_rank*n+j];
L->index[i][j] = jbuf[my_rank*n+jpos];
L->value[i][j] = w[my_rank*n+jpos];
}
for(j=0;j<lenl;j++) iw[my_rank*n+j] = -1;
len = _min(lfil,lenu);
for(j=0;j<lenu;j++)
{
wn[my_rank*n+j] = fabs(w[my_rank*n+i+j+1]);
iw[my_rank*n+j] = i+j+1;
}
lis_sort_di(0,lenu-1,&wn[my_rank*n],&iw[my_rank*n]);
lis_sort_i(0,len-1,&iw[my_rank*n]);
U->nnz[i] = len;
if( len>0 )
{
U->index[i] = (LIS_INT *)malloc(len*sizeof(LIS_INT));
U->value[i] = (LIS_SCALAR *)malloc(len*sizeof(LIS_SCALAR));
}
for(j=0;j<len;j++)
{
jpos = iw[my_rank*n+j];
U->index[i][j] = jbuf[my_rank*n+jpos];
U->value[i][j] = w[my_rank*n+jpos];
}
for(j=0;j<lenu;j++) iw[my_rank*n+j] = -1;
}
}
precon->L = L;
precon->U = U;
precon->D = D;
lis_free2(4,w,iw,wn,jbuf);
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
#else
LIS_INT err;
LIS_INT i,j,k;
LIS_INT n,lfil,len;
LIS_SCALAR gamma,t,tol,m;
LIS_MATRIX A;
LIS_MATRIX_ILU L,U;
LIS_VECTOR D;
LIS_SCALAR tnorm, tolnorm;
LIS_SCALAR fact,lxu,*wn,*w;
LIS_INT lenu,lenl,col,jpos,jrow,upos;
LIS_INT *jbuf,*iw;
LIS_DEBUG_FUNC_IN;
A = solver->A;
n = A->n;
tol = solver->params[LIS_PARAMS_DROP-LIS_OPTIONS_LEN];
m = solver->params[LIS_PARAMS_RATE-LIS_OPTIONS_LEN];
gamma = solver->params[LIS_PARAMS_GAMMA-LIS_OPTIONS_LEN];
lfil = (LIS_INT)(((double)A->nnz/(2.0*n))*m);
L = NULL;
U = NULL;
err = lis_matrix_ilu_create(n,1,&L);
if( err ) return err;
err = lis_matrix_ilu_create(n,1,&U);
if( err ) return err;
err = lis_matrix_ilu_setCR(L);
if( err ) return err;
err = lis_matrix_ilu_setCR(U);
if( err ) return err;
err = lis_vector_duplicate(A,&D);
if( err )
{
return err;
}
w = (LIS_SCALAR *)lis_malloc((n+1)*sizeof(LIS_SCALAR),"lis_precon_create_ilut_crs::w");
if( w==NULL )
{
LIS_SETERR_MEM((n+1)*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
wn = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_precon_create_ilut_crs::wn");
if( wn==NULL )
{
LIS_SETERR_MEM(n*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
jbuf = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_ilut_crs::jbuf");
if( jbuf==NULL )
{
LIS_SETERR_MEM(n*sizeof(LIS_INT));
return LIS_OUT_OF_MEMORY;
}
iw = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_ilut_crs::iw");
if( iw==NULL )
{
LIS_SETERR_MEM(n*sizeof(LIS_INT));
return LIS_OUT_OF_MEMORY;
}
for(i=0;i<n;i++) iw[i] = -1;
for(i=0;i<n;i++)
{
tnorm = 0;
for(j=A->ptr[i];j<A->ptr[i+1];j++)
{
tnorm += fabs(A->value[j]);
}
tnorm = tnorm / (double)(A->ptr[i+1]-A->ptr[i]);
tolnorm = tol * tnorm;
lenu = 0;
lenl = 0;
jbuf[i] = i;
w[i] = 0;
iw[i] = i;
for(j=A->ptr[i];j<A->ptr[i+1];j++)
{
col = A->index[j];
#ifdef USE_MPI
if( col>n-1 ) continue;
#endif
t = A->value[j];
if( col < i )
{
jbuf[lenl] = col;
iw[col] = lenl;
w[lenl] = t;
lenl++;
}
else if( col == i )
{
w[i] = t;
}
else
{
lenu++;
jpos = i + lenu;
jbuf[jpos] = col;
iw[col] = jpos;
w[jpos] = t;
}
}
j = -1;
len = 0;
while( ++j < lenl )
{
jrow = jbuf[j];
jpos = j;
for(k=j+1;k<lenl;k++)
{
if( jbuf[k]<jrow )
{
jrow = jbuf[k];
jpos = k;
}
}
if( jpos!=j )
{
col = jbuf[j];
jbuf[j] = jbuf[jpos];
jbuf[jpos] = col;
iw[jrow] = j;
iw[col] = jpos;
t = w[j];
w[j] = w[jpos];
w[jpos] = t;
}
fact = w[j] * D->value[jrow];
w[j] = fact;
iw[jrow] = -1;
for(k=0;k<U->nnz[jrow];k++)
{
col = U->index[jrow][k];
jpos = iw[col];
lxu = -fact * U->value[jrow][k];
if( fabs(lxu) < tolnorm && jpos==-1 ) continue;
if( col >= i )
{
if( jpos == -1 )
{
lenu++;
upos = i + lenu;
jbuf[upos] = col;
iw[col] = upos;
w[upos] = lxu;
}
else
{
w[jpos] += lxu;
}
}
else
{
if( jpos == -1 )
{
jbuf[lenl] = col;
iw[col] = lenl;
w[lenl] = lxu;
lenl++;
}
else
{
w[jpos] += lxu;
}
}
}
}
iw[i] = -1;
for(j=0;j<lenu;j++)
{
iw[ jbuf[i+j+1] ] = -1;
}
D->value[i] = 1.0 / w[i];
len = _min(lfil,lenl);
for(j=0;j<lenl;j++)
{
wn[j] = fabs(w[j]);
iw[j] = j;
}
lis_sort_di(0,lenl-1,wn,iw);
lis_sort_i(0,len-1,iw);
L->nnz[i] = len;
if( len>0 )
{
L->index[i] = (LIS_INT *)malloc(len*sizeof(LIS_INT));
L->value[i] = (LIS_SCALAR *)malloc(len*sizeof(LIS_SCALAR));
}
for(j=0;j<len;j++)
{
jpos = iw[j];
L->index[i][j] = jbuf[jpos];
L->value[i][j] = w[jpos];
}
for(j=0;j<lenl;j++) iw[j] = -1;
len = _min(lfil,lenu);
for(j=0;j<lenu;j++)
{
wn[j] = fabs(w[i+j+1]);
iw[j] = i+j+1;
}
lis_sort_di(0,lenu-1,wn,iw);
lis_sort_i(0,len-1,iw);
U->nnz[i] = len;
if( len>0 )
{
U->index[i] = (LIS_INT *)malloc(len*sizeof(LIS_INT));
U->value[i] = (LIS_SCALAR *)malloc(len*sizeof(LIS_SCALAR));
}
for(j=0;j<len;j++)
{
jpos = iw[j];
U->index[i][j] = jbuf[jpos];
U->value[i][j] = w[jpos];
}
for(j=0;j<lenu;j++) iw[j] = -1;
}
precon->L = L;
precon->U = U;
precon->D = D;
lis_free2(4,w,iw,wn,jbuf);
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
#endif
}
#undef __FUNC__
#define __FUNC__ "lis_psolve_ilut_crs"
LIS_INT lis_psolve_ilut_crs(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
#ifdef _OPENMP
LIS_INT i,j,jj,n;
LIS_INT nprocs,my_rank,is,ie;
LIS_SCALAR t;
LIS_SCALAR *b,*x;
LIS_MATRIX_ILU L,U;
LIS_VECTOR D;
LIS_PRECON precon;
LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
LIS_SCALAR *xl;
#endif
/*
* LUx = b
* LU = (D + L*A) * (I + D^-1 * U*A)
*/
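/*
 * The solve is two substitution sweeps: a forward pass that eliminates
 * the strictly lower factor L, then a backward pass with U that ends by
 * scaling with D, which stores the reciprocal of each pivot.
 */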
LIS_DEBUG_FUNC_IN;
precon = solver->precon;
L = precon->L;
U = precon->U;
D = precon->D;
b = B->value;
x = X->value;
#ifdef USE_QUAD_PRECISION
xl = X->value_lo;
#endif
n = solver->A->n;
#ifdef USE_QUAD_PRECISION
if( B->precision==LIS_PRECISION_DEFAULT )
{
#endif
lis_vector_copy(B,X);
nprocs = omp_get_max_threads();
#pragma omp parallel private(i,j,jj,is,ie,my_rank)
{
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
for(i=is; i<ie; i++)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
x[i] -= L->value[i][j] * x[jj];
}
}
for(i=ie-1; i>=is; i--)
{
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
x[i] -= U->value[i][j] * x[jj];
}
x[i] = D->value[i] * x[i];
}
}
#ifdef USE_QUAD_PRECISION
}
else
{
lis_vector_copyex_mm(B,X);
nprocs = omp_get_max_threads();
#ifndef USE_SSE2
#pragma omp parallel private(i,j,jj,is,ie,my_rank,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
#else
#pragma omp parallel private(i,j,jj,is,ie,my_rank,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
#endif
{
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
for(i=is; i<ie; i++)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-L->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-L->value[i][j]);
#endif
/* x[i] -= L->value[i][j] * x[jj];*/
}
}
for(i=ie-1; i>=is; i--)
{
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-U->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-U->value[i][j]);
#endif
/* x[i] -= U->value[i][j] * x[jj];*/
}
#ifndef USE_SSE2
LIS_QUAD_MULD(x[i],xl[i],x[i],xl[i],D->value[i]);
#else
LIS_QUAD_MULD_SSE2(x[i],xl[i],x[i],xl[i],D->value[i]);
#endif
/* x[i] = D->value[i] * x[i];*/
}
}
}
#endif
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
#else
LIS_INT i,j,jj,n;
LIS_SCALAR *b,*x;
LIS_MATRIX_ILU L,U;
LIS_VECTOR D;
LIS_PRECON precon;
LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
LIS_SCALAR *xl;
#endif
/*
* LUx = b
* LU = (D + L*A) * (I + D^-1 * U*A)
*/
LIS_DEBUG_FUNC_IN;
precon = solver->precon;
L = precon->L;
U = precon->U;
D = precon->D;
b = B->value;
x = X->value;
#ifdef USE_QUAD_PRECISION
xl = X->value_lo;
#endif
n = solver->A->n;
#ifdef USE_QUAD_PRECISION
if( B->precision==LIS_PRECISION_DEFAULT )
{
#endif
lis_vector_copy(B,X);
for(i=0; i<n; i++)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
x[i] -= L->value[i][j] * x[jj];
}
}
for(i=n-1; i>=0; i--)
{
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
x[i] -= U->value[i][j] * x[jj];
}
x[i] = D->value[i] * x[i];
}
#ifdef USE_QUAD_PRECISION
}
else
{
lis_vector_copy(B,X);
for(i=0; i<n; i++)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-L->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-L->value[i][j]);
#endif
/* x[i] -= L->value[i][j] * x[jj];*/
}
}
for(i=n-1; i>=0; i--)
{
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-U->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[i],xl[i],x[i],xl[i],x[jj],xl[jj],-U->value[i][j]);
#endif
/* x[i] -= U->value[i][j] * x[jj];*/
}
#ifndef USE_SSE2
LIS_QUAD_MULD(x[i],xl[i],x[i],xl[i],D->value[i]);
#else
LIS_QUAD_MULD_SSE2(x[i],xl[i],x[i],xl[i],D->value[i]);
#endif
/* x[i] = D->value[i]*x[i];*/
}
}
#endif
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
#endif
}
#undef __FUNC__
#define __FUNC__ "lis_psolvet_ilut_crs"
LIS_INT lis_psolvet_ilut_crs(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
#ifdef _OPENMP
LIS_INT i,j,jj,n;
LIS_INT is,ie,my_rank,nprocs;
LIS_SCALAR *b,*x;
LIS_MATRIX_ILU L,U;
LIS_VECTOR D;
LIS_PRECON precon;
LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
LIS_SCALAR *xl;
#endif
LIS_DEBUG_FUNC_IN;
precon = solver->precon;
L = precon->L;
U = precon->U;
D = precon->D;
b = B->value;
x = X->value;
#ifdef USE_QUAD_PRECISION
xl = X->value_lo;
#endif
n = solver->A->n;
nprocs = omp_get_max_threads();
#ifdef USE_QUAD_PRECISION
if( B->precision==LIS_PRECISION_DEFAULT )
{
#endif
lis_vector_copy(B,X);
#pragma omp parallel private(i,j,jj,is,ie,my_rank)
{
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
for(i=is;i<ie;i++)
{
x[i] = D->value[i]*x[i];
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
x[jj] -= U->value[i][j] * x[i];
}
}
for(i=ie-1;i>=is;i--)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
x[jj] -= L->value[i][j] * x[i];
}
}
}
#ifdef USE_QUAD_PRECISION
}
else
{
lis_vector_copyex_mm(B,X);
nprocs = omp_get_max_threads();
#ifndef USE_SSE2
#pragma omp parallel private(i,j,jj,is,ie,my_rank,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el)
#else
#pragma omp parallel private(i,j,jj,is,ie,my_rank,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh)
#endif
{
my_rank = omp_get_thread_num();
LIS_GET_ISIE(my_rank,nprocs,n,is,ie);
for(i=is;i<ie;i++)
{
#ifndef USE_SSE2
LIS_QUAD_MULD(x[i],xl[i],x[i],xl[i],D->value[i]);
#else
LIS_QUAD_MULD_SSE2(x[i],xl[i],x[i],xl[i],D->value[i]);
#endif
/* x[i] = D->value[i]*x[i];*/
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-U->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-U->value[i][j]);
#endif
/* x[jj] -= U->value[i][j] * x[i];*/
}
}
for(i=ie-1;i>=is;i--)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-L->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-L->value[i][j]);
#endif
/* x[jj] -= L->value[i][j] * x[i];*/
}
}
}
}
#endif
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
#else
LIS_INT i,j,jj,n;
LIS_SCALAR *b,*x;
LIS_MATRIX_ILU L,U;
LIS_VECTOR D;
LIS_PRECON precon;
LIS_QUAD_DECLAR;
#ifdef USE_QUAD_PRECISION
LIS_SCALAR *xl;
#endif
LIS_DEBUG_FUNC_IN;
precon = solver->precon;
L = precon->L;
U = precon->U;
D = precon->D;
b = B->value;
x = X->value;
#ifdef USE_QUAD_PRECISION
xl = X->value_lo;
#endif
n = solver->A->n;
#ifdef USE_QUAD_PRECISION
if( B->precision==LIS_PRECISION_DEFAULT )
{
#endif
lis_vector_copy(B,X);
for(i=0; i<n; i++)
{
x[i] = D->value[i]*x[i];
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
x[jj] -= U->value[i][j] * x[i];
}
}
for(i=n-1; i>=0; i--)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
x[jj] -= L->value[i][j] * x[i];
}
}
#ifdef USE_QUAD_PRECISION
}
else
{
lis_vector_copy(B,X);
for(i=0; i<n; i++)
{
#ifndef USE_SSE2
LIS_QUAD_MULD(x[i],xl[i],x[i],xl[i],D->value[i]);
#else
LIS_QUAD_MULD_SSE2(x[i],xl[i],x[i],xl[i],D->value[i]);
#endif
/* x[i] = D->value[i]*x[i];*/
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-U->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-U->value[i][j]);
#endif
/* x[jj] -= U->value[i][j] * x[i];*/
}
}
for(i=n-1; i>=0; i--)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
#ifndef USE_SSE2
LIS_QUAD_FMAD(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-L->value[i][j]);
#else
LIS_QUAD_FMAD_SSE2(x[jj],xl[jj],x[jj],xl[jj],x[i],xl[i],-L->value[i][j]);
#endif
/* x[jj] -= L->value[i][j] * x[i];*/
}
}
}
#endif
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
#endif
}
#undef __FUNC__
#define __FUNC__ "lis_precon_create_ilut_bsr"
LIS_INT lis_precon_create_ilut_bsr(LIS_SOLVER solver, LIS_PRECON precon)
{
LIS_INT err;
LIS_INT i,j,k,kk,bnr,bs;
LIS_INT n,nr,annz,lfil,len;
LIS_SCALAR gamma,t,tol,m;
LIS_MATRIX A;
LIS_MATRIX_ILU L,U;
LIS_MATRIX_DIAG D;
LIS_SCALAR tnorm, tolnorm;
LIS_SCALAR buf_ns[16],buf_fact[16],*xnrm,*wn,*w;
LIS_INT lenu,lenl,col,jpos,jrow,upos,para;
LIS_INT *jbuf,*iw;
LIS_DEBUG_FUNC_IN;
A = solver->A;
n = A->n;
nr = A->nr;
bnr = A->bnr;
bs = bnr*bnr;
tol = solver->params[LIS_PARAMS_DROP-LIS_OPTIONS_LEN];
m = solver->params[LIS_PARAMS_RATE-LIS_OPTIONS_LEN];
gamma = solver->params[LIS_PARAMS_GAMMA-LIS_OPTIONS_LEN];
annz = 10+A->bnnz / A->nr;
lfil = (LIS_INT)(((double)A->bnnz/(2.0*nr))*m);
L = NULL;
U = NULL;
err = lis_matrix_ilu_create(nr,bnr,&L);
if( err ) return err;
err = lis_matrix_ilu_create(nr,bnr,&U);
if( err ) return err;
err = lis_matrix_ilu_setCR(L);
if( err ) return err;
err = lis_matrix_ilu_setCR(U);
if( err ) return err;
err = lis_matrix_diag_duplicateM(A,&D);
if( err )
{
return err;
}
w = (LIS_SCALAR *)lis_malloc(bs*(nr+1)*sizeof(LIS_SCALAR),"lis_precon_create_ilut_bsr::w");
if( w==NULL )
{
LIS_SETERR_MEM(bs*(nr+1)*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
xnrm = (LIS_SCALAR *)lis_malloc(nr*sizeof(LIS_SCALAR),"lis_precon_create_ilut_bsr::xnrm");
if( xnrm==NULL )
{
LIS_SETERR_MEM(nr*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
wn = (LIS_SCALAR *)lis_malloc(nr*sizeof(LIS_SCALAR),"lis_precon_create_ilut_bsr::wn");
if( wn==NULL )
{
LIS_SETERR_MEM(nr*sizeof(LIS_SCALAR));
return LIS_OUT_OF_MEMORY;
}
jbuf = (LIS_INT *)lis_malloc(n*sizeof(LIS_INT),"lis_precon_create_ilut_bsr::jbuf");
if( jbuf==NULL )
{
LIS_SETERR_MEM(n*sizeof(LIS_INT));
return LIS_OUT_OF_MEMORY;
}
iw = (LIS_INT *)lis_malloc(nr*sizeof(LIS_INT),"lis_precon_create_ilut_bsr::iw");
if( iw==NULL )
{
LIS_SETERR_MEM(nr*sizeof(LIS_INT));
return LIS_OUT_OF_MEMORY;
}
for(i=0;i<nr;i++) iw[i] = -1;
for(i=0;i<nr;i++)
{
tnorm = 0;
for(j=A->bptr[i];j<A->bptr[i+1];j++)
{
lis_array_nrm2(bs,&A->value[bs*j],&t);
tnorm = _max(t,tnorm);
}
tolnorm = tol * tnorm;
lenu = 1;
lenl = 0;
jbuf[i] = i;
memset(&w[bs*i],0,bs*sizeof(LIS_SCALAR));
iw[i] = i;
for(j=A->bptr[i];j<A->bptr[i+1];j++)
{
col = A->bindex[j];
lis_array_nrm2(bs,&A->value[bs*j],&t);
if( t<tolnorm && col!=i ) continue;
if( col < i )
{
jbuf[lenl] = col;
iw[col] = lenl;
memcpy(&w[bs*lenl],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
lenl++;
}
else if( col == i )
{
memcpy(&w[bs*i],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
}
else
{
jpos = i + lenu;
jbuf[jpos] = col;
iw[col] = jpos;
memcpy(&w[bs*jpos],&A->value[bs*j],bs*sizeof(LIS_SCALAR));
lenu++;
}
}
j = -1;
len = 0;
while( ++j < lenl )
{
jrow = jbuf[j];
jpos = j;
for(k=j+1;k<lenl;k++)
{
if( jbuf[k]<jrow )
{
jrow = jbuf[k];
jpos = k;
}
}
if( jpos!=j )
{
col = jbuf[j];
jbuf[j] = jbuf[jpos];
jbuf[jpos] = col;
iw[jrow] = j;
iw[col] = jpos;
memcpy(buf_ns,&w[bs*j],bs*sizeof(LIS_SCALAR));
memcpy(&w[bs*j],&w[bs*jpos],bs*sizeof(LIS_SCALAR));
memcpy(&w[bs*jpos],buf_ns,bs*sizeof(LIS_SCALAR));
}
/* lis_array_matmat(bnr,&D->value[bs*jrow],&w[bs*j],buf_fact,LIS_INS_VALUE);*/
lis_array_matinv(bnr,&D->value[bs*jrow],&w[bs*j],buf_fact);
iw[jrow] = -1;
lis_array_nrm2(bs,buf_fact,&t);
if( t * xnrm[jrow] <= tolnorm ) continue;
for(k=0;k<U->nnz[jrow];k++)
{
col = U->index[jrow][k];
lis_array_matmat(bnr,buf_fact,&U->value[jrow][bs*k],buf_ns,LIS_INS_VALUE);
jpos = iw[col];
lis_array_nrm2(bs,buf_ns,&t);
if( t < tolnorm && jpos == -1 )
{
continue;
}
if( col >= i )
{
if( jpos == -1 )
{
upos = i + lenu;
jbuf[upos] = col;
iw[col] = upos;
memcpy(&w[bs*upos],buf_ns,bs*sizeof(LIS_SCALAR));
lenu++;
}
else
{
for(kk=0;kk<bs;kk++)
{
w[bs*jpos+kk] += buf_ns[kk];
}
}
}
else
{
if( jpos == -1 )
{
jbuf[lenl] = col;
iw[col] = lenl;
memcpy(&w[bs*lenl],buf_ns,bs*sizeof(LIS_SCALAR));
lenl++;
}
else
{
for(kk=0;kk<bs;kk++)
{
w[bs*jpos+kk] += buf_ns[kk];
}
}
}
}
for(kk=0;kk<bs;kk++)
{
w[bs*len+kk] = -buf_fact[kk];
}
jbuf[len] = jrow;
len++;
}
lenl = len;
len = _min(lfil,lenl);
for(j=0;j<lenl;j++)
{
lis_array_nrm2(bs,&w[bs*j],&wn[j]);
iw[j] = j;
}
lis_sort_di(0,lenl-1,wn,iw);
lis_sort_i(0,len-1,iw);
L->nnz[i] = len;
if( len>0 )
{
L->index[i] = (LIS_INT *)malloc(len*sizeof(LIS_INT));
L->value[i] = (LIS_SCALAR *)malloc(bs*len*sizeof(LIS_SCALAR));
}
for(j=0;j<len;j++)
{
jpos = iw[j];
L->index[i][j] = jbuf[jpos];
memcpy(&L->value[i][bs*j],&w[bs*jpos],bs*sizeof(LIS_SCALAR));
}
for(j=0;j<lenl;j++) iw[j] = -1;
len = _min(lfil,lenu);
for(j=1;j<lenu;j++)
{
jpos = i+j;
lis_array_nrm2(bs,&w[bs*jpos],&wn[j-1]);
iw[j-1] = jpos;
}
para = lenu - 1;
lis_sort_di(0,para-1,wn,iw);
lis_sort_i(0,len-2,iw);
U->nnz[i] = len-1;
if( len>1 )
{
U->index[i] = (LIS_INT *)malloc((len-1)*sizeof(LIS_INT));
U->value[i] = (LIS_SCALAR *)malloc(bs*(len-1)*sizeof(LIS_SCALAR));
}
lis_array_nrm2(bs,&w[bs*i],&t);
for(j=0;j<len-1;j++)
{
jpos = iw[j];
U->index[i][j] = jbuf[jpos];
memcpy(&U->value[i][bs*j],&w[bs*jpos],bs*sizeof(LIS_SCALAR));
t = _max(t,wn[j]);
}
for(j=0;j<lenu-1;j++) iw[j] = -1;
xnrm[i] = t;
memcpy(&D->value[bs*i],&w[bs*i],bs*sizeof(LIS_SCALAR));
if( i==nr-1 )
{
switch(bnr)
{
case 2:
if( n%2!=0 )
{
D->value[4*(nr-1)+3] = 1.0;
}
break;
case 3:
if( n%3==1 )
{
D->value[9*(nr-1)+4] = 1.0;
D->value[9*(nr-1)+8] = 1.0;
}
else if( n%3==2 )
{
D->value[9*(nr-1)+8] = 1.0;
}
break;
}
}
/* lis_array_invGauss(bnr,&D->value[bs*i]);*/
lis_array_LUdecomp(bnr,&D->value[bs*i]);
for(j=0;j<lenu;j++)
{
iw[ jbuf[i+j] ] = -1;
}
}
precon->L = L;
precon->U = U;
precon->WD = D;
lis_free2(5,w,iw,xnrm,wn,jbuf);
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolve_ilut_bsr"
LIS_INT lis_psolve_ilut_bsr(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
LIS_INT i,j,jj,nr,bnr,bs;
LIS_SCALAR w[9];
LIS_SCALAR *b,*x;
LIS_MATRIX_ILU L,U;
LIS_MATRIX_DIAG D;
LIS_PRECON precon;
/*
* LUx = b
* LU = (D + L*A) * (I + D^-1 * U*A)
*/
LIS_DEBUG_FUNC_IN;
precon = solver->precon;
L = precon->L;
U = precon->U;
D = precon->WD;
b = B->value;
x = X->value;
nr = solver->A->nr;
bnr = solver->A->bnr;
bs = bnr*bnr;
lis_vector_copy(B,X);
for(i=0; i<nr; i++)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
lis_array_matvec(bnr,&L->value[i][bs*j],&x[bnr*jj],&x[bnr*i],LIS_SUB_VALUE);
}
}
for(i=nr-1; i>=0; i--)
{
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
lis_array_matvec(bnr,&U->value[i][bs*j],&x[bnr*jj],&x[bnr*i],LIS_SUB_VALUE);
}
/* lis_array_matvec(bnr,&D->value[bs*i],&x[bnr*i],w,LIS_INS_VALUE);*/
lis_array_invvec(bnr,&D->value[bs*i],&x[bnr*i],w);
memcpy(&x[bnr*i],w,bnr*sizeof(LIS_SCALAR));
}
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
#undef __FUNC__
#define __FUNC__ "lis_psolvet_ilut_bsr"
LIS_INT lis_psolvet_ilut_bsr(LIS_SOLVER solver, LIS_VECTOR B, LIS_VECTOR X)
{
LIS_INT i,j,jj,nr,bnr,bs;
LIS_SCALAR w[9];
LIS_SCALAR *b,*x;
LIS_MATRIX_ILU L,U;
LIS_MATRIX_DIAG D;
LIS_PRECON precon;
/*
* LUx = b
* LU = (D + L*A) * (I + D^-1 * U*A)
*/
LIS_DEBUG_FUNC_IN;
precon = solver->precon;
L = precon->L;
U = precon->U;
D = precon->WD;
b = B->value;
x = X->value;
nr = solver->A->nr;
bnr = solver->A->bnr;
bs = bnr*bnr;
lis_vector_copy(B,X);
for(i=0; i<nr; i++)
{
lis_array_invvect(bnr,&D->value[bs*i],&x[bnr*i],w);
memcpy(&x[bnr*i],w,bnr*sizeof(LIS_SCALAR));
for(j=0;j<U->nnz[i];j++)
{
jj = U->index[i][j];
lis_array_matvect(bnr,&U->value[i][bs*j],&x[bnr*i],&x[bnr*jj],LIS_SUB_VALUE);
}
}
for(i=nr-1; i>=0; i--)
{
for(j=0;j<L->nnz[i];j++)
{
jj = L->index[i][j];
lis_array_matvect(bnr,&L->value[i][bs*j],&x[bnr*i],&x[bnr*jj],LIS_SUB_VALUE);
}
}
LIS_DEBUG_FUNC_OUT;
return LIS_SUCCESS;
}
|
kernel_launcher.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include <sys/time.h>
#include "homp.h"
#include "stencil3d.h"
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
extern __global__ void stencil3d_nvgpu_kernel(long start_n, long len_n, long n, long m, long k, long u_dimX, long u_dimY, long u_dimZ, REAL *u, REAL *uold, int radius, int coeff_dimX, REAL *coeff); /* declaration kept in sync with the launch arguments below */
#endif
void stencil3d_omp_mdev_off_launcher(omp_offloading_t *off, void *args) {
struct stencil3d_off_args * iargs = (struct stencil3d_off_args*) args;
long n = iargs->n;
long m = iargs->m;
long k = iargs->k;
int radius = iargs->radius;
int num_its = iargs->num_its;
long u_dimX = iargs->u_dimX;
long u_dimY = iargs->u_dimY;
long u_dimZ = iargs->u_dimZ;
int coeff_dimX = iargs->coeff_dimX;
omp_data_map_t * map_u = omp_map_get_map(off, iargs->u, -1); /* -1: look up the data map for u */
omp_data_map_t * map_uold = omp_map_get_map(off, iargs->uold, -1); /* data map for uold */
omp_data_map_t * map_coeff = omp_map_get_map(off, iargs->coeff, -1); /* data map for coeff */
REAL * u = (REAL*) map_u->map_dev_wextra_ptr;
REAL * uold = (REAL*) map_uold->map_dev_wextra_ptr;
REAL *coeff = (REAL*) map_coeff->map_dev_wextra_ptr;
coeff = coeff + (2*radius+1) * radius + radius; /* shift to the center of the stencil coefficients; TODO: this should be a call that maps a host-side address to a device-side address */
int count = 6*radius+1;
#ifdef SQUARE_STENCIL
count = coeff_dimX * coeff_dimX * coeff_dimX;
#endif
long it; /* iteration */
#if CORRECTNESS_CHECK
printf("kernel launcher: u: %X, uold: %X\n", u, uold);
print_array("u in device: ", "udev", u, n, m, k);
print_array("uold in device: ", "uolddev", uold, n, m, k);
#endif
long offset;
long start;
long len;
if (dist_dim == 1) {
offset = omp_loop_get_range(off, 0, &start, &len);
} else if (dist_dim == 2) {
omp_loop_get_range(off, 0, &start, &len);
} else /* dist_dim == 3 */ {
omp_loop_get_range(off, 0, &start, &len); /* todo */
omp_loop_get_range(off, 0, &start, &len); /* todo */
}
omp_device_type_t devtype = off->dev->type;
//printf("dev: %d, offset: %d, length: %d, local start: %d, u: %X, uold: %X, coeff-center: %X\n", off->devseqid, offset, len, start, u, uold, coeff);
//#pragma omp parallel shared(n, m, radius, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold)
for (it = 0; it < num_its; it++) {
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
if (devtype == OMP_DEVICE_NVGPU) {
dim3 threads_per_team(16, 16); /* NOTE: the 16x16 block size may warrant further tuning */
dim3 teams_per_league((len+threads_per_team.x-1)/threads_per_team.x, (m+threads_per_team.y-1)/threads_per_team.y); /* we assume the extents divide evenly */
stencil3d_nvgpu_kernel<<<teams_per_league, threads_per_team, 0, off->stream->systream.cudaStream>>>
(start, len, n, m, k, u_dimX, u_dimY, u_dimZ, u, uold, radius, coeff_dimX, coeff);
} else
#endif
if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
#if CORRECTNESS_CHECK
BEGIN_SERIALIZED_PRINTF(off->devseqid);
printf("udev: dev: %d, %dX%d\n", off->devseqid, n, m);
print_array_dev("udev", off->devseqid, "u",(REAL*)u, n, m, k);
printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
print_array_dev("uolddev", off->devseqid, "uold",(REAL*)uold, uold_0_length, uold_1_length);
printf("i_start: %d, j_start: %d, n: %d, m: %d, k: %d, uold_0_offset: %d, uold_1_offset: %d\n", i_start, j_start, n, m, k, uold_0_offset, uold_1_offset);
END_SERIALIZED_PRINTF();
#endif
//#pragma omp for private(ix, iy, ir)
int ix, iy, iz, ir;
for (ix = start; ix < start+len; ix++) {
REAL * temp_u = &u[(ix+radius)*u_dimY*u_dimZ+radius];
REAL * temp_uold = &uold[(ix+radius)*u_dimY*u_dimZ+radius];
for (iy = 0; iy < m; iy++) {
for (iz = 0; iz < k; iz++) {
// if (off->devseqid == 0)printf("dev: %d, [%d][%d]:%f\n", off->devseqid, ix, iy, temp_u[0]);
REAL result = temp_uold[0] * coeff[0];
/* 2/4 way loop unrolling */
for (ir = 1; ir <= radius; ir++) {
result += coeff[ir] * temp_uold[ir]; //horizontal right
result += coeff[-ir]* temp_uold[-ir]; // horizontal left
result += coeff[-ir*coeff_dimX] * temp_uold[-ir * u_dimY]; //vertical up
result += coeff[ir*coeff_dimX] * temp_uold[ir * u_dimY]; // vertical bottom
result += coeff[-ir*coeff_dimX] * temp_uold[-ir * u_dimZ]; //vertical up - z
result += coeff[ir*coeff_dimX] * temp_uold[ir * u_dimZ]; // vertical bottom - z
#ifdef SQUARE_STENCIL
result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY - ir]; // left upper corner
result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY + ir]; // right upper corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY - ir]; // left bottom corner
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY + ir]; // right bottom corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimZ - ir]; // left bottom corner - Z
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimZ + ir]; // right bottom corner - Z
#endif
}
*temp_u = result/count;
temp_u++;
temp_uold++;
}//z loop end
}//y end
}
} else {
fprintf(stderr, "device type is not supported for this call\n");
}
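/* synchronize all devices, exchange the halo region of the array that
   was written this iteration with the left/right neighbours, then swap
   the roles of u and uold for the next sweep */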
pthread_barrier_wait(&off->off_info->inter_dev_barrier);
if (it % 2 == 0) omp_halo_region_pull(map_u, 0, OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT);
else omp_halo_region_pull(map_uold, 0, OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT);
REAL * tmp = uold;
uold = u;
u = tmp;
}
}
void stencil3d_omp_mdev_iterate_off_launcher(omp_offloading_t * off, void *args) {
struct stencil3d_off_args * iargs = (struct stencil3d_off_args*) args;
long n = iargs->n;
long m = iargs->m;
long k = iargs->k;
int radius = iargs->radius;
int num_its = iargs->num_its;
long u_dimX = iargs->u_dimX;
long u_dimY = iargs->u_dimY;
long u_dimZ = iargs->u_dimZ;
int coeff_dimX = iargs->coeff_dimX;
omp_data_map_t * map_u = omp_map_get_map(off, iargs->u, -1); /* -1: look up the data map for u */
omp_data_map_t * map_uold = omp_map_get_map(off, iargs->uold, -1); /* data map for uold */
omp_data_map_t * map_coeff = omp_map_get_map(off, iargs->coeff, -1); /* data map for coeff */
REAL * u = (REAL*) map_u->map_dev_wextra_ptr;
REAL * uold = (REAL*) map_uold->map_dev_wextra_ptr;
REAL *coeff = (REAL*) map_coeff->map_dev_wextra_ptr;
coeff = coeff + (2*radius+1) * radius + radius; /* shift to the center of the stencil coefficients; TODO: this should be a call that maps a host-side address to a device-side address */
int count = 6*radius+1;
#ifdef SQUARE_STENCIL
count = coeff_dimX * coeff_dimX * coeff_dimX;
#endif
long it; /* iteration */
#if CORRECTNESS_CHECK
printf("kernel launcher: u: %X, uold: %X\n", u, uold);
print_array("u in device: ", "udev", u, n, m, k);
print_array("uold in device: ", "uolddev", uold, n, m, k);
#endif
long offset;
long start;
long len;
if (dist_dim == 1) {
offset = omp_loop_get_range(off, 0, &start, &len);
} else if (dist_dim == 2) {
omp_loop_get_range(off, 0, &start, &len);
} else /* dist_dim == 3 */ {
omp_loop_get_range(off, 0, &start, &len); /* todo */
omp_loop_get_range(off, 0, &start, &len); /* todo */
}
omp_device_type_t devtype = off->dev->type;
//printf("dev: %d, offset: %d, length: %d, local start: %d, u: %X, uold: %X, coeff-center: %X\n", off->devseqid, offset, len, start, u, uold, coeff);
//#pragma omp parallel shared(n, m, radius, coeff, num_its, u_dimX, u_dimY, coeff_dimX) private(it) firstprivate(u, uold)
#if defined (DEVICE_NVGPU_CUDA_SUPPORT)
if (devtype == OMP_DEVICE_NVGPU) {
dim3 threads_per_team(16, 16);
dim3 teams_per_league((len+threads_per_team.x-1)/threads_per_team.x, (m+threads_per_team.y-1)/threads_per_team.y); /* we assume the extents divide evenly */
stencil3d_nvgpu_kernel<<<teams_per_league, threads_per_team, 0, off->stream->systream.cudaStream>>>
(start, len, n, m, k, u_dimX, u_dimY, u_dimZ, u, uold, radius, coeff_dimX, coeff); /* same launch as in stencil3d_omp_mdev_off_launcher */
} else
#endif
if (devtype == OMP_DEVICE_THSIM || devtype == OMP_DEVICE_HOSTCPU) {
#if CORRECTNESS_CHECK
BEGIN_SERIALIZED_PRINTF(off->devseqid);
printf("udev: dev: %d, %dX%d\n", off->devseqid, n, m, k);
print_array_dev("udev", off->devseqid, "u",(REAL*)u, n, m, k);
printf("uolddev: dev: %d, %dX%d\n", off->devseqid, uold_0_length, uold_1_length);
print_array_dev("uolddev", off->devseqid, "uold",(REAL*)uold, uold_0_length, uold_1_length);
printf("i_start: %d, j_start: %d, n: %d, m: %d, k: %d, uold_0_offset: %d, uold_1_offset: %d\n", i_start, j_start, n, m, k, uold_0_offset, uold_1_offset);
END_SERIALIZED_PRINTF();
#endif
//#pragma omp for private(ix, iy, ir)
int ix, iy, iz, ir;
for (ix = start; ix < start + len; ix++) {
REAL *temp_u = &u[(ix + radius) * u_dimY * u_dimZ + radius];
REAL *temp_uold = &uold[(ix + radius) * u_dimY * u_dimZ + radius];
for (iy = 0; iy < m; iy++) {
for (iz = 0; iz < k; iz++) {
// if (off->devseqid == 0)printf("dev: %d, [%d][%d]:%f\n", off->devseqid, ix, iy, temp_u[0]);
REAL result = temp_uold[0] * coeff[0];
/* 2/4 way loop unrolling */
for (ir = 1; ir <= radius; ir++) {
result += coeff[ir] * temp_uold[ir]; //horizontal right
result += coeff[-ir] * temp_uold[-ir]; // horizontal left
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimY]; //vertical up
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimY]; // vertical bottom
result += coeff[-ir * coeff_dimX] * temp_uold[-ir * u_dimZ]; //vertical up - z
result += coeff[ir * coeff_dimX] * temp_uold[ir * u_dimZ]; // vertical bottom - z
#ifdef SQUARE_STENCIL
result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY - ir]; // left upper corner
result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY + ir]; // right upper corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY - ir]; // left bottom corner
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY + ir]; // right bottom corner
result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimZ - ir]; // left bottom corner - z
result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimZ + ir]; // right bottom corner - z
#endif
}
*temp_u = result / count;
temp_u++;
temp_uold++;
}//z end
}//y end
}
} else {
fprintf(stderr, "device type is not supported for this call\n");
}
/*
pthread_barrier_wait(&off->off_info->inter_dev_barrier);
omp_halo_region_pull(map_u, 0, OMP_DATA_MAP_EXCHANGE_FROM_LEFT_RIGHT);
*/
}
|
chisquare.h | /*
This file is part of Mitsuba, a physically based rendering system.
Copyright (c) 2007-2014 by Wenzel Jakob and others.
Mitsuba is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 3
as published by the Free Software Foundation.
Mitsuba is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#if !defined(__MITSUBA_CORE_CHISQUARE_H_)
#define __MITSUBA_CORE_CHISQUARE_H_
#include <mitsuba/render/common.h>
#include <boost/tuple/tuple.hpp>
#include <boost/function.hpp>
MTS_NAMESPACE_BEGIN
/// Minimum expected cell frequency. Cells below this value will be pooled
#define CHISQR_MIN_EXP_FREQUENCY 5
/**
* \brief Chi-square goodness-of-fit test on the sphere
*
* This class performs a chi-square goodness-of-fit test of the null hypothesis
* that a specified sampling procedure produces samples that are distributed
* according to a supplied density function. This is very useful to verify BRDF
* and phase function sampling codes for their correctness. Currently, it
* supports both 2D and discrete sampling methods and mixtures thereof.
*
* This implementation works by generating a large batch of samples, which are
* then accumulated into rectangular bins in spherical coordinates. To obtain
* reference bin counts, the provided density function is numerically
* integrated over the area of each bin. Comparing the actual and reference
* bin counts yields the desired test statistic.
*
* Given a probability distribution with the following interface
*
* \code
* class MyDistribution {
* // Sample a (optionally weighted) direction. A non-unity weight
* // in the return value is needed when the sampling distribution
* // doesn't exactly match the implementation in pdf()
* boost::tuple<Vector, Float, EMeasure> generateSample() const;
*
* /// Compute the probability density for the specified direction and measure
* Float pdf(const Vector &direction, EMeasure) const;
* };
* \endcode
*
* the code in this class might be used as follows
*
* \code
* MyDistribution myDistrInstance;
* ChiSquare chiSqr;
*
* // Initialize the tables used by the chi-square test
* chiSqr.fill(
* boost::bind(&MyDistribution::generateSample, myDistrInstance),
* boost::bind(&MyDistribution::pdf, myDistrInstance, _1, _2)
* );
*
* // Optional: dump the tables to a MATLAB file for external analysis
* chiSqr.dumpTables("debug.m");
*
* if (!chiSqr.runTest())
* Log(EError, "Uh oh -- test failed, the implementation is probably incorrect!");
* \endcode
* \ingroup libcore
*/
class MTS_EXPORT_CORE ChiSquare : public Object {
public:
/// Possible outcomes in \ref runTest()
enum ETestResult {
/// The null hypothesis was rejected
EReject = 0,
/// The null hypothesis was accepted
EAccept = 1,
/// The degrees of freedom were too low
ELowDoF = 2
};
/**
* \brief Create a new Chi-square test instance with the given
* resolution and sample count
*
* \param thetaBins
* Number of bins wrt. latitude. The default is 10
*
* \param phiBins
* Number of bins wrt. azimuth. The default is to use
* twice the number of \c thetaBins
*
* \param numTests
* Number of independent tests that will be performed. This
* is used to compute the Sidak-correction factor.
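* (each individual test is then run at significance
* 1 - (1 - alpha)^(1/numTests) for an overall level alpha)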
*
* \param sampleCount
* Number of samples to be used when computing the bin
* values. The default is \c thetaBins*phiBins*5000
*/
ChiSquare(int thetaBins = 10, int phiBins = 0,
int numTests = 1, size_t sampleCount = 0);
/// Get the log level
inline ELogLevel getLogLevel() const { return m_logLevel; }
/// Set the log level
inline void setLogLevel(ELogLevel logLevel) { m_logLevel = logLevel; }
/**
* \brief Set the tolerance threshold for bins with very low
* aggregate probabilities
*
* When the Chi-square test integrates the supplied probability
* density function over the support of a bin and determines that
* the aggregate bin probability is zero, the test would ordinarily
* fail if as much as one sample is placed in that bin in the
* subsequent sampling step. However, due to various numerical
* errors in a system based on finite-precision arithmetic, it
* may be a good idea to tolerate at least a few samples without
* immediately rejecting the null hypothesis. This parameter
* sets this threshold. The default value is \c number-of-samples*1e-4f
*/
inline void setTolerance(Float tolerance) { m_tolerance = tolerance; }
/**
* \brief Fill the actual and reference bin counts
*
* Please see the class documentation for a description
* on how to invoke this function
*/
void fill(
const boost::function<boost::tuple<Vector, Float, EMeasure>()> &sampleFn,
const boost::function<Float (const Vector &, EMeasure)> &pdfFn);
/**
* \brief Dump the bin counts to a file using MATLAB format
*/
void dumpTables(const fs::path &filename);
/**
* \brief Perform the actual chi-square test
*
* \param pvalThresh
* The implementation will reject the null hypothesis
* when the computed p-value lies below this parameter
* (default: 0.01f)
*
* \return A status value of type \ref ETestResult
*/
ETestResult runTest(Float pvalThresh = 0.01f);
MTS_DECLARE_CLASS()
protected:
/// Release all memory
virtual ~ChiSquare();
/// Functor to evaluate the pdf values in parallel using OpenMP
static void integrand(
const boost::function<Float (const Vector &, EMeasure)> &pdfFn,
size_t nPts, const Float *in, Float *out) {
#if defined(MTS_OPENMP)
#pragma omp parallel for
#endif
for (int i=0; i<(int) nPts; ++i)
out[i] = pdfFn(sphericalDirection(in[2*i], in[2*i+1]), ESolidAngle)
* std::sin(in[2*i]);
}
private:
ELogLevel m_logLevel;
Float m_tolerance;
int m_thetaBins, m_phiBins;
int m_numTests;
size_t m_sampleCount;
Float *m_table;
Float *m_refTable;
};
MTS_NAMESPACE_END
#endif /* __MITSUBA_CORE_CHISQUARE_H_ */
|
task_yield.c | #include <omp.h>
void something_useful ( void );
void something_critical ( void );
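/*
 * Example of the taskyield construct: while a task waits for the lock,
 * taskyield lets the runtime suspend it and run a different task on the
 * same thread instead of busy-waiting. foo() is expected to be invoked
 * from within a parallel region.
 */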
void foo ( omp_lock_t * lock, int n )
{
int i;
for ( i = 0; i < n; i++ )
#pragma omp task
{
something_useful();
while ( !omp_test_lock(lock) ) {
#pragma omp taskyield
}
something_critical();
omp_unset_lock(lock);
}
}
|
threaded_eigen_matrix.h | /*
* Copyright (c) 2017 Ivan Iakoupov
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef THREADED_EIGEN_MATRIX_H
#define THREADED_EIGEN_MATRIX_H
#include <omp.h>
#include <complex>
#include <functional>
#include <vector>
#include "Eigen/Dense"
class ThreadedEigenMatrix
{
int m_rows;
int m_cols;
int m_threads;
std::vector<Eigen::MatrixXcd> m_matrix_chunks;
std::vector<int> m_block_start_indices;
public:
ThreadedEigenMatrix() :
m_threads(omp_get_max_threads()),
m_rows(0),
m_cols(0)
{}
explicit ThreadedEigenMatrix(std::function<std::complex<double>(int,int)> f,
int rows, int cols) :
m_threads(omp_get_max_threads()),
m_rows(rows),
m_cols(cols)
{
m_matrix_chunks.resize(m_threads);
const int normal_chunk_size = m_rows/m_threads;
std::vector<int> chunk_sizes(m_threads);
for (int n = 0; n < m_threads; ++n) {
chunk_sizes[n] = normal_chunk_size;
}
// The last chunk size can be different
const int last_chunk_size = m_rows - (m_threads-1)*normal_chunk_size;
chunk_sizes[m_threads-1] = last_chunk_size;
#pragma omp parallel for
for (int n = 0; n < m_threads; ++n) {
const int chunk_size = chunk_sizes[n];
m_matrix_chunks[n] = Eigen::MatrixXcd::Zero(chunk_size, m_cols);
for (int i = 0; i < chunk_size; ++i) {
for (int j = 0; j < m_cols; ++j) {
m_matrix_chunks[n](i,j) = f(i + n*normal_chunk_size, j);
}
}
}
m_block_start_indices.resize(m_threads);
int rowsSum = 0;
for (int j = 0; j < m_threads; ++j) {
m_block_start_indices[j] = rowsSum;
rowsSum += m_matrix_chunks[j].rows();
}
}
explicit ThreadedEigenMatrix(Eigen::MatrixXcd M) :
m_threads(omp_get_max_threads()),
m_rows(M.rows()),
m_cols(M.cols())
{
m_matrix_chunks.resize(m_threads);
const int normal_chunk_size = m_rows/m_threads;
std::vector<int> chunk_sizes(m_threads);
for (int n = 0; n < m_threads; ++n) {
chunk_sizes[n] = normal_chunk_size;
}
// The last chunk size can be different
const int last_chunk_size = m_rows - (m_threads-1)*normal_chunk_size;
chunk_sizes[m_threads-1] = last_chunk_size;
#pragma omp parallel for
for (int n = 0; n < m_threads; ++n) {
const int chunk_size = chunk_sizes[n];
m_matrix_chunks[n] = Eigen::MatrixXcd::Zero(chunk_size, m_cols);
for (int i = 0; i < chunk_size; ++i) {
for (int j = 0; j < m_cols; ++j) {
m_matrix_chunks[n](i,j) = M(i + n*normal_chunk_size, j);
}
}
}
m_block_start_indices.resize(m_threads);
int rowsSum = 0;
for (int j = 0; j < m_threads; ++j) {
m_block_start_indices[j] = rowsSum;
rowsSum += m_matrix_chunks[j].rows();
}
}
Eigen::VectorXcd operator*(const Eigen::VectorXcd &v) const
{
Eigen::VectorXcd ret(m_rows);
#pragma omp parallel for
for (int j = 0; j < m_threads; ++j) {
const int block_start = m_block_start_indices[j];
const int block_end = block_start + m_matrix_chunks[j].rows();
Eigen::VectorXcd ret_i = m_matrix_chunks[j]*v;
for (int k = block_start; k < block_end; ++k) {
ret(k) = ret_i(k-block_start);
}
}
return ret;
}
};
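// Minimal usage sketch (values illustrative): build the matrix from a
// generator function, then apply it to a vector; each OpenMP thread
// multiplies its own horizontal chunk.
//
//   ThreadedEigenMatrix A(
//       [](int i, int j) { return std::complex<double>(i == j, 0.0); },
//       1024, 1024);
//   Eigen::VectorXcd v = Eigen::VectorXcd::Random(1024);
//   Eigen::VectorXcd w = A * v;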
#endif // THREADED_EIGEN_MATRIX_H
|
matmult-mpi.c | /*
* Copyright (c) 2014-2017, Sebastien Vincent
*
* Distributed under the terms of the BSD 3-clause License.
* See the LICENSE file for details.
*/
/**
* \file matmult-mpi.c
* \brief Matrix multiplication in C/MPI.
* \author Sebastien Vincent
* \date 2018
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <mpi.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* \brief Default row size.
*/
static const size_t DEFAULT_ROW_SIZE = 1024;
/**
* \brief Default column size.
*/
static const size_t DEFAULT_COLUMN_SIZE = 1024;
/**
* \struct configuration
* \brief Configuration.
*/
struct configuration
{
/**
* \brief Row size.
*/
size_t m;
/**
* \brief Column size.
*/
size_t n;
/**
* \brief Print input and output matrices.
*/
int print_matrix;
/**
* \brief Number of threads.
*/
size_t threads;
};
/**
* \brief Initializes the matrices.
* \param mat1 first matrix.
* \param mat2 second matrix.
* \param m row size of the matrix.
* \param n column size of the matrix.
*/
void mat_init(int* mat1, int* mat2, size_t m, size_t n)
{
for(size_t i = 0 ; i < (m * n) ; i++)
{
mat1[i] = rand();
mat2[i] = rand();
}
}
/**
* \brief Print the matrix content on stdout.
* \param mat the matrix.
* \param m row size of the matrix.
* \param n column size of the matrix.
*/
void mat_print(int* mat, size_t m, size_t n)
{
for(size_t i = 0 ; i < m ; i++)
{
for(size_t j = 0 ; j < n ; j++)
{
fprintf(stdout, "%d ", mat[i * m + j]);
}
fprintf(stdout, "\n");
}
}
/**
* \brief Performs multiplication of matrices.
* \param mat1 first matrix.
* \param mat2 second matrix.
* \param result result matrix.
* \param m row size of first matrix.
* \param n column size of first matrix.
* \param w row size of second matrix.
* \param rank MPI rank.
* \param world_size Total number of MPI nodes.
* \param threads number of threads to use (OpenMP only).
* \return 0 if success, -1 if matrices cannot be multiplied.
*/
int mat_mult_mpi(int* mat1, int* mat2, int* result, size_t m, size_t n,
size_t w, size_t rank, size_t world_size, size_t threads)
{
int* res = malloc(sizeof(int) * (m * n) / world_size);
(void)threads;
if(n != w || !res)
{
return -1;
}
/* distribute blocks of rows of the first matrix; MPI forbids aliasing
the send and receive buffers, so the root keeps its block in place */
if(rank == 0)
{
MPI_Scatter(mat1, (m * n) / world_size, MPI_INT, MPI_IN_PLACE,
(m * n) / world_size, MPI_INT, 0 /* rank root */, MPI_COMM_WORLD);
}
else
{
MPI_Scatter(NULL, (m * n) / world_size, MPI_INT, mat1,
(m * n) / world_size, MPI_INT, 0 /* rank root */, MPI_COMM_WORLD);
}
/* broadcast second matrix to other nodes */
MPI_Bcast(mat2, m * n, MPI_INT, 0, MPI_COMM_WORLD);
/* matrix multiply */
#if _OPENMP
/* to set spread way, add to next line: proc_bind(spread) */
#pragma omp parallel num_threads(threads)
#endif
for(size_t i = 0 ; i < (m / world_size) ; i++)
{
#if _OPENMP
#pragma omp for schedule(static)
#endif
for(size_t j = 0 ; j < n ; j++)
{
int tmp = 0;
for(size_t k = 0 ; k < w ; k++)
{
tmp += mat1[i * w + k] * mat2[k * n + j];
}
res[i * n + j] = tmp;
}
}
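/* gather the partial result rows from all ranks into result on rank 0 */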
MPI_Gather(res, (m * n) / world_size, MPI_INT, result, (m * n) / world_size,
MPI_INT, 0, MPI_COMM_WORLD);
free(res);
MPI_Barrier(MPI_COMM_WORLD);
return 0;
}
/**
* \brief Print help.
* \param program program name.
*/
void print_help(const char* program)
{
fprintf(stdout, "Usage: %s [-m row size] [-n column size] "
#ifdef _OPENMP
"[-t thread_number]"
#endif
"[-p] [-h]\n\n"
" -h\t\tDisplay this help\n"
#ifdef _OPENMP
" -t nb\t\tDefines number of threads to use\n"
#endif
" -p\t\tPrint the input and output matrixes\n"
" -m row\tDefine row size (default 1024)\n"
" -n col\tDefine column size (default 1024)\n",
program);
}
/**
* \brief Parse command line.
* \param argc number of arguments.
* \param argv array of arguments.
* \param configuration configuration parameters.
* \return 0 to exit with success, -1 to exit with error, otherwise continue.
*/
int parse_cmdline(int argc, char** argv,
struct configuration* configuration)
{
/*
* h: print help and exit
* p: print input and output matrices
* m: row size
* n: column size
* t: number of threads to use
*/
static const char* options = "hpm:n:t:";
int opt = 0;
int print_matrix = 0;
long m = DEFAULT_ROW_SIZE;
long n = DEFAULT_COLUMN_SIZE;
int threads = sysconf(_SC_NPROCESSORS_ONLN);
int ret = 1;
assert(configuration);
while((opt = getopt(argc, argv, options)) != -1)
{
switch(opt)
{
case 'h':
/* help */
print_help(argv[0]);
return 0;
break;
case 'p':
print_matrix = 1;
break;
case 'm':
m = atol(optarg);
if(m < 2)
{
fprintf(stderr, "Bad argument for '-m' %ld\n", m);
ret = -1;
}
break;
case 'n':
n = atol(optarg);
if(n < 2)
{
fprintf(stderr, "Bad argument for '-n' %ld\n", n);
ret = -1;
}
break;
case 't':
threads = atol(optarg);
if(threads <= 0)
{
fprintf(stderr, "Bad argument for '-t': %s\n", optarg);
ret = -1;
}
break;
default:
fprintf(stderr, "Bad option (%c)\n", optopt);
ret = -1;
break;
}
}
configuration->print_matrix = print_matrix;
configuration->m = m;
configuration->n = n;
#ifdef _OPENMP
configuration->threads = threads;
#else
configuration->threads = 1;
#endif
return ret;
}
/**
* \brief Entry point of the program.
* \param argc number of arguments.
* \param argv array of arguments.
* \return EXIT_SUCCESS or EXIT_FAILURE.
*/
int main(int argc, char** argv)
{
int* mat1 = NULL;
int* mat2 = NULL;
int* mat3 = NULL;
size_t m = DEFAULT_ROW_SIZE;
size_t n = DEFAULT_COLUMN_SIZE;
size_t w = DEFAULT_COLUMN_SIZE;
int print_matrix = 0;
struct configuration config;
double start = 0;
double end = 0;
int ret = 0;
int world_size = 0;
int world_rank = 0;
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len = 0;
ret = parse_cmdline(argc, argv, &config);
if(ret == 0)
{
exit(EXIT_SUCCESS);
}
else if(ret == -1)
{
exit(EXIT_FAILURE);
}
m = config.m;
n = config.n;
w = config.n;
print_matrix = config.print_matrix;
/* MPI initialization */
#if _OPENMP
int required = MPI_THREAD_SERIALIZED;
int provided = 0;
if(MPI_Init_thread(NULL, NULL, required, &provided) != MPI_SUCCESS)
{
fprintf(stderr, "Failed to initialize MPI.\n");
exit(EXIT_FAILURE);
}
if(provided < required)
{
fprintf(stderr, "Failed to configure MPI thread.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
#else
if(MPI_Init(NULL, NULL) != MPI_SUCCESS)
{
fprintf(stderr, "Failed to initialize MPI.\n");
exit(EXIT_FAILURE);
}
#endif
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
MPI_Get_processor_name(processor_name, &name_len);
if(m % world_size)
{
if(world_rank == 0)
{
fprintf(stderr,
"Number of rows (%zu) not divisible by number of processes (%d)\n",
m,
world_size);
}
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
fprintf(stdout, "MPI from processor %s, rank %d out of %d\n",
processor_name, world_rank, world_size);
mat1 = malloc((m * n) * sizeof(int));
mat2 = malloc((m * n) * sizeof(int));
mat3 = malloc((m * n) * sizeof(int));
if(!mat1 || !mat2 || !mat3)
{
perror("malloc");
free(mat1);
free(mat2);
free(mat3);
MPI_Finalize();
exit(EXIT_FAILURE);
}
/* random initialization */
srand(time(NULL));
if(world_rank == 0)
{
mat_init(mat1, mat2, m, n);
if(print_matrix)
{
fprintf(stdout, "Matrix 1:\n");
mat_print(mat1, m, n);
fprintf(stdout, "Matrix 2:\n");
mat_print(mat2, m, n);
}
fprintf(stdout, "Compute with %zu MPI node(s) with %zu thread(s) \n",
(size_t)world_size, config.threads);
}
start = MPI_Wtime();
if(mat_mult_mpi(mat1, mat2, mat3, m, n, w, world_rank, world_size,
config.threads) == -1)
{
fprintf(stderr, "Matrixes cannot be multiplied\n");
ret = EXIT_FAILURE;
}
else
{
end = MPI_Wtime();
if(world_rank == 0)
{
fprintf(stdout, "Multiplication success: %f ms\n", (end - start) * 1000);
if(print_matrix)
{
mat_print(mat3, m, n);
}
}
ret = EXIT_SUCCESS;
}
/* free resources */
free(mat1);
free(mat2);
free(mat3);
MPI_Finalize();
return ret;
}
|
bloom.c | /*******************************************************************************
***
*** Author: Tyler Barrus
*** email: barrust@gmail.com
***
*** Version: 1.9.0
***
*** License: MIT 2015
***
*******************************************************************************/
#include <stdlib.h>
#include <math.h> /* pow, exp */
#include <stdio.h> /* printf */
#include <string.h> /* strlen */
#include <fcntl.h> /* O_RDWR */
#include <sys/mman.h> /* mmap, munmap */
#include <sys/types.h> /* */
#include <sys/stat.h> /* fstat */
#include <unistd.h> /* close */
#include "bloom.h"
#define CHECK_BIT_CHAR(c, k) ((c) & (1 << (k)))
#define CHECK_BIT(A, k) (CHECK_BIT_CHAR(A[((k) / 8)], ((k) % 8)))
// #define set_bit(A,k) (A[((k) / 8)] |= (1 << ((k) % 8)))
// #define clear_bit(A,k) (A[((k) / 8)] &= ~(1 << ((k) % 8)))
/* define some constant magic looking numbers */
#define CHAR_LEN 8
#define LOG_TWO_SQUARED 0.480453013918201388143813800 // 0.4804530143737792968750000
#define LOG_TWO 0.693147180559945286226764000
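/* These constants feed the usual Bloom filter sizing formulas,
   presumably in __calculate_optimal_hashes():
   bits:   m = -(n * ln(p)) / ln(2)^2
   hashes: k = (m / n) * ln(2) */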
/* https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetTable */
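/* The B2/B4/B6 macros expand recursively so that bits_set_table[i] is the
   popcount (number of set bits) of the byte value i. */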
#define B2(n) n, n+1, n+1, n+2
#define B4(n) B2(n), B2(n+1), B2(n+1), B2(n+2)
#define B6(n) B4(n), B4(n+1), B4(n+1), B4(n+2)
static const unsigned char bits_set_table[256] = {B6(0), B6(1), B6(1), B6(2)};
/*******************************************************************************
*** PRIVATE FUNCTIONS
*******************************************************************************/
static uint64_t* __default_hash(int num_hashes, const char *str);
static uint64_t __fnv_1a(const char *key, int seed);
static void __calculate_optimal_hashes(BloomFilter *bf);
static void __read_from_file(BloomFilter *bf, FILE *fp, short on_disk, const char *filename);
static void __write_to_file(BloomFilter *bf, FILE *fp, short on_disk);
static void __update_elements_added_on_disk(BloomFilter *bf);
static int __sum_bits_set_char(unsigned char c);
static int __check_if_union_or_intersection_ok(BloomFilter *res, BloomFilter *bf1, BloomFilter *bf2);
int bloom_filter_init_alt(BloomFilter *bf, uint64_t estimated_elements, float false_positive_rate, BloomHashFunction hash_function) {
if(estimated_elements == 0 || estimated_elements > UINT64_MAX || false_positive_rate <= 0.0 || false_positive_rate >= 1.0) {
return BLOOM_FAILURE;
}
bf->estimated_elements = estimated_elements;
bf->false_positive_probability = false_positive_rate;
__calculate_optimal_hashes(bf);
bf->bloom = (unsigned char*)calloc(bf->bloom_length + 1, sizeof(char)); // pad to ensure no running off the end
bf->elements_added = 0;
bloom_filter_set_hash_function(bf, hash_function);
bf->__is_on_disk = 0; // not on disk
return BLOOM_SUCCESS;
}
int bloom_filter_init_on_disk_alt(BloomFilter *bf, uint64_t estimated_elements, float false_positive_rate, const char *filepath, BloomHashFunction hash_function) {
if(estimated_elements == 0 || estimated_elements > UINT64_MAX || false_positive_rate <= 0.0 || false_positive_rate >= 1.0) {
return BLOOM_FAILURE;
}
bf->estimated_elements = estimated_elements;
bf->false_positive_probability = false_positive_rate;
__calculate_optimal_hashes(bf);
bf->elements_added = 0;
FILE *fp;
fp = fopen(filepath, "w+b");
if (fp == NULL) {
fprintf(stderr, "Can't open file %s!\n", filepath);
return BLOOM_FAILURE;
}
__write_to_file(bf, fp, 1);
fclose(fp);
// slightly inefficient to redo some of the calculations...
return bloom_filter_import_on_disk_alt(bf, filepath, hash_function);
}
void bloom_filter_set_hash_function(BloomFilter *bf, BloomHashFunction hash_function) {
bf->hash_function = (hash_function == NULL) ? __default_hash : hash_function;
}
int bloom_filter_destroy(BloomFilter *bf) {
if (bf->__is_on_disk == 0) {
free(bf->bloom);
} else {
fclose(bf->filepointer);
munmap(bf->bloom, bf->__filesize);
}
bf->bloom = NULL;
bf->filepointer = NULL;
bf->elements_added = 0;
bf->estimated_elements = 0;
bf->false_positive_probability = 0;
bf->number_hashes = 0;
bf->number_bits = 0;
bf->hash_function = NULL;
bf->__is_on_disk = 0;
bf->__filesize = 0;
return BLOOM_SUCCESS;
}
int bloom_filter_clear(BloomFilter *bf) {
for (unsigned long i = 0; i < bf->bloom_length; ++i) {
bf->bloom[i] = 0;
}
bf->elements_added = 0;
__update_elements_added_on_disk(bf);
return BLOOM_SUCCESS;
}
void bloom_filter_stats(BloomFilter *bf) {
const char *is_on_disk = (bf->__is_on_disk == 0 ? "no" : "yes");
uint64_t size_on_disk = bloom_filter_export_size(bf);
printf("BloomFilter\n\
bits: %" PRIu64 "\n\
estimated elements: %" PRIu64 "\n\
number hashes: %d\n\
max false positive rate: %f\n\
bloom length (8 bits): %ld\n\
elements added: %" PRIu64 "\n\
estimated elements added: %" PRIu64 "\n\
current false positive rate: %f\n\
export size (bytes): %" PRIu64 "\n\
number bits set: %" PRIu64 "\n\
is on disk: %s\n",
bf->number_bits, bf->estimated_elements, bf->number_hashes,
bf->false_positive_probability, bf->bloom_length, bf->elements_added,
bloom_filter_estimate_elements(bf),
bloom_filter_current_false_positive_rate(bf), size_on_disk,
bloom_filter_count_set_bits(bf), is_on_disk);
}
int bloom_filter_add_string(BloomFilter *bf, const char *str) {
uint64_t *hashes = bloom_filter_calculate_hashes(bf, str, bf->number_hashes);
int res = bloom_filter_add_string_alt(bf, hashes, bf->number_hashes);
free(hashes);
return res;
}
int bloom_filter_check_string(BloomFilter *bf, const char *str) {
uint64_t *hashes = bloom_filter_calculate_hashes(bf, str, bf->number_hashes);
int res = bloom_filter_check_string_alt(bf, hashes, bf->number_hashes);
free(hashes);
return res;
}
uint64_t* bloom_filter_calculate_hashes(BloomFilter *bf, const char *str, unsigned int number_hashes) {
return bf->hash_function(number_hashes, str);
}
/* Add a string to a bloom filter using the defined hashes */
int bloom_filter_add_string_alt(BloomFilter *bf, uint64_t *hashes, unsigned int number_hashes_passed) {
if (number_hashes_passed < bf->number_hashes) {
fprintf(stderr, "Error: not enough hashes passed in to correctly check!\n");
return BLOOM_FAILURE;
}
for (unsigned int i = 0; i < bf->number_hashes; ++i) {
unsigned long idx = (hashes[i] % bf->number_bits) / 8;
int bit = (hashes[i] % bf->number_bits) % 8;
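/* the atomic update keeps concurrent insertions from an OpenMP
   parallel region safe despite the read-modify-write on each byte */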
#pragma omp atomic update
bf->bloom[idx] |= (1 << bit); // set the bit
}
#pragma omp atomic update
bf->elements_added++;
__update_elements_added_on_disk(bf);
return BLOOM_SUCCESS;
}
/* Check if a string is in the bloom filter using the passed hashes */
int bloom_filter_check_string_alt(BloomFilter *bf, uint64_t *hashes, unsigned int number_hashes_passed) {
if (number_hashes_passed < bf->number_hashes) {
fprintf(stderr, "Error: not enough hashes passed in to correctly check!\n");
return BLOOM_FAILURE;
}
unsigned int i;
int r = BLOOM_SUCCESS;
for (i = 0; i < bf->number_hashes; ++i) {
int tmp_check = CHECK_BIT(bf->bloom, (hashes[i] % bf->number_bits));
if (tmp_check == 0) {
r = BLOOM_FAILURE;
break; // no need to continue checking
}
}
return r;
}
float bloom_filter_current_false_positive_rate(BloomFilter *bf) {
int num = bf->number_hashes * bf->elements_added;
double d = -num / (float) bf->number_bits;
double e = exp(d);
return pow((1 - e), bf->number_hashes);
}
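/* Hedged worked example of the (1 - e^(-k*n/m))^k estimate above: with
 * k = 7 hashes, n = 100000 elements added, and m = 958506 bits (the optimal
 * sizing for p = 0.01), e^(-7*100000/958506) ~= 0.482 and
 * (1 - 0.482)^7 ~= 0.010 -- close to the requested 1% rate. */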
int bloom_filter_export(BloomFilter *bf, const char *filepath) {
// if the bloom is initialized on disk, no need to export it
if (bf->__is_on_disk == 1) {
return BLOOM_SUCCESS;
}
FILE *fp;
fp = fopen(filepath, "w+b");
if (fp == NULL) {
fprintf(stderr, "Can't open file %s!\n", filepath);
return BLOOM_FAILURE;
}
__write_to_file(bf, fp, 0);
fclose(fp);
return BLOOM_SUCCESS;
}
int bloom_filter_import_alt(BloomFilter *bf, const char *filepath, BloomHashFunction hash_function) {
FILE *fp;
fp = fopen(filepath, "r+b");
if (fp == NULL) {
fprintf(stderr, "Can't open file %s!\n", filepath);
return BLOOM_FAILURE;
}
__read_from_file(bf, fp, 0, NULL);
fclose(fp);
bloom_filter_set_hash_function(bf, hash_function);
bf->__is_on_disk = 0; // not on disk
return BLOOM_SUCCESS;
}
int bloom_filter_import_on_disk_alt(BloomFilter *bf, const char *filepath, BloomHashFunction hash_function) {
bf->filepointer = fopen(filepath, "r+b");
if (bf->filepointer == NULL) {
fprintf(stderr, "Can't open file %s!\n", filepath);
return BLOOM_FAILURE;
}
__read_from_file(bf, bf->filepointer, 1, filepath);
// don't close the file pointer here...
bloom_filter_set_hash_function(bf, hash_function);
bf->__is_on_disk = 1; // on disk
return BLOOM_SUCCESS;
}
char* bloom_filter_export_hex_string(BloomFilter *bf) {
uint64_t i, bytes = sizeof(uint64_t) * 2 + sizeof(float) + (bf->bloom_length);
char* hex = (char*)calloc((bytes * 2 + 1), sizeof(char));
for (i = 0; i < bf->bloom_length; ++i) {
sprintf(hex + (i * 2), "%02x", bf->bloom[i]); // not the fastest way, but works
}
i = bf->bloom_length * 2;
sprintf(hex + i, "%016" PRIx64 "", bf->estimated_elements);
i += 16; // 8 bytes * 2 for hex
sprintf(hex + i, "%016" PRIx64 "", bf->elements_added);
unsigned int ui;
memcpy(&ui, &bf->false_positive_probability, sizeof (ui));
i += 16; // 8 bytes * 2 for hex
sprintf(hex + i, "%08x", ui);
return hex;
}
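/* Layout of the hex string above (reconstructed from the offsets used by
 * bloom_filter_import_hex_string_alt below, so descriptive rather than
 * normative): bloom_length*2 hex chars of bit-array bytes, then 16 chars of
 * estimated_elements, 16 chars of elements_added, and finally 8 chars holding
 * the raw bits of the float false_positive_probability. */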
int bloom_filter_import_hex_string_alt(BloomFilter *bf, const char *hex, BloomHashFunction hash_function) {
uint64_t len = strlen(hex);
if (len % 2 != 0) {
fprintf(stderr, "Unable to parse; exiting\n");
return BLOOM_FAILURE;
}
char fpr[9] = {0};
char est_els[17] = {0};
char ins_els[17] = {0};
memcpy(fpr, hex + (len - 8), 8);
memcpy(ins_els, hex + (len - 24), 16);
memcpy(est_els, hex + (len - 40), 16);
uint32_t t_fpr;
bf->estimated_elements = strtoull(est_els, NULL, 16);
bf->elements_added = strtoull(ins_els, NULL, 16);
sscanf(fpr, "%x", &t_fpr);
float f;
memcpy(&f, &t_fpr, sizeof(float));
bf->false_positive_probability = f;
bloom_filter_set_hash_function(bf, hash_function);
__calculate_optimal_hashes(bf);
bf->bloom = (unsigned char*)calloc(bf->bloom_length + 1, sizeof(char)); // pad
bf->__is_on_disk = 0; // not on disk
uint64_t i;
for (i = 0; i < bf->bloom_length; ++i) {
sscanf(hex + (i * 2), "%2hx", (short unsigned int*)&bf->bloom[i]);
}
return BLOOM_SUCCESS;
}
uint64_t bloom_filter_export_size(BloomFilter *bf) {
return (uint64_t)(bf->bloom_length * sizeof(unsigned char)) + (2 * sizeof(uint64_t)) + sizeof(float);
}
uint64_t bloom_filter_count_set_bits(BloomFilter *bf) {
uint64_t i, res = 0;
for (i = 0; i < bf->bloom_length; ++i) {
res += __sum_bits_set_char(bf->bloom[i]);
}
return res;
}
uint64_t bloom_filter_estimate_elements(BloomFilter *bf) {
return bloom_filter_estimate_elements_by_values(bf->number_bits, bloom_filter_count_set_bits(bf), bf->number_hashes);
}
uint64_t bloom_filter_estimate_elements_by_values(uint64_t m, uint64_t X, int k) {
/* m = number bits; X = count of flipped bits; k = number hashes */
double log_n = log(1 - ((double) X / (double) m));
return (uint64_t)-(((double) m / k) * log_n);
}
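/* Worked example of the estimator above (hedged; the standard bit-count
 * estimate): with m = 1000 bits, k = 4 hashes, and X = 330 set bits,
 * n ~= -(1000/4) * ln(1 - 0.33) ~= 250 * 0.4005 ~= 100 elements. */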
int bloom_filter_union(BloomFilter *res, BloomFilter *bf1, BloomFilter *bf2) {
// Ensure the bloom filters can be unioned
if (__check_if_union_or_intersection_ok(res, bf1, bf2) == BLOOM_FAILURE) {
return BLOOM_FAILURE;
}
uint64_t i;
for (i = 0; i < bf1->bloom_length; ++i) {
res->bloom[i] = bf1->bloom[i] | bf2->bloom[i];
}
bloom_filter_set_elements_to_estimated(res);
return BLOOM_SUCCESS;
}
uint64_t bloom_filter_count_union_bits_set(BloomFilter *bf1, BloomFilter *bf2) {
// Ensure the bloom filters can be unioned
if (__check_if_union_or_intersection_ok(bf1, bf1, bf2) == BLOOM_FAILURE) { // use bf1 as res
return BLOOM_FAILURE;
}
uint64_t i, res = 0;
for (i = 0; i < bf1->bloom_length; ++i) {
res += __sum_bits_set_char(bf1->bloom[i] | bf2->bloom[i]);
}
return res;
}
int bloom_filter_intersect(BloomFilter *res, BloomFilter *bf1, BloomFilter *bf2) {
// Ensure the bloom filters can be used in an intersection
if (__check_if_union_or_intersection_ok(res, bf1, bf2) == BLOOM_FAILURE) {
return BLOOM_FAILURE;
}
uint64_t i;
for (i = 0; i < bf1->bloom_length; ++i) {
res->bloom[i] = bf1->bloom[i] & bf2->bloom[i];
}
bloom_filter_set_elements_to_estimated(res);
return BLOOM_SUCCESS;
}
void bloom_filter_set_elements_to_estimated(BloomFilter *bf) {
bf->elements_added = bloom_filter_estimate_elements(bf);
__update_elements_added_on_disk(bf);
}
uint64_t bloom_filter_count_intersection_bits_set(BloomFilter *bf1, BloomFilter *bf2) {
// Ensure the bloom filters can be used in an intersection
if (__check_if_union_or_intersection_ok(bf1, bf1, bf2) == BLOOM_FAILURE) { // use bf1 as res
return BLOOM_FAILURE;
}
uint64_t i, res = 0;
for (i = 0; i < bf1->bloom_length; ++i) {
res += __sum_bits_set_char(bf1->bloom[i] & bf2->bloom[i]);
}
return res;
}
float bloom_filter_jaccard_index(BloomFilter *bf1, BloomFilter *bf2) {
// Ensure the bloom filters can be used in an intersection and union
if (__check_if_union_or_intersection_ok(bf1, bf1, bf2) == BLOOM_FAILURE) { // use bf1 as res
return (float)BLOOM_FAILURE;
}
float set_union_bits = (float)bloom_filter_count_union_bits_set(bf1, bf2);
if (set_union_bits == 0) { // check for divide by 0 error
return 1.0; // both filters must be empty for this to occur, so they are identical
}
return (float)bloom_filter_count_intersection_bits_set(bf1, bf2) / set_union_bits;
}
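/* Hedged note on the ratio above: the set-bit counts serve as a proxy for the
 * set-level Jaccard index J(A,B) = |A AND B| / |A OR B|; e.g. two filters whose
 * OR has 1000 set bits and whose AND has 250 would report 0.25. */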
/*******************************************************************************
* PRIVATE FUNCTIONS
*******************************************************************************/
static void __calculate_optimal_hashes(BloomFilter *bf) {
// calc optimized values
long n = bf->estimated_elements;
float p = bf->false_positive_probability;
uint64_t m = ceil((-n * logl(p)) / LOG_TWO_SQUARED); // LOG_TWO_SQUARED == pow(log(2), 2)
unsigned int k = round(LOG_TWO * m / n); // LOG_TWO == log(2.0)
// set parameters
bf->number_hashes = k; // should check to make sure it is at least 1...
bf->number_bits = m;
long num_pos = ceil(m / (CHAR_LEN * 1.0));
bf->bloom_length = num_pos;
}
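/* Worked example of the sizing above (hedged; standard optimal-filter math):
 * n = 100000, p = 0.01 gives m = ceil(n * -ln(p) / ln(2)^2) = 958506 bits,
 * k = round(ln(2) * m / n) = round(6.64) = 7 hashes, and
 * bloom_length = ceil(958506 / 8) = 119814 bytes. */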
static int __sum_bits_set_char(unsigned char c) {
return bits_set_table[c];
}
static int __check_if_union_or_intersection_ok(BloomFilter *res, BloomFilter *bf1, BloomFilter *bf2) {
if (res->number_hashes != bf1->number_hashes || bf1->number_hashes != bf2->number_hashes) {
return BLOOM_FAILURE;
} else if (res->number_bits != bf1->number_bits || bf1->number_bits != bf2->number_bits) {
return BLOOM_FAILURE;
} else if (res->hash_function != bf1->hash_function || bf1->hash_function != bf2->hash_function) {
return BLOOM_FAILURE;
}
return BLOOM_SUCCESS;
}
/* NOTE: this assumes that the file handler is open and ready to use */
static void __write_to_file(BloomFilter *bf, FILE *fp, short on_disk) {
if (on_disk == 0) {
fwrite(bf->bloom, bf->bloom_length, 1, fp);
} else {
// will need to write out everything by hand
uint64_t i;
for (i = 0; i < bf->bloom_length; ++i) {
fputc(0, fp);
}
}
fwrite(&bf->estimated_elements, sizeof(uint64_t), 1, fp);
fwrite(&bf->elements_added, sizeof(uint64_t), 1, fp);
fwrite(&bf->false_positive_probability, sizeof(float), 1, fp);
}
/* NOTE: this assumes that the file handler is open and ready to use */
static void __read_from_file(BloomFilter *bf, FILE *fp, short on_disk, const char *filename) {
int offset = sizeof(uint64_t) * 2 + sizeof(float);
fseek(fp, offset * -1, SEEK_END);
fread(&bf->estimated_elements, sizeof(uint64_t), 1, fp);
fread(&bf->elements_added, sizeof(uint64_t), 1, fp);
fread(&bf->false_positive_probability, sizeof(float), 1, fp);
__calculate_optimal_hashes(bf);
rewind(fp);
if(on_disk == 0) {
bf->bloom = (unsigned char*)calloc(bf->bloom_length + 1, sizeof(char));
size_t read;
read = fread(bf->bloom, sizeof(char), bf->bloom_length, fp);
if (read != bf->bloom_length) {
perror("__read_from_file: ");
exit(1);
}
} else {
struct stat buf;
int fd = open(filename, O_RDWR);
if (fd < 0) {
perror("open: ");
exit(1);
}
fstat(fd, &buf);
bf->__filesize = buf.st_size;
bf->bloom = (unsigned char*)mmap((caddr_t)0, bf->__filesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (bf->bloom == (unsigned char*) - 1) {
perror("mmap: ");
exit(1);
}
// close the file descriptor
close(fd);
}
}
static void __update_elements_added_on_disk(BloomFilter* bf) {
if (bf->__is_on_disk == 1) { // only do this if it is on disk!
int offset = sizeof(uint64_t) + sizeof(float);
#pragma omp critical (bloom_filter_critical_on_disk)
{
fseek(bf->filepointer, offset * -1, SEEK_END);
fwrite(&bf->elements_added, sizeof(uint64_t), 1, bf->filepointer);
}
}
}
/* NOTE: The caller will free the results */
static uint64_t* __default_hash(int num_hashes, const char *str) {
uint64_t *results = (uint64_t*)calloc(num_hashes, sizeof(uint64_t));
int i;
for (i = 0; i < num_hashes; ++i) {
results[i] = __fnv_1a(str, i);
}
return results;
}
static uint64_t __fnv_1a(const char *key, int seed) {
// FNV-1a hash (http://www.isthe.com/chongo/tech/comp/fnv/)
int i, len = strlen(key);
uint64_t h = 14695981039346656037ULL + (31 * seed); // FNV_OFFSET 64 bit with magic number seed
for (i = 0; i < len; ++i){
h = h ^ (unsigned char) key[i];
h = h * 1099511628211ULL; // FNV_PRIME 64 bit
}
return h;
}
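/* End-to-end usage sketch (hedged: assumes the in-memory constructor
 * bloom_filter_init(bf, estimated_elements, false_positive_rate) declared in
 * this library's header):
 *
 *     BloomFilter bf;
 *     bloom_filter_init(&bf, 50000, 0.01f);
 *     bloom_filter_add_string(&bf, "example-key");
 *     if (bloom_filter_check_string(&bf, "example-key") == BLOOM_SUCCESS) {
 *         // probably present (false positives possible, false negatives not)
 *     }
 *     bloom_filter_destroy(&bf);
 */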
|
ps.c | /*** Some useful math macros ***/
#define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a))
static double mnarg1,mnarg2;
#define FMAX(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) > (mnarg2) ?\
(mnarg1) : (mnarg2))
#define FMIN(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) < (mnarg2) ?\
(mnarg1) : (mnarg2))
#define ERFC_NPTS (int) 75
#define ERFC_PARAM_DELTA (float) 0.1
static double log_erfc_table[ERFC_NPTS], erfc_params[ERFC_NPTS];
static gsl_interp_accel *erfc_acc;
static gsl_spline *erfc_spline;
#define NGaussLegendre 40 //defines the number of points in the Gauss-Legendre quadrature integration
#define NMass 300
#define NSFR_high 200
#define NSFR_low 250
#define NGL_SFR 100 // 100
#define NMTURN 50 // 100
#define LOG10_MTURN_MAX ((double)(10))
#define LOG10_MTURN_MIN ((double)(5.-9e-8))
#define NR_END 1
#define FREE_ARG char*
#define MM 7
#define NSTACK 50
#define EPS2 3.0e-11
#define Luv_over_SFR (double)(1./1.15/1e-28)
// Luv/SFR = 1 / 1.15 x 10^-28 [M_solar yr^-1/erg s^-1 Hz^-1]
// G. Sun and S. R. Furlanetto (2016) MNRAS, 417, 33
#define delta_lnMhalo (double)(5e-6)
#define Mhalo_min (double)(1e6)
#define Mhalo_max (double)(1e16)
float calibrated_NF_min;
double *deltaz, *deltaz_smoothed, *NeutralFractions, *z_Q, *Q_value, *nf_vals, *z_vals;
int N_NFsamples,N_extrapolated, N_analytic, N_calibrated, N_deltaz;
bool initialised_ComputeLF = false;
gsl_interp_accel *LF_spline_acc;
gsl_spline *LF_spline;
gsl_interp_accel *deriv_spline_acc;
gsl_spline *deriv_spline;
struct CosmoParams *cosmo_params_ps;
struct UserParams *user_params_ps;
struct FlagOptions *flag_options_ps;
//double sigma_norm, R, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR;
double sigma_norm, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR;
float MinMass, mass_bin_width, inv_mass_bin_width;
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias);
float *Mass_InterpTable, *Sigma_InterpTable, *dSigmadm_InterpTable;
float *log10_overdense_spline_SFR, *log10_Nion_spline, *Overdense_spline_SFR, *Nion_spline;
float *prev_log10_overdense_spline_SFR, *prev_log10_Nion_spline, *prev_Overdense_spline_SFR, *prev_Nion_spline;
float *Mturns, *Mturns_MINI;
float *log10_Nion_spline_MINI, *Nion_spline_MINI;
float *prev_log10_Nion_spline_MINI, *prev_Nion_spline_MINI;
float *xi_SFR,*wi_SFR, *xi_SFR_Xray, *wi_SFR_Xray;
float *overdense_high_table, *overdense_low_table, *log10_overdense_low_table;
float **log10_SFRD_z_low_table, **SFRD_z_high_table;
float **log10_SFRD_z_low_table_MINI, **SFRD_z_high_table_MINI;
double *lnMhalo_param, *Muv_param, *Mhalo_param;
double *log10phi, *M_uv_z, *M_h_z;
double *lnMhalo_param_MINI, *Muv_param_MINI, *Mhalo_param_MINI;
double *log10phi_MINI, *M_uv_z_MINI, *M_h_z_MINI;
double *deriv, *lnM_temp, *deriv_temp;
double *z_val, *z_X_val, *Nion_z_val, *SFRD_val;
double *Nion_z_val_MINI, *SFRD_val_MINI;
void initialiseSigmaMInterpTable(float M_Min, float M_Max);
void freeSigmaMInterpTable();
void initialiseGL_Nion(int n, float M_Min, float M_Max);
void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max);
float Mass_limit (float logM, float PL, float FRAC);
void bisection(float *x, float xlow, float xup, int *iter);
float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC);
double sheth_delc(double del, double sig);
float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2);
double dNion_ConditionallnM(double lnM, void *params);
double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES);
double dNion_ConditionallnM_MINI(double lnM, void *params);
double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES);
float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES);
float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES);
//JBM: Exact integral for power-law indices non zero (for zero it's erfc)
double Fcollapprox (double numin, double beta);
int n_redshifts_1DTable;
double zmin_1DTable, zmax_1DTable, zbin_width_1DTable;
double *FgtrM_1DTable_linear;
static gsl_interp_accel *Q_at_z_spline_acc;
static gsl_spline *Q_at_z_spline;
static gsl_interp_accel *z_at_Q_spline_acc;
static gsl_spline *z_at_Q_spline;
static double Zmin, Zmax, Qmin, Qmax;
void Q_at_z(double z, double *splined_value);
void z_at_Q(double Q, double *splined_value);
static gsl_interp_accel *deltaz_spline_for_photoncons_acc;
static gsl_spline *deltaz_spline_for_photoncons;
static gsl_interp_accel *NFHistory_spline_acc;
static gsl_spline *NFHistory_spline;
static gsl_interp_accel *z_NFHistory_spline_acc;
static gsl_spline *z_NFHistory_spline;
void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline);
void z_at_NFHist(double xHI_Hist, double *splined_value);
void NFHist_at_z(double z, double *splined_value);
//int nbin;
//double *z_Q, *Q_value, *Q_z, *z_value;
double FinalNF_Estimate, FirstNF_Estimate;
struct parameters_gsl_FgtrM_int_{
double z_obs;
double gf_obs;
};
struct parameters_gsl_SFR_General_int_{
double z_obs;
double gf_obs;
double Mdrop;
double Mdrop_upper;
double pl_star;
double pl_esc;
double frac_star;
double frac_esc;
double LimitMass_Fstar;
double LimitMass_Fesc;
};
struct parameters_gsl_SFR_con_int_{
double gf_obs;
double Mval;
double sigma2;
double delta1;
double delta2;
double Mdrop;
double Mdrop_upper;
double pl_star;
double pl_esc;
double frac_star;
double frac_esc;
double LimitMass_Fstar;
double LimitMass_Fesc;
};
unsigned long *lvector(long nl, long nh);
void free_lvector(unsigned long *v, long nl, long nh);
float *vector(long nl, long nh);
void free_vector(float *v, long nl, long nh);
void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]);
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y);
void gauleg(float x1, float x2, float x[], float w[], int n);
/***** FUNCTION PROTOTYPES *****/
double init_ps(); /* initialize global variables, MUST CALL THIS FIRST!!! returns R_CUTOFF */
void free_ps(); /* deallocates the gsl structures from init_ps */
double sigma_z0(double M); //calculates sigma at z=0 (no dicke)
double power_in_k(double k); /* Returns the value of the linear power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double TFmdm(double k); //Eisenstein & Hu power spectrum transfer function
void TFset_parameters();
double TF_CLASS(double k, int flag_int, int flag_dv); //transfer function of matter (flag_dv=0) and relative velocities (flag_dv=1) fluctuations from CLASS
double power_in_vcb(double k); /* Returns the value of the DM-b relative velocity power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double FgtrM(double z, double M);
double FgtrM_wsigma(double z, double sig);
double FgtrM_st(double z, double M);
double FgtrM_Watson(double growthf, double M);
double FgtrM_Watson_z(double z, double growthf, double M);
double FgtrM_General(double z, double M);
float erfcc(float x);
double splined_erfc(double x);
double M_J_WDM();
void Broadcast_struct_global_PS(struct UserParams *user_params, struct CosmoParams *cosmo_params){
cosmo_params_ps = cosmo_params;
user_params_ps = user_params;
}
/*
this function reads the z=0 matter (CDM+baryons) and relative velocity transfer functions from CLASS (from a file)
flag_int = 0 to initialize interpolator, flag_int = -1 to free memory, flag_int = else to interpolate.
flag_dv = 0 to output density, flag_dv = 1 to output velocity.
similar to built-in function "double T_RECFAST(float z, int flag)"
*/
double TF_CLASS(double k, int flag_int, int flag_dv)
{
static double kclass[CLASS_LENGTH], Tmclass[CLASS_LENGTH], Tvclass_vcb[CLASS_LENGTH];
static gsl_interp_accel *acc_density, *acc_vcb;
static gsl_spline *spline_density, *spline_vcb;
float trash, currk, currTm, currTv;
double ans;
int i;
int gsl_status;
FILE *F;
char filename[500];
sprintf(filename,"%s/%s",global_params.external_table_path,CLASS_FILENAME);
if (flag_int == 0) { // Initialize vectors and read file
if (!(F = fopen(filename, "r"))) {
LOG_ERROR("Unable to open file: %s for reading.", filename);
Throw(IOError);
}
int nscans;
for (i = 0; i < CLASS_LENGTH; i++) {
nscans = fscanf(F, "%e %e %e ", &currk, &currTm, &currTv);
if (nscans != 3) {
LOG_ERROR("Reading CLASS Transfer Function failed.");
Throw(IOError);
}
kclass[i] = currk;
Tmclass[i] = currTm;
Tvclass_vcb[i] = currTv;
if (i > 0 && kclass[i] <= kclass[i - 1]) {
LOG_WARNING("Tk table not ordered");
LOG_WARNING("k=%.1le kprev=%.1le", kclass[i], kclass[i - 1]);
}
}
fclose(F);
LOG_SUPER_DEBUG("Read CLASS Transfer file");
gsl_set_error_handler_off();
// Set up spline table for densities
acc_density = gsl_interp_accel_alloc ();
spline_density = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH);
gsl_status = gsl_spline_init(spline_density, kclass, Tmclass, CLASS_LENGTH);
GSL_ERROR(gsl_status);
LOG_SUPER_DEBUG("Generated CLASS Density Spline.");
//Set up spline table for velocities
acc_vcb = gsl_interp_accel_alloc ();
spline_vcb = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH);
gsl_status = gsl_spline_init(spline_vcb, kclass, Tvclass_vcb, CLASS_LENGTH);
GSL_ERROR(gsl_status);
LOG_SUPER_DEBUG("Generated CLASS velocity Spline.");
return 0;
}
else if (flag_int == -1) {
gsl_spline_free (spline_density);
gsl_interp_accel_free(acc_density);
gsl_spline_free (spline_vcb);
gsl_interp_accel_free(acc_vcb);
return 0;
}
if (k > kclass[CLASS_LENGTH-1]) { // k>kmax
LOG_WARNING("Called TF_CLASS with k=%f, larger than kmax! Returning value at kmax.", k);
if(flag_dv == 0){ // output is density
return (Tmclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]);
}
else if(flag_dv == 1){ // output is rel velocity
return (Tvclass_vcb[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]);
} //we just set it to the last value, since sometimes it wants large k for R<<cell_size, which does not matter much.
}
else { // Do spline
if(flag_dv == 0){ // output is density
ans = gsl_spline_eval (spline_density, k, acc_density);
}
else if(flag_dv == 1){ // output is relative velocity
ans = gsl_spline_eval (spline_vcb, k, acc_vcb);
}
else{
ans=0.0; //neither densities nor velocities?
}
}
return ans/k/k;
//we have to divide by k^2 to agree with the old-fashioned convention.
}
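/* Usage sketch (hedged; mirrors the calls made in init_ps(), dsigma_dk(),
 * power_in_vcb(), and free_ps() below):
 *
 *     TF_CLASS(1.0, 0, 0);            // initialize: read the table, build the splines
 *     double Tm = TF_CLASS(k, 1, 0);  // matter transfer function at k
 *     double Tv = TF_CLASS(k, 1, 1);  // relative-velocity transfer function at k
 *     TF_CLASS(1.0, -1, 0);           // free the splines
 */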
// FUNCTION sigma_z0(M)
// Returns the standard deviation of the normalized, density excess (delta(x)) field,
// smoothed on the comoving scale of M (see filter definitions for M<->R conversion).
// The sigma is evaluated at z=0, with the time evolution contained in the dicke(z) factor,
// i.e. sigma(M,z) = sigma_z0(m) * dicke(z)
// normalized so that sigma_z0(M->8/h Mpc) = SIGMA8 in ../Parameter_files/COSMOLOGY.H
// NOTE: volume is normalized to = 1, so this is equivalent to the mass standard deviation
// M is in solar masses
// References: Padmanabhan, pg. 210, eq. 5.107
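// In symbols (hedged reconstruction from the code below): dsigma_dk() builds the
// integrand k^2 P(k) W^2(kR), so sigma(M) = sigma_norm * sqrt( Integral dk k^2 P(k) W^2(kR) ),
// with the usual 1/(2 pi^2) prefactor absorbed into sigma_norm by the SIGMA_8
// normalization performed in init_ps().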
double dsigma_dk(double k, void *params){
double p, w, T, gamma, q, aa, bb, cc, kR;
// get the power spectrum.. choice of 5:
if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu
T = TFmdm(k);
// check if we should cut off the power spectrum according to the Bode et al. 2000 transfer function
if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
q = k / (cosmo_params_ps->hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(cosmo_params_ps->hlittle*gamma);
bb = 3.0/(cosmo_params_ps->hlittle*gamma);
cc = 1.7/(cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
aa = 1.7/(cosmo_params_ps->hlittle*gamma);
bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS
T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note: flag_int = 1 always here, since the interpolator must already be initialized
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
}
}
else{
LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
double Radius;
Radius = *(double *)params;
kR = k*Radius;
if ( (global_params.FILTER == 0) || (sigma_norm < 0) ){ // top hat
if ( (kR) < 1.0e-4 ){ w = 1.0;} // w converges to 1 as (kR) -> 0
else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}
}
else if (global_params.FILTER == 1){ // gaussian of width 1/R
w = pow(E, -kR*kR/2.0);
}
else {
LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER);
Throw(ValueError);
}
return k*k*p*w*w;
}
double sigma_z0(double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);
double kstart, kend;
double Radius;
// R = MtoR(M);
Radius = MtoR(M);
// now lets do the integral for sigma and scale it with sigma_norm
if(user_params_ps->POWER_SPECTRUM == 5){
kstart = fmax(1.0e-99/Radius, KBOT_CLASS);
kend = fmin(350.0/Radius, KTOP_CLASS);
}//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max!
else{
kstart = 1.0e-99/Radius;
kend = 350.0/Radius;
}
lower_limit = kstart;//log(kstart);
upper_limit = kend;//log(kend);
F.function = &dsigma_dk;
F.params = &Radius;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: M=%e",M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return sigma_norm * sqrt(result);
}
// FUNCTION TFmdm is the power spectrum transfer function from Eisenstein & Hu ApJ, 1999, 511, 5
double TFmdm(double k){
double q, gamma_eff, q_eff, TF_m, q_nu;
q = k*pow(theta_cmb,2)/omhh;
gamma_eff=sqrt(alpha_nu) + (1.0-sqrt(alpha_nu))/(1.0+pow(0.43*k*sound_horizon, 4));
q_eff = q/gamma_eff;
TF_m= log(E+1.84*beta_c*sqrt(alpha_nu)*q_eff);
TF_m /= TF_m + pow(q_eff,2) * (14.4 + 325.0/(1.0+60.5*pow(q_eff,1.11)));
q_nu = 3.92*q/sqrt(f_nu/N_nu);
TF_m *= 1.0 + (1.2*pow(f_nu,0.64)*pow(N_nu,0.3+0.6*f_nu)) /
(pow(q_nu,-1.6)+pow(q_nu,0.8));
return TF_m;
}
void TFset_parameters(){
double z_drag, R_drag, R_equality, p_c, p_cb, f_c, f_cb, f_nub, k_equality;
LOG_DEBUG("Setting Transfer Function parameters.");
z_equality = 25000*omhh*pow(theta_cmb, -4) - 1.0;
k_equality = 0.0746*omhh/(theta_cmb*theta_cmb);
z_drag = 0.313*pow(omhh,-0.419) * (1 + 0.607*pow(omhh, 0.674));
z_drag = 1 + z_drag*pow(cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle, 0.238*pow(omhh, 0.223));
z_drag *= 1291 * pow(omhh, 0.251) / (1 + 0.659*pow(omhh, 0.828));
y_d = (1 + z_equality) / (1.0 + z_drag);
R_drag = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_drag);
R_equality = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_equality);
sound_horizon = 2.0/3.0/k_equality * sqrt(6.0/R_equality) *
log( (sqrt(1+R_drag) + sqrt(R_drag+R_equality)) / (1.0 + sqrt(R_equality)) );
p_c = -(5 - sqrt(1 + 24*(1 - f_nu-f_baryon)))/4.0;
p_cb = -(5 - sqrt(1 + 24*(1 - f_nu)))/4.0;
f_c = 1 - f_nu - f_baryon;
f_cb = 1 - f_nu;
f_nub = f_nu+f_baryon;
alpha_nu = (f_c/f_cb) * (2*(p_c+p_cb)+5)/(4*p_cb+5.0);
alpha_nu *= 1 - 0.553*f_nub+0.126*pow(f_nub,3);
alpha_nu /= 1-0.193*sqrt(f_nu)+0.169*f_nu;
alpha_nu *= pow(1+y_d, p_c-p_cb);
alpha_nu *= 1+ (p_cb-p_c)/2.0 * (1.0+1.0/(4.0*p_c+3.0)/(4.0*p_cb+7.0))/(1.0+y_d);
beta_c = 1.0/(1.0-0.949*f_nub);
}
// Returns the value of the linear power spectrum DENSITY (i.e. <|delta_k|^2>/V)
// at a given k mode linearly extrapolated to z=0
double power_in_k(double k){
double p, T, gamma, q, aa, bb, cc;
// get the power spectrum.. choice of 5:
if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu
T = TFmdm(k);
// check if we should cut off the power spectrum according to the Bode et al. 2000 transfer function
if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
//p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
}
else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
q = k / (cosmo_params_ps->hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(cosmo_params_ps->hlittle*gamma);
bb = 3.0/(cosmo_params_ps->hlittle*gamma);
cc = 1.7/(cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm));
aa = 1.7/(cosmo_params_ps->hlittle*gamma);
bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS
T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note: flag_int = 1 always here, since the interpolator must already be initialized
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
}
}
else{
LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
return p*TWOPI*PI*sigma_norm*sigma_norm;
}
/*
Returns the value of the linear power spectrum of the DM-b relative velocity
at kinematic decoupling (which we set at zkin=1010)
*/
double power_in_vcb(double k){
double p, T, gamma, q, aa, bb, cc;
//only works if using CLASS
if (user_params_ps->POWER_SPECTRUM == 5){ // CLASS
T = TF_CLASS(k, 1, 1); //read from CLASS file. flag_int=1 since we have initialized before, flag_vcb=1 for velocity
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else{
LOG_ERROR("Cannot get P_cb unless using CLASS: %i\n Set USE_RELATIVE_VELOCITIES 0 or use CLASS.\n", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
return p*TWOPI*PI*sigma_norm*sigma_norm;
}
double init_ps(){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);
double kstart, kend;
//we start the interpolator if using CLASS:
if (user_params_ps->POWER_SPECTRUM == 5){
LOG_DEBUG("Setting CLASS Transfer Function inits.");
TF_CLASS(1.0, 0, 0);
}
// Set cutoff scale for WDM (eq. 4 in Barkana et al. 2001) in comoving Mpc
R_CUTOFF = 0.201*pow((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15, 0.15)*pow(global_params.g_x/1.5, -0.29)*pow(global_params.M_WDM, -1.15);
omhh = cosmo_params_ps->OMm*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle;
theta_cmb = T_cmb / 2.7;
// Translate parameters into the forms used by the global variables
f_nu = global_params.OMn/cosmo_params_ps->OMm;
f_baryon = cosmo_params_ps->OMb/cosmo_params_ps->OMm;
if (f_nu < TINY) f_nu = 1e-10;
if (f_baryon < TINY) f_baryon = 1e-10;
TFset_parameters();
sigma_norm = -1;
double Radius_8;
Radius_8 = 8.0/cosmo_params_ps->hlittle;
if(user_params_ps->POWER_SPECTRUM == 5){
kstart = fmax(1.0e-99/Radius_8, KBOT_CLASS);
kend = fmin(350.0/Radius_8, KTOP_CLASS);
}//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max!
else{
kstart = 1.0e-99/Radius_8;
kend = 350.0/Radius_8;
}
lower_limit = kstart;
upper_limit = kend;
LOG_DEBUG("Initializing Power Spectrum with lower_limit=%e, upper_limit=%e, rel_tol=%e, radius_8=%g", lower_limit,upper_limit, rel_tol, Radius_8);
F.function = &dsigma_dk;
F.params = &Radius_8;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
LOG_DEBUG("Initialized Power Spectrum.");
sigma_norm = cosmo_params_ps->SIGMA_8/sqrt(result); //takes care of volume factor
return R_CUTOFF;
}
//function to free arrays related to the power spectrum
void free_ps(){
//we free the PS interpolator if using CLASS:
if (user_params_ps->POWER_SPECTRUM == 5){
TF_CLASS(1.0, -1, 0);
}
return;
}
/*
FUNCTION dsigmasqdm_z0(M)
returns d/dm (sigma^2) (see function sigma), in units of Msun^-1
*/
double dsigmasq_dm(double k, void *params){
double p, w, T, gamma, q, aa, bb, cc, dwdr, drdm, kR;
// get the power spectrum.. choice of 5:
if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu ApJ, 1999, 511, 5
T = TFmdm(k);
// check if we should cut off the power spectrum according to the Bode et al. 2000 transfer function
if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
//p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05
}
else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
q = k / (cosmo_params_ps->hlittle*gamma);
T = (log(1.0+2.34*q)/(2.34*q)) *
pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25);
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
}
else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992)
gamma = 0.25;
aa = 6.4/(cosmo_params_ps->hlittle*gamma);
bb = 3.0/(cosmo_params_ps->hlittle*gamma);
cc = 1.7/(cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 );
}
else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
aa = 8.0 / (cosmo_params_ps->hlittle*gamma);
bb = 4.7 / (cosmo_params_ps->hlittle*gamma);
p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52
gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm));
aa = 1.7/(cosmo_params_ps->hlittle*gamma);
bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5);
cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2);
p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + pow(bb*k, 1.5) + cc*k*k, 2);
}
else if (user_params_ps->POWER_SPECTRUM == 5){ // JBM: CLASS
T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS
p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T;
if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression
p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms
}
}
else{
LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM);
Throw(ValueError);
}
double Radius;
Radius = *(double *)params;
// now get the value of the window function
kR = k * Radius;
if (global_params.FILTER == 0){ // top hat
if ( (kR) < 1.0e-4 ){ w = 1.0; }// w converges to 1 as (kR) -> 0
else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));}
// now do d(w^2)/dm = 2 w dw/dr dr/dm
if ( (kR) < 1.0e-10 ){ dwdr = 0;}
else{ dwdr = 9*cos(kR)*k/pow(kR,3) + 3*sin(kR)*(1 - 3/(kR*kR))/(kR*Radius);}
//3*k*( 3*cos(kR)/pow(kR,3) + sin(kR)*(-3*pow(kR, -4) + 1/(kR*kR)) );}
// dwdr = -1e8 * k / (R*1e3);
drdm = 1.0 / (4.0*PI * cosmo_params_ps->OMm*RHOcrit * Radius*Radius);
}
else if (global_params.FILTER == 1){ // gaussian of width 1/R
w = pow(E, -kR*kR/2.0);
dwdr = - k*kR * w;
drdm = 1.0 / (pow(2*PI, 1.5) * cosmo_params_ps->OMm*RHOcrit * 3*Radius*Radius);
}
else {
LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER);
Throw(ValueError);
}
// return k*k*p*2*w*dwdr*drdm * d2fact;
return k*k*p*2*w*dwdr*drdm;
}
double dsigmasqdm_z0(double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
double kstart, kend;
double Radius;
// R = MtoR(M);
Radius = MtoR(M);
// now lets do the integral for sigma and scale it with sigma_norm
if(user_params_ps->POWER_SPECTRUM == 5){
kstart = fmax(1.0e-99/Radius, KBOT_CLASS);
kend = fmin(350.0/Radius, KTOP_CLASS);
}//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max!
else{
kstart = 1.0e-99/Radius;
kend = 350.0/Radius;
}
lower_limit = kstart;//log(kstart);
upper_limit = kend;//log(kend);
if (user_params_ps->POWER_SPECTRUM == 5){ // for CLASS we do not need to renormalize the sigma integral.
d2fact=1.0;
}
else {
d2fact = M*10000/sigma_z0(M);
}
F.function = &dsigmasq_dm;
F.params = &Radius;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: M=%e",M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
// return sigma_norm * sigma_norm * result /d2fact;
return sigma_norm * sigma_norm * result;
}
/* sheth correction to delta crit */
double sheth_delc(double del, double sig){
return sqrt(SHETH_a)*del*(1. + global_params.SHETH_b*pow(sig*sig/(SHETH_a*del*del), global_params.SHETH_c));
}
/*
FUNCTION dNdM_st(z, M)
Computes the Press-Schechter mass function with the Sheth-Tormen correction for ellipsoidal collapse at
redshift z, and dark matter halo mass M (in solar masses).
Uses interpolated sigma and dsigmadm for faster computation. Necessary for mass-dependent ionising efficiencies.
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Sheth, Mo & Tormen 2001
*/
double dNdM_st(double growthf, double M){
double sigma, dsigmadm, nuhat;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
nuhat = sqrt(SHETH_a) * Deltac / sigma;
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * sqrt(2./PI)*SHETH_A * (1+ pow(nuhat, -2*SHETH_p)) * nuhat * pow(E, -nuhat*nuhat/2.0);
}
/*
FUNCTION dNdM_WatsonFOF(z, M)
Computes the Press-Schechter mass function with the Warren et al. 2011 correction for ellipsoidal collapse at
redshift z, and dark matter halo mass M (in solar masses).
The universal FOF function (Eq. 12) of Watson et al. 2013
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Watson et al. 2013
*/
double dNdM_WatsonFOF(double growthf, double M){
double sigma, dsigmadm, f_sigma;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
f_sigma = Watson_A * ( pow( Watson_beta/sigma, Watson_alpha) + 1. ) * exp( - Watson_gamma/(sigma*sigma) );
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma;
}
/*
FUNCTION dNdM_WatsonFOF_z(z, M)
Computes the Press-Schechter mass function with the Warren et al. 2011 correction for ellipsoidal collapse at
redshift z, and dark matter halo mass M (in solar masses).
The universal FOF function, with redshift evolution (Eqs. 12 - 15) of Watson et al. 2013.
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Watson et al. 2013
*/
double dNdM_WatsonFOF_z(double z, double growthf, double M){
double sigma, dsigmadm, A_z, alpha_z, beta_z, Omega_m_z, f_sigma;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
Omega_m_z = (cosmo_params_ps->OMm)*pow(1.+z,3.) / ( (cosmo_params_ps->OMl) + (cosmo_params_ps->OMm)*pow(1.+z,3.) + (global_params.OMr)*pow(1.+z,4.) );
A_z = Omega_m_z * ( Watson_A_z_1 * pow(1. + z, Watson_A_z_2 ) + Watson_A_z_3 );
alpha_z = Omega_m_z * ( Watson_alpha_z_1 * pow(1.+z, Watson_alpha_z_2 ) + Watson_alpha_z_3 );
beta_z = Omega_m_z * ( Watson_beta_z_1 * pow(1.+z, Watson_beta_z_2 ) + Watson_beta_z_3 );
f_sigma = A_z * ( pow(beta_z/sigma, alpha_z) + 1. ) * exp( - Watson_gamma_z/(sigma*sigma) );
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma;
}
/*
FUNCTION dNdM(growthf, M)
Computes the Press-Schechter mass function at
redshift z (using the growth factor), and dark matter halo mass M (in solar masses).
Uses interpolated sigma and dsigmadm for faster computation. Necessary for mass-dependent ionising efficiencies.
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of:
comoving Mpc^-3 Msun^-1
Reference: Padmanabhan, pg. 214
*/
double dNdM(double growthf, double M){
double sigma, dsigmadm;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigmadm);
}
else {
sigma = sigma_z0(M);
dsigmadm = dsigmasqdm_z0(M);
}
sigma = sigma * growthf;
dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma));
return (-(cosmo_params_ps->OMm)*RHOcrit/M) * sqrt(2/PI) * (Deltac/(sigma*sigma)) * dsigmadm * pow(E, -(Deltac*Deltac)/(2*sigma*sigma));
}
/*
FUNCTION FgtrM(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
*/
double FgtrM(double z, double M){
double del, sig;
del = Deltac/dicke(z); //regular spherical collapse delta
sig = sigma_z0(M);
return splined_erfc(del / (sqrt(2)*sig));
}
/*
FUNCTION FgtrM_wsigma(z, sigma_z0(M))
Computes the fraction of mass contained in haloes with mass > M at redshift z.
Requires sigma_z0(M) rather than M to make certain heating integrals faster
*/
double FgtrM_wsigma(double z, double sig){
double del;
del = Deltac/dicke(z); //regular spherical collapse delta
return splined_erfc(del / (sqrt(2)*sig));
}
/*
FUNCTION FgtrM_Watson_z(z, growthf, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
Uses Watson et al (2013) correction
*/
double dFdlnM_Watson_z (double lnM, void *params){
struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
return dNdM_WatsonFOF_z(z, growthf, M) * M * M;
}
double FgtrM_Watson_z(double z, double growthf, double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
F.function = &dFdlnM_Watson_z;
struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
.z_obs = z,
.gf_obs = growthf,
};
F.params = &parameters_gsl_FgtrM;
lower_limit = log(M);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / (cosmo_params_ps->OMm*RHOcrit);
}
/*
FUNCTION FgtrM_Watson(growthf, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
Uses Watson et al (2013) correction
*/
double dFdlnM_Watson (double lnM, void *params){
double growthf = *(double *)params;
double M = exp(lnM);
return dNdM_WatsonFOF(growthf, M) * M * M;
}
double FgtrM_Watson(double growthf, double M){
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
F.function = &dFdlnM_Watson;
F.params = &growthf;
lower_limit = log(M);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: growthf=%e M=%e",growthf,M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / (cosmo_params_ps->OMm*RHOcrit);
}
double dFdlnM_General(double lnM, void *params){
struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
double MassFunction;
if(user_params_ps->HMF==0) {
MassFunction = dNdM(growthf, M);
}
if(user_params_ps->HMF==1) {
MassFunction = dNdM_st(growthf, M);
}
if(user_params_ps->HMF==2) {
MassFunction = dNdM_WatsonFOF(growthf, M);
}
if(user_params_ps->HMF==3) {
MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
}
return MassFunction * M * M;
}
/*
FUNCTION FgtrM_General(z, M)
Computes the fraction of mass contained in haloes with mass > M at redshift z
*/
double FgtrM_General(double z, double M){
double del, sig, growthf;
int status;
growthf = dicke(z);
struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
.z_obs = z,
.gf_obs = growthf,
};
if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
F.function = &dFdlnM_General;
F.params = &parameters_gsl_FgtrM;
lower_limit = log(M);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / (cosmo_params_ps->OMm*RHOcrit);
}
else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
Throw(ValueError);
}
}
double dNion_General(double lnM, void *params){
struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
double MassTurnover = vals.Mdrop;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar10 = vals.frac_star;
double Fesc10 = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar, Fesc, MassFunction;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar10;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar10;
else
Fstar = pow(M/1e10,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc10;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc10;
else
Fesc = pow(M/1e10,Alpha_esc);
if(user_params_ps->HMF==0) {
MassFunction = dNdM(growthf, M);
}
if(user_params_ps->HMF==1) {
MassFunction = dNdM_st(growthf,M);
}
if(user_params_ps->HMF==2) {
MassFunction = dNdM_WatsonFOF(growthf, M);
}
if(user_params_ps->HMF==3) {
MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
}
return MassFunction * M * M * exp(-MassTurnover/M) * Fstar * Fesc;
}
double Nion_General(double z, double M_Min, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc){
double growthf;
growthf = dicke(z);
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
.z_obs = z,
.gf_obs = growthf,
.Mdrop = MassTurnover,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc,
};
int status;
if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
F.function = &dNion_General;
F.params = &parameters_gsl_SFR;
lower_limit = log(M_Min);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e Alpha_star=%e Alpha_esc=%e",z,growthf,MassTurnover,Alpha_star,Alpha_esc);
LOG_ERROR("data: Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / ((cosmo_params_ps->OMm)*RHOcrit);
}
else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
Throw(ValueError);
}
}
double dNion_General_MINI(double lnM, void *params){
struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;
double M = exp(lnM);
double z = vals.z_obs;
double growthf = vals.gf_obs;
double MassTurnover = vals.Mdrop;
double MassTurnover_upper = vals.Mdrop_upper;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar7_MINI = vals.frac_star;
double Fesc7_MINI = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar, Fesc, MassFunction;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else
Fstar = pow(M/1e7,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else
Fesc = pow(M/1e7,Alpha_esc);
if(user_params_ps->HMF==0) {
MassFunction = dNdM(growthf, M);
}
if(user_params_ps->HMF==1) {
MassFunction = dNdM_st(growthf,M);
}
if(user_params_ps->HMF==2) {
MassFunction = dNdM_WatsonFOF(growthf, M);
}
if(user_params_ps->HMF==3) {
MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
}
return MassFunction * M * M * exp(-MassTurnover/M) * exp(-M/MassTurnover_upper) * Fstar * Fesc;
}
double Nion_General_MINI(double z, double M_Min, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar7_MINI, double Fesc7_MINI, double Mlim_Fstar, double Mlim_Fesc){
double growthf;
int status;
growthf = dicke(z);
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.001; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
.z_obs = z,
.gf_obs = growthf,
.Mdrop = MassTurnover,
.Mdrop_upper = MassTurnover_upper,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar7_MINI,
.frac_esc = Fesc7_MINI,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc,
};
if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {
F.function = &dNion_General_MINI;
F.params = &parameters_gsl_SFR;
lower_limit = log(M_Min);
upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occurred!");
LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e MassTurnover_upper=%e",z,growthf,MassTurnover,MassTurnover_upper);
LOG_ERROR("data: Alpha_star=%e Alpha_esc=%e Fstar7_MINI=%e Fesc7_MINI=%e Mlim_Fstar=%e Mlim_Fesc=%e",Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
return result / ((cosmo_params_ps->OMm)*RHOcrit);
}
else {
LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
Throw(ValueError);
}
}
/* returns the "effective Jeans mass" in Msun
corresponding to the gas analog of WDM; eq. 10 in Barkana+ 2001 */
double M_J_WDM(){
double z_eq, fudge=60;
if (!(global_params.P_CUTOFF))
return 0;
z_eq = 3600*(cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15;
return fudge*3.06e8 * (1.5/global_params.g_x) * sqrt((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15) * pow(global_params.M_WDM, -4) * pow(z_eq/3000.0, 1.5);
}
float erfcc(float x)
{
double t,q,ans;
q=fabs(x);
t=1.0/(1.0+0.5*q);
ans=t*exp(-q*q-1.2655122+t*(1.0000237+t*(0.374092+t*(0.0967842+
t*(-0.1862881+t*(0.2788681+t*(-1.13520398+t*(1.4885159+
t*(-0.82215223+t*0.17087277)))))))));
return x >= 0.0 ? ans : 2.0-ans;
}
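// Editorial sketch (hypothetical, never called by the pipeline): the fitted
// rational Chebyshev approximation above agrees with the exact complementary
// error function to roughly single precision, so a spot check against the
// C99 erfc() from <math.h> should return a difference of order 1e-7 or less.
static double example_check_erfcc(void)
{
double x = 1.0;
return erfcc((float)x) - erfc(x);
}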
double splined_erfc(double x){
if (x < 0){
return 1.0;
}
// NOTE: the spline branch below is intentionally unreachable; the direct
// evaluation is used because the interpolation does not appear to be stable
// in Ts.c. This could be wrapped in a Try/Catch to attempt the fast way
// first and fall back to the slow way if it fails.
return erfcc(x);
if (x > ERFC_PARAM_DELTA*(ERFC_NPTS-1))
return erfcc(x);
else
return exp(gsl_spline_eval(erfc_spline, x, erfc_acc));
}
void gauleg(float x1, float x2, float x[], float w[], int n)
//Given the lower and upper limits of integration x1 and x2, and given n, this routine returns arrays x[1..n] and w[1..n] of length n,
//containing the abscissas and weights of the Gauss-Legendre n-point quadrature formula.
{
int m,j,i;
double z1,z,xm,xl,pp,p3,p2,p1;
m=(n+1)/2;
xm=0.5*(x2+x1);
xl=0.5*(x2-x1);
for (i=1;i<=m;i++) {
//High precision is a good idea for this routine.
//The roots are symmetric in the interval, so we only have to find half of them.
//Loop over the desired roots.
z=cos(3.141592654*(i-0.25)/(n+0.5));
//Starting with the above approximation to the ith root, we enter the main loop of refinement by Newton’s method.
do {
p1=1.0;
p2=0.0;
for (j=1;j<=n;j++) {
//Loop up the recurrence relation to get the Legendre polynomial evaluated at z.
p3=p2;
p2=p1;
p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j;
}
//p1 is now the desired Legendre polynomial. We next compute pp, its derivative, by a standard relation involving also p2,
//the polynomial of one lower order.
pp=n*(z*p1-p2)/(z*z-1.0);
z1=z;
z=z1-p1/pp;
} while (fabs(z-z1) > EPS2);
x[i]=xm-xl*z;
x[n+1-i]=xm+xl*z;
w[i]=2.0*xl/((1.0-z*z)*pp*pp);
w[n+1-i]=w[i];
}
}
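// Editorial sketch (hypothetical helper, not part of the original code):
// gauleg() follows the Numerical Recipes convention of 1-indexed arrays, so
// callers allocate n+1 slots and ignore index 0. An n-point rule is exact
// for polynomials of degree 2n-1, so integrating x^2 over [0,1] with n=10
// should recover 1/3 to machine precision.
static double example_gauleg_integrate_x2(void)
{
float x[11], w[11]; // slots 1..10 are used
int n = 10, i;
double sum = 0.;
gauleg(0., 1., x, w, n);
for (i = 1; i <= n; i++)
sum += w[i] * x[i] * x[i];
return sum; // ~0.333333
}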
void initialiseSigmaMInterpTable(float M_Min, float M_Max)
{
int i;
float Mass;
if (Mass_InterpTable == NULL){
Mass_InterpTable = calloc(NMass,sizeof(float));
Sigma_InterpTable = calloc(NMass,sizeof(float));
dSigmadm_InterpTable = calloc(NMass,sizeof(float));
}
#pragma omp parallel shared(Mass_InterpTable,Sigma_InterpTable,dSigmadm_InterpTable) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NMass;i++) {
Mass_InterpTable[i] = log(M_Min) + (float)i/(NMass-1)*( log(M_Max) - log(M_Min) );
Sigma_InterpTable[i] = sigma_z0(exp(Mass_InterpTable[i]));
dSigmadm_InterpTable[i] = log10(-dsigmasqdm_z0(exp(Mass_InterpTable[i])));
}
}
for(i=0;i<NMass;i++) {
if(isfinite(Mass_InterpTable[i]) == 0 || isfinite(Sigma_InterpTable[i]) == 0 || isfinite(dSigmadm_InterpTable[i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in initialiseSigmaMInterpTable");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
MinMass = log(M_Min);
mass_bin_width = 1./(NMass-1)*( log(M_Max) - log(M_Min) );
inv_mass_bin_width = 1./mass_bin_width;
}
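// Editorial sketch (hypothetical helper): the table built above is sampled
// throughout this file with the same inline two-line lookup pattern; written
// out once, the linear interpolation of sigma(M) at natural-log mass lnM is:
static float example_sigma_from_table(float lnM)
{
int bin = (int)floor((lnM - MinMass) * inv_mass_bin_width);
float bin_low = MinMass + mass_bin_width * (float)bin;
return Sigma_InterpTable[bin]
+ (lnM - bin_low) * (Sigma_InterpTable[bin + 1] - Sigma_InterpTable[bin]) * inv_mass_bin_width;
}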
void freeSigmaMInterpTable()
{
free(Mass_InterpTable);
free(Sigma_InterpTable);
free(dSigmadm_InterpTable);
Mass_InterpTable = NULL;
Sigma_InterpTable = NULL;
dSigmadm_InterpTable = NULL;
}
void nrerror(char error_text[])
{
LOG_ERROR("Numerical Recipes run-time error...");
LOG_ERROR("%s",error_text);
Throw(MemoryAllocError);
}
float *vector(long nl, long nh)
/* allocate a float vector with subscript range v[nl..nh] */
{
float *v;
v = (float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float)));
if(!v) nrerror("allocation failure in vector()");
return v - nl + NR_END;
}
void free_vector(float *v, long nl, long nh)
/* free a float vector allocated with vector() */
{
free((FREE_ARG) (v+nl-NR_END));
}
void spline(float x[], float y[], int n, float yp1, float ypn, float y2[])
/*Given arrays x[1..n] and y[1..n] containing a tabulated function, i.e., yi = f(xi), with
x1 < x2 < ... < xN, and given values yp1 and ypn for the first derivative of the interpolating
function at points 1 and n, respectively, this routine returns an array y2[1..n] that contains
the second derivatives of the interpolating function at the tabulated points xi. If yp1 and/or
ypn are equal to 1e30 or larger, the routine is signaled to set the corresponding boundary
condition for a natural spline, with zero second derivative on that boundary.*/
{
int i,k;
float p,qn,sig,un,*u;
int na,nb,check;
u=vector(1,n-1);
if (yp1 > 0.99e30) // The lower boundary condition is set either to be "natural"
y2[1]=u[1]=0.0;
else { // or else to have a specified first derivative.
y2[1] = -0.5;
u[1]=(3.0/(x[2]-x[1]))*((y[2]-y[1])/(x[2]-x[1])-yp1);
}
for (i=2;i<=n-1;i++) { //This is the decomposition loop of the tridiagonal algorithm.
sig=(x[i]-x[i-1])/(x[i+1]-x[i-1]); //y2 and u are used for temporary
na = 1;
nb = 1;
check = 0;
while(((float)(x[i+na*1]-x[i-nb*1])==(float)0.0)) {
check = check + 1;
if(check%2==0) {
na = na + 1;
}
else {
nb = nb + 1;
}
sig=(x[i]-x[i-1])/(x[i+na*1]-x[i-nb*1]);
}
p=sig*y2[i-1]+2.0; //storage of the decomposed
y2[i]=(sig-1.0)/p; // factors.
u[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]);
u[i]=(6.0*u[i]/(x[i+1]-x[i-1])-sig*u[i-1])/p;
if(((float)(x[i+1]-x[i])==(float)0.0) || ((float)(x[i]-x[i-1])==(float)0.0)) {
na = 0;
nb = 0;
check = 0;
while((float)(x[i+na*1]-x[i-nb])==(float)(0.0) || ((float)(x[i+na]-x[i-nb*1])==(float)0.0)) {
check = check + 1;
if(check%2==0) {
na = na + 1;
}
else {
nb = nb + 1;
}
}
u[i]=(y[i+1]-y[i])/(x[i+na*1]-x[i-nb]) - (y[i]-y[i-1])/(x[i+na]-x[i-nb*1]);
u[i]=(6.0*u[i]/(x[i+na*1]-x[i-nb*1])-sig*u[i-1])/p;
}
}
if (ypn > 0.99e30) //The upper boundary condition is set either to be "natural"
qn=un=0.0;
else { //or else to have a specified first derivative.
qn=0.5;
un=(3.0/(x[n]-x[n-1]))*(ypn-(y[n]-y[n-1])/(x[n]-x[n-1]));
}
y2[n]=(un-qn*u[n-1])/(qn*y2[n-1]+1.0);
for (k=n-1;k>=1;k--) { //This is the backsubstitution loop of the tridiagonal
y2[k]=y2[k]*y2[k+1]+u[k]; //algorithm.
}
free_vector(u,1,n-1);
}
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y)
/*Given the arrays xa[1..n] and ya[1..n], which tabulate a function (with the xai's in order),
and given the array y2a[1..n], which is the output from spline above, and given a value of
x, this routine returns a cubic-spline interpolated value y.*/
{
void nrerror(char error_text[]);
int klo,khi,k;
float h,b,a;
klo=1; // We will find the right place in the table by means of
khi=n; //bisection. This is optimal if sequential calls to this
while (khi-klo > 1) { //routine are at random values of x. If sequential calls
k=(khi+klo) >> 1; //are in order, and closely spaced, one would do better
if (xa[k] > x) khi=k; //to store previous values of klo and khi and test if
else klo=k; //they remain appropriate on the next call.
} // klo and khi now bracket the input value of x.
h=xa[khi]-xa[klo];
if (h == 0.0) nrerror("Bad xa input to routine splint"); //The xa's must be distinct.
a=(xa[khi]-x)/h;
b=(x-xa[klo])/h; //Cubic spline polynomial is now evaluated.
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0;
}
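// Editorial sketch (hypothetical, never called): spline() and splint() are
// used as a pair; spline() runs once to tabulate second derivatives, and
// splint() then evaluates the interpolant as often as needed. Passing 1e30
// for the end derivatives requests natural boundary conditions, per the
// doc comment on spline() above.
static float example_spline_usage(void)
{
float xa[6], ya[6], y2a[6], y; // NR 1-indexed arrays: slots 1..5 used
int i, n = 5;
for (i = 1; i <= n; i++) {
xa[i] = (float)i;
ya[i] = xa[i] * xa[i]; // tabulate f(x) = x^2
}
spline(xa, ya, n, 1.0e30, 1.0e30, y2a); // natural-spline setup
splint(xa, ya, y2a, n, 2.5, &y); // evaluate at x = 2.5
return y; // close to 6.25 (natural boundaries introduce a small error)
}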
unsigned long *lvector(long nl, long nh)
/* allocate an unsigned long vector with subscript range v[nl..nh] */
{
unsigned long *v;
v = (unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(unsigned long)));
if(!v) nrerror("allocation failure in lvector()");
return v - nl + NR_END;
}
void free_lvector(unsigned long *v, long nl, long nh)
/* free an unsigned long vector allocated with lvector() */
{
free((FREE_ARG) (v+nl-NR_END));
}
/* dnbiasdM */
double dnbiasdM(double M, float z, double M_o, float del_o){
double sigsq, del, sig_one, sig_o;
if ((M_o-M) < TINY){
LOG_ERROR("In function dnbiasdM: M must be less than M_o!\nAborting...\n");
Throw(ValueError);
}
del = Deltac/dicke(z) - del_o;
if (del < 0){
LOG_ERROR(" In function dnbiasdM: del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n");
Throw(ValueError);
}
sig_o = sigma_z0(M_o);
sig_one = sigma_z0(M);
sigsq = sig_one*sig_one - sig_o*sig_o;
return -(RHOcrit*cosmo_params_ps->OMm)/M /sqrt(2*PI) *del*pow(sigsq,-1.5)*pow(E, -0.5*del*del/sigsq)*dsigmasqdm_z0(M);
}
/*
calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias
*/
double FgtrM_bias(double z, double M, double del_bias, double sig_bias){
double del, sig, sigsmallR;
sigsmallR = sigma_z0(M);
if (!(sig_bias < sigsmallR)){ // biased region is smaller than the halo!
// fprintf(stderr, "FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n");
// return 0;
return 0.000001;
}
del = Deltac/dicke(z) - del_bias;
sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);
return splined_erfc(del / (sqrt(2)*sig));
}
/* Uses sigma parameters instead of Mass for scale */
double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias){
double del, sig;
if (!(sig_bias < sigsmallR)){ // biased region is smaller than the halo!
// fprintf(stderr, "local_FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n");
// return 0;
return 0.000001;
}
del = Deltac/dicke(z) - del_bias;
sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias);
return splined_erfc(del / (sqrt(2)*sig));
}
/* redshift derivative of the growth function at z */
double ddicke_dz(double z){
float dz = 1e-10;
return (dicke(z+dz)-dicke(z))/dz;
}
/* compute a mass limit where the stellar baryon fraction and the escape fraction exceed unity */
float Mass_limit (float logM, float PL, float FRAC) {
return FRAC*pow(pow(10.,logM)/1e10,PL);
}
void bisection(float *x, float xlow, float xup, int *iter){
*x=(xlow + xup)/2.;
++(*iter);
}
float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC){
int i, iter, max_iter=200;
float rel_tol=0.001;
float logMlow, logMupper, x, x1;
iter = 0;
logMlow = log10(Mmin);
logMupper = log10(Mmax);
if (PL < 0.) {
if (Mass_limit(logMlow,PL,FRAC) <= 1.) {
return Mmin;
}
}
else if (PL > 0.) {
if (Mass_limit(logMupper,PL,FRAC) <= 1.) {
return Mmax;
}
}
else
return 0;
bisection(&x, logMlow, logMupper, &iter);
do {
if((Mass_limit(logMlow,PL,FRAC)-1.)*(Mass_limit(x,PL,FRAC)-1.) < 0.)
logMupper = x;
else
logMlow = x;
bisection(&x1, logMlow, logMupper, &iter);
if(fabs(x1-x) < rel_tol) {
return pow(10.,x1);
}
x = x1;
}
while(iter < max_iter);
// Got to max_iter without finding a solution.
LOG_ERROR("Failed to find a mass limit to regulate stellar fraction/escape fraction is between 0 and 1.");
LOG_ERROR(" The solution does not converge or iterations are not sufficient.");
// Throw(ParameterError);
Throw(MassDepZetaError);
return(0.0);
}
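// Editorial sketch (hypothetical values): Mass_limit_bisection() finds the
// halo mass at which FRAC*(M/1e10)^PL crosses unity, i.e. where the stellar
// or escape fraction would exceed 1 and must be capped. The root can be
// checked analytically: M = 1e10 * FRAC^(-1/PL).
static float example_mass_limit(void)
{
// with FRAC = 0.05 and PL = 0.5, the analytic crossing mass is
// 1e10 * 0.05^(-2) = 4e12 Msun; the bisection should agree to rel_tol
return Mass_limit_bisection(1e8, 1e16, 0.5, 0.05);
}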
int initialise_ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) {
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
lnMhalo_param = calloc(nbins,sizeof(double));
Muv_param = calloc(nbins,sizeof(double));
Mhalo_param = calloc(nbins,sizeof(double));
LF_spline_acc = gsl_interp_accel_alloc();
LF_spline = gsl_spline_alloc(gsl_interp_cspline, nbins);
init_ps();
int status;
Try initialiseSigmaMInterpTable(0.999*Mhalo_min,1.001*Mhalo_max);
Catch(status) {
LOG_ERROR("\t...called from initialise_ComputeLF");
return(status);
}
initialised_ComputeLF = true;
return(0);
}
void cleanup_ComputeLF(){
free(lnMhalo_param);
free(Muv_param);
free(Mhalo_param);
gsl_spline_free (LF_spline);
gsl_interp_accel_free(LF_spline_acc);
freeSigmaMInterpTable();
initialised_ComputeLF = 0;
}
int ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params,
struct FlagOptions *flag_options, int component, int NUM_OF_REDSHIFT_FOR_LF, float *z_LF, float *M_TURNs, double *M_uv_z, double *M_h_z, double *log10phi) {
/*
This is an API-level function and thus returns an int status.
*/
int status;
Try{ // This try block covers the whole function.
// This NEEDS to be done every time, because the actual objects passed in as
// user_params, cosmo_params etc. can change between calls, invalidating the
// previously allocated memory.
initialise_ComputeLF(nbins, user_params,cosmo_params,astro_params,flag_options);
int i,i_z;
int i_unity, i_smth, mf, nbins_smth=7;
double dlnMhalo, lnMhalo_i, SFRparam, Muv_1, Muv_2, dMuvdMhalo;
double Mhalo_i, lnMhalo_min, lnMhalo_max, lnMhalo_lo, lnMhalo_hi, dlnM, growthf;
double f_duty_upper, Mcrit_atom;
float Fstar, Fstar_temp;
double dndm;
int gsl_status;
gsl_set_error_handler_off();
if (astro_params->ALPHA_STAR < -0.5)
LOG_WARNING(
"ALPHA_STAR is %f, which is unphysical value given the observational LFs.\n"\
"Also, when ALPHA_STAR < -.5, LFs may show a kink. It is recommended to set ALPHA_STAR > -0.5.",
astro_params->ALPHA_STAR
);
mf = user_params_ps->HMF;
lnMhalo_min = log(Mhalo_min*0.999);
lnMhalo_max = log(Mhalo_max*1.001);
dlnMhalo = (lnMhalo_max - lnMhalo_min)/(double)(nbins - 1);
for (i_z=0; i_z<NUM_OF_REDSHIFT_FOR_LF; i_z++) {
growthf = dicke(z_LF[i_z]);
Mcrit_atom = atomic_cooling_threshold(z_LF[i_z]);
i_unity = -1;
for (i=0; i<nbins; i++) {
// generate interpolation arrays
lnMhalo_param[i] = lnMhalo_min + dlnMhalo*(double)i;
Mhalo_i = exp(lnMhalo_param[i]);
if (component == 1)
Fstar = astro_params->F_STAR10*pow(Mhalo_i/1e10,astro_params->ALPHA_STAR);
else
Fstar = astro_params->F_STAR7_MINI*pow(Mhalo_i/1e7,astro_params->ALPHA_STAR_MINI);
if (Fstar > 1.) Fstar = 1;
if (i_unity < 0) { // Find the array number at which Fstar crosses unity.
if (astro_params->ALPHA_STAR > 0.) {
if ( (1.- Fstar) < FRACT_FLOAT_ERR ) i_unity = i;
}
else if (astro_params->ALPHA_STAR < 0. && i < nbins-1) {
if (component == 1)
Fstar_temp = astro_params->F_STAR10*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e10,astro_params->ALPHA_STAR);
else
Fstar_temp = astro_params->F_STAR7_MINI*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e7,astro_params->ALPHA_STAR_MINI);
if (Fstar_temp < 1. && (1.- Fstar) < FRACT_FLOAT_ERR) i_unity = i;
}
}
// parametrization of SFR
SFRparam = Mhalo_i * cosmo_params->OMb/cosmo_params->OMm * (double)Fstar * (double)(hubble(z_LF[i_z])*SperYR/astro_params->t_STAR); // units of M_solar/year
Muv_param[i] = 51.63 - 2.5*log10(SFRparam*Luv_over_SFR); // UV magnitude
// if the Muv value is NaN or inf, set it to 10. to avoid raising an error
if ( isinf(Muv_param[i]) || isnan(Muv_param[i]) ) Muv_param[i] = 10.;
M_uv_z[i + i_z*nbins] = Muv_param[i];
}
gsl_status = gsl_spline_init(LF_spline, lnMhalo_param, Muv_param, nbins);
GSL_ERROR(gsl_status);
lnMhalo_lo = log(Mhalo_min);
lnMhalo_hi = log(Mhalo_max);
dlnM = (lnMhalo_hi - lnMhalo_lo)/(double)(nbins - 1);
// There is a kink in the LFs where Fstar crosses unity. This kink is a numerical artefact caused by the derivative dMuvdMhalo.
// In most cases the kink does not appear in the magnitude ranges of interest (e.g. -22 < Muv < -10), but for some extreme
// parameters it does. To avoid it, we interpolate the derivative across the range where the kink appears.
// 'i_unity' is the array number at which the kink appears. 'i_unity-3' and 'i_unity+12' are related to the range of interpolation,
// which is an arbitrary choice.
// NOTE: This method does NOT work in cases with ALPHA_STAR < -0.5. But, this parameter range is unphysical given that the
// observational LFs favour positive ALPHA_STAR in this model.
// i_smth = 0: calculates LFs without interpolation.
// i_smth = 1: calculates LFs using interpolation where Fstar crosses unity.
if (i_unity-3 < 0) i_smth = 0;
else if (i_unity+12 > nbins-1) i_smth = 0;
else i_smth = 1;
if (i_smth == 0) {
for (i=0; i<nbins; i++) {
// calculate luminosity function
lnMhalo_i = lnMhalo_lo + dlnM*(double)i;
Mhalo_param[i] = exp(lnMhalo_i);
M_h_z[i + i_z*nbins] = Mhalo_param[i];
Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc);
Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc);
dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i));
if (component == 1)
f_duty_upper = 1.;
else
f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom));
if(mf==0) {
log10phi[i + i_z*nbins] = log10( dNdM(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else if(mf==1) {
log10phi[i + i_z*nbins] = log10( dNdM_st(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else if(mf==2) {
log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else if(mf==3) {
log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF_z(z_LF[i_z], growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
}
else{
LOG_ERROR("HMF should be between 0-3, got %d", mf);
Throw(ValueError);
}
if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.)
log10phi[i + i_z*nbins] = -30.;
}
}
else {
lnM_temp = calloc(nbins_smth,sizeof(double));
deriv_temp = calloc(nbins_smth,sizeof(double));
deriv = calloc(nbins,sizeof(double));
for (i=0; i<nbins; i++) {
// calculate luminosity function
lnMhalo_i = lnMhalo_lo + dlnM*(double)i;
Mhalo_param[i] = exp(lnMhalo_i);
M_h_z[i + i_z*nbins] = Mhalo_param[i];
Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc);
Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc);
dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i));
deriv[i] = fabs(dMuvdMhalo);
}
deriv_spline_acc = gsl_interp_accel_alloc();
deriv_spline = gsl_spline_alloc(gsl_interp_cspline, nbins_smth);
// generate interpolation arrays to smooth discontinuity of the derivative causing a kink
// Note that the number of array elements and the range of interpolation are made by arbitrary choices.
lnM_temp[0] = lnMhalo_param[i_unity - 3];
lnM_temp[1] = lnMhalo_param[i_unity - 2];
lnM_temp[2] = lnMhalo_param[i_unity + 8];
lnM_temp[3] = lnMhalo_param[i_unity + 9];
lnM_temp[4] = lnMhalo_param[i_unity + 10];
lnM_temp[5] = lnMhalo_param[i_unity + 11];
lnM_temp[6] = lnMhalo_param[i_unity + 12];
deriv_temp[0] = deriv[i_unity - 3];
deriv_temp[1] = deriv[i_unity - 2];
deriv_temp[2] = deriv[i_unity + 8];
deriv_temp[3] = deriv[i_unity + 9];
deriv_temp[4] = deriv[i_unity + 10];
deriv_temp[5] = deriv[i_unity + 11];
deriv_temp[6] = deriv[i_unity + 12];
gsl_status = gsl_spline_init(deriv_spline, lnM_temp, deriv_temp, nbins_smth);
GSL_ERROR(gsl_status);
for (i=0;i<9;i++){
deriv[i_unity + i - 1] = gsl_spline_eval(deriv_spline, lnMhalo_param[i_unity + i - 1], deriv_spline_acc);
}
for (i=0; i<nbins; i++) {
if (component == 1)
f_duty_upper = 1.;
else
f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom));
if(mf==0)
dndm = dNdM(growthf, Mhalo_param[i]);
else if(mf==1)
dndm = dNdM_st(growthf, Mhalo_param[i]);
else if(mf==2)
dndm = dNdM_WatsonFOF(growthf, Mhalo_param[i]);
else if(mf==3)
dndm = dNdM_WatsonFOF_z(z_LF[i_z], growthf, Mhalo_param[i]);
else{
LOG_ERROR("HMF should be between 0-3, got %d", mf);
Throw(ValueError);
}
log10phi[i + i_z*nbins] = log10(dndm * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / deriv[i]);
if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.)
log10phi[i + i_z*nbins] = -30.;
}
// free the per-redshift smoothing workspace so repeated i_z iterations do not leak
gsl_spline_free(deriv_spline);
gsl_interp_accel_free(deriv_spline_acc);
free(lnM_temp);
free(deriv_temp);
free(deriv);
}
}
cleanup_ComputeLF();
} // End try
Catch(status){
return status;
}
return(0);
}
void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max){
//calculates the weightings and the positions for Gauss-Legendre quadrature.
gauleg(log(M_Min),log(M_Max),xi_SFR_Xray,wi_SFR_Xray,n);
}
float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2){
float sigma1, dsigmadm,dsigma_val;
float MassBinLow;
int MassBin;
if(user_params_ps->USE_INTERPOLATION_TABLES) {
MassBin = (int)floor( (M1 - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma1 = Sigma_InterpTable[MassBin] + ( M1 - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
dsigma_val = dSigmadm_InterpTable[MassBin] + ( M1 - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
dsigmadm = -pow(10.,dsigma_val);
}
else {
sigma1 = sigma_z0(exp(M1));
dsigmadm = dsigmasqdm_z0(exp(M1));
}
M1 = exp(M1);
M2 = exp(M2);
sigma1 = sigma1*sigma1;
sigma2 = sigma2*sigma2;
dsigmadm = dsigmadm/(2.0*sigma1); // note: sigma1 here is already sigma1^2, so this divides by an extra factor of sigma1; that factor cancels against the 2.*sigma1 in the return statements below
if((sigma1 > sigma2)) {
return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( sigma1 - sigma2 ) ) ) )/(pow( sigma1 - sigma2, 1.5));
}
else if(sigma1==sigma2) {
return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( 1.e-6 ) ) ) )/(pow( 1.e-6, 1.5));
}
else {
return 0.;
}
}
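// Editorial note: up to the 1/sqrt(2*PI) normalisation applied by its
// callers, dNdM_conditional() evaluates the extended Press-Schechter
// first-crossing distribution (Lacey & Cole 1993), written with S = sigma^2(M):
//
//   f(S1, d1 | S2, d2) dS1 = (1/sqrt(2 pi)) * (d1 - d2) / (S1 - S2)^(3/2)
//                            * exp( -(d1 - d2)^2 / (2 (S1 - S2)) ) dS1 ,
//
// where the overdensities are linearly extrapolated to z = 0, which is why
// every (delta1 - delta2) above is divided by growthf. The sigma1 == sigma2
// branch regularises the S1 -> S2 limit with a floor of 1e-6.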
void initialiseGL_Nion(int n, float M_Min, float M_Max){
//calculates the weightings and the positions for Gauss-Legendre quadrature.
gauleg(log(M_Min),log(M_Max),xi_SFR,wi_SFR,n);
}
double dNion_ConditionallnM_MINI(double lnM, void *params) {
struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params;
double M = exp(lnM); // linear scale
double growthf = vals.gf_obs;
double M2 = vals.Mval; // natural log scale
double sigma2 = vals.sigma2;
double del1 = vals.delta1;
double del2 = vals.delta2;
double MassTurnover = vals.Mdrop;
double MassTurnover_upper = vals.Mdrop_upper;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar7_MINI = vals.frac_star;
double Fesc7_MINI = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else
Fstar = pow(M/1e7,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else
Fesc = pow(M/1e7,Alpha_esc);
return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
double dNion_ConditionallnM(double lnM, void *params) {
struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params;
double M = exp(lnM); // linear scale
double growthf = vals.gf_obs;
double M2 = vals.Mval; // natural log scale
double sigma2 = vals.sigma2;
double del1 = vals.delta1;
double del2 = vals.delta2;
double MassTurnover = vals.Mdrop;
double Alpha_star = vals.pl_star;
double Alpha_esc = vals.pl_esc;
double Fstar10 = vals.frac_star;
double Fesc10 = vals.frac_esc;
double Mlim_Fstar = vals.LimitMass_Fstar;
double Mlim_Fesc = vals.LimitMass_Fesc;
double Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar10;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar10;
else
Fstar = pow(M/1e10,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc10;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc10;
else
Fesc = pow(M/1e10,Alpha_esc);
return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {
if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
return GaussLegendreQuad_Nion_MINI(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) MassTurnover_upper, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
}
else{ //standard old code
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.01; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.Mdrop_upper = MassTurnover_upper,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc
};
int status;
F.function = &dNion_ConditionallnM_MINI;
F.params = &parameters_gsl_SFR_con;
lower_limit = M1;
upper_limit = M2;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: growthf=%e M2=%e sigma2=%e delta1=%e delta2=%e MassTurnover=%e",growthf,M2,sigma2,delta1,delta2,MassTurnover);
LOG_ERROR("data: MassTurnover_upper=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover_upper,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
if(delta2 > delta1) {
result = 1.;
return result;
}
else {
return result;
}
}
}
double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {
if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
return GaussLegendreQuad_Nion(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
}
else{ //standard
double result, error, lower_limit, upper_limit;
gsl_function F;
double rel_tol = 0.01; //<- relative tolerance
gsl_integration_workspace * w
= gsl_integration_workspace_alloc (1000);
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc
};
F.function = &dNion_ConditionallnM;
F.params = &parameters_gsl_SFR_con;
lower_limit = M1;
upper_limit = M2;
int status;
gsl_set_error_handler_off();
status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
1000, GSL_INTEG_GAUSS61, w, &result, &error);
if(status!=0) {
LOG_ERROR("gsl integration error occured!");
LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
LOG_ERROR("data: growthf=%e M1=%e M2=%e sigma2=%e delta1=%e delta2=%e",growthf,M1,M2,sigma2,delta1,delta2);
LOG_ERROR("data: MassTurnover=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
GSL_ERROR(status);
}
gsl_integration_workspace_free (w);
if(delta2 > delta1) {
result = 1.;
return result;
}
else {
return result;
}
}
}
float Nion_ConditionallnM_GL_MINI(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
float M = exp(lnM);
float growthf = parameters_gsl_SFR_con.gf_obs;
float M2 = parameters_gsl_SFR_con.Mval;
float sigma2 = parameters_gsl_SFR_con.sigma2;
float del1 = parameters_gsl_SFR_con.delta1;
float del2 = parameters_gsl_SFR_con.delta2;
float MassTurnover = parameters_gsl_SFR_con.Mdrop;
float MassTurnover_upper = parameters_gsl_SFR_con.Mdrop_upper;
float Alpha_star = parameters_gsl_SFR_con.pl_star;
float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
float Fstar7_MINI = parameters_gsl_SFR_con.frac_star;
float Fesc7_MINI = parameters_gsl_SFR_con.frac_esc;
float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;
float Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar7_MINI;
else
Fstar = pow(M/1e7,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc7_MINI;
else
Fesc = pow(M/1e7,Alpha_esc);
return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
float Nion_ConditionallnM_GL(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
float M = exp(lnM);
float growthf = parameters_gsl_SFR_con.gf_obs;
float M2 = parameters_gsl_SFR_con.Mval;
float sigma2 = parameters_gsl_SFR_con.sigma2;
float del1 = parameters_gsl_SFR_con.delta1;
float del2 = parameters_gsl_SFR_con.delta2;
float MassTurnover = parameters_gsl_SFR_con.Mdrop;
float Alpha_star = parameters_gsl_SFR_con.pl_star;
float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
float Fstar10 = parameters_gsl_SFR_con.frac_star;
float Fesc10 = parameters_gsl_SFR_con.frac_esc;
float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;
float Fstar,Fesc;
if (Alpha_star > 0. && M > Mlim_Fstar)
Fstar = 1./Fstar10;
else if (Alpha_star < 0. && M < Mlim_Fstar)
Fstar = 1./Fstar10;
else
Fstar = pow(M/1e10,Alpha_star);
if (Alpha_esc > 0. && M > Mlim_Fesc)
Fesc = 1./Fesc10;
else if (Alpha_esc < 0. && M < Mlim_Fesc)
Fesc = 1./Fesc10;
else
Fesc = pow(M/1e10,Alpha_esc);
return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}
//JBM: Same as above but for minihaloes. Has two cutoffs, lower and upper.
float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES) {
double result, nu_lower_limit, nu_higher_limit;
int i;
double integrand, x;
integrand = 0.;
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.Mdrop_upper = MassTurnover_upper,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar7_MINI,
.frac_esc = Fesc7_MINI,
.LimitMass_Fstar = Mlim_Fstar_MINI,
.LimitMass_Fesc = Mlim_Fesc_MINI
};
if(delta2 > delta1*0.9999) {
result = 1.;
return result;
}
if(FAST_FCOLL_TABLES){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
if(MassTurnover_upper <= MassTurnover){
return 1e-40; //in sharp cut it's zero
}
double delta_arg = pow( (delta1 - delta2)/growthf , 2.);
double LogMass=log(MassTurnover);
int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
nu_lower_limit = delta_arg/(sigmaM1 * sigmaM1 - sigma2 * sigma2);
LogMass = log(MassTurnover_upper);
MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(double)MassBin;
double sigmaM2 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
nu_higher_limit = delta_arg/(sigmaM2*sigmaM2-sigma2*sigma2);
//note we keep nupivot1 just in case very negative delta makes it reach that nu
LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.
LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);
double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot1>nu>nupivot2 (small M)
double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
//beta2 fixed by continuity.
// 3PLs (three power laws)
double fcollres=0.0;
double fcollres_high=0.0; //for the higher threshold to subtract
// re-written for further speedups
if (nu_higher_limit <= nupivot2){ //if both are below pivot2 don't bother adding and subtracting the high contribution
fcollres=(Fcollapprox(nu_lower_limit,beta3))*pow(nupivot2,-beta3);
fcollres_high=(Fcollapprox(nu_higher_limit,beta3))*pow(nupivot2,-beta3);
}
else {
fcollres_high=(Fcollapprox(nu_higher_limit,beta2))*pow(nupivot1,-beta2);
if (nu_lower_limit > nupivot2){
fcollres=(Fcollapprox(nu_lower_limit,beta2))*pow(nupivot1,-beta2);
}
else {
fcollres=(Fcollapprox(nupivot2,beta2))*pow(nupivot1,-beta2);
fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
}
}
if (fcollres < fcollres_high){
return 1e-40;
}
return (fcollres-fcollres_high);
}
else{
for(i=1; i<(n+1); i++){
if(Type==1) {
x = xi_SFR_Xray[i];
integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
}
if(Type==0) {
x = xi_SFR[i];
integrand += wi_SFR[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
}
}
return integrand;
}
}
//JBM: Added the approximation if user_params->FAST_FCOLL_TABLES==True
float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES) {
//Performs the Gauss-Legendre quadrature.
int i;
double result, nu_lower_limit;
if(delta2 > delta1*0.9999) {
result = 1.;
return result;
}
double integrand, x;
integrand = 0.;
struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
.gf_obs = growthf,
.Mval = M2,
.sigma2 = sigma2,
.delta1 = delta1,
.delta2 = delta2,
.Mdrop = MassTurnover,
.pl_star = Alpha_star,
.pl_esc = Alpha_esc,
.frac_star = Fstar10,
.frac_esc = Fesc10,
.LimitMass_Fstar = Mlim_Fstar,
.LimitMass_Fesc = Mlim_Fesc
};
if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
double delta_arg = pow( (delta1 - delta2)/growthf , 2.0);
double LogMass=log(MassTurnover);
int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
nu_lower_limit = delta_arg/(sigmaM1*sigmaM1-sigma2*sigma2);
LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.
LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);
double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot2<nu<nupivot1 (small M)
double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
//beta2 fixed by continuity.
double nucrit_sigma2 = delta_arg*pow(sigma2+1e-10,-2.0); //above this nu sigma2>sigma1, so HMF=0. eps added to avoid infinities
// 3PLs (three power laws)
double fcollres=0.0;
if(nu_lower_limit >= nucrit_sigma2){ //fully in the flat part of sigma(nu), M^alpha is nu-independent.
return 1e-40;
}
else{ //we subtract the contribution from high nu, since the HMF is set to 0 if sigma2>sigma1
fcollres -= Fcollapprox(nucrit_sigma2,beta1)*pow(nupivot1,-beta1);
}
if(nu_lower_limit >= nupivot1){
fcollres+=Fcollapprox(nu_lower_limit,beta1)*pow(nupivot1,-beta1);
}
else{
fcollres+=Fcollapprox(nupivot1,beta1)*pow(nupivot1,-beta1);
if (nu_lower_limit > nupivot2){
fcollres+=(Fcollapprox(nu_lower_limit,beta2)-Fcollapprox(nupivot1,beta2))*pow(nupivot1,-beta2);
}
else {
fcollres+=(Fcollapprox(nupivot2,beta2)-Fcollapprox(nupivot1,beta2) )*pow(nupivot1,-beta2);
fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
}
}
if (fcollres<=0.0){
LOG_DEBUG("Negative fcoll? fc=%.1le Mt=%.1le \n",fcollres, MassTurnover);
fcollres=1e-40;
}
return fcollres;
}
else{
for(i=1; i<(n+1); i++){
if(Type==1) {
x = xi_SFR_Xray[i];
integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
}
if(Type==0) {
x = xi_SFR[i];
integrand += wi_SFR[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
}
}
return integrand;
}
}
#include <gsl/gsl_sf_gamma.h>
//JBM: Integral of a power-law times exponential for EPS: \int dnu nu^beta * exp(-nu/2)/sqrt(nu) from numin to infty.
double Fcollapprox (double numin, double beta){
//nu is deltacrit^2/sigma^2, corrected by delta(R) and sigma(R)
double gg = gsl_sf_gamma_inc(0.5+beta,0.5*numin);
return gg*pow(2,0.5+beta)*pow(2.0*PI,-0.5);
}
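// Editorial sketch (hypothetical, never called): for beta = 0 the integral
// above reduces to the familiar EPS collapsed fraction erfc(sqrt(nu/2)),
// since Gamma(1/2, x) = sqrt(pi)*erfc(sqrt(x)); this checks the normalisation.
static double example_check_Fcollapprox(void)
{
double numin = 1.0;
double a = Fcollapprox(numin, 0.); // incomplete-gamma form above
double b = erfc(sqrt(0.5 * numin)); // closed form for beta = 0
return a - b; // should vanish to floating-point precision
}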
void initialise_Nion_General_spline(float z, float min_density, float max_density, float Mmax, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES){
float Mmin = MassTurnover/50.;
double overdense_val, growthf, sigma2;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
double overdense_small_high, overdense_small_low;
int i;
float ln_10;
if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
}
else {
overdense_small_high = max_density;
}
overdense_small_low = min_density;
ln_10 = log(10);
float MassBinLow;
int MassBin;
growthf = dicke(z);
Mmin = log(Mmin);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
#pragma omp parallel shared(log10_overdense_spline_SFR,log10_Nion_spline,overdense_small_low,overdense_small_high,growthf,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i,overdense_val) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
overdense_val = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
log10_overdense_spline_SFR[i] = overdense_val;
log10_Nion_spline[i] = GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,pow(10.,overdense_val)-1.,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
if(fabs(log10_Nion_spline[i]) < 1e-38) {
log10_Nion_spline[i] = 1e-38;
}
log10_Nion_spline[i] = log10(log10_Nion_spline[i]);
if(log10_Nion_spline[i] < -40.){
log10_Nion_spline[i] = -40.;
}
log10_Nion_spline[i] *= ln_10;
}
}
for (i=0; i<NSFR_low; i++){
if(!isfinite(log10_Nion_spline[i])) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
#pragma omp parallel shared(Overdense_spline_SFR,Nion_spline,overdense_large_low,overdense_large_high,growthf,Mmin,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
Nion_spline[i] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
if(Nion_spline[i]<0.) {
Nion_spline[i]=pow(10.,-40.0);
}
}
}
for(i=0;i<NSFR_high;i++) {
if(!isfinite(Nion_spline[i])) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
void initialise_Nion_General_spline_MINI(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){
double growthf, sigma2;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
double overdense_small_high, overdense_small_low;
int i,j;
float ln_10;
if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
}
else {
overdense_small_high = max_density;
}
overdense_small_low = min_density;
ln_10 = log(10);
float MassBinLow;
int MassBin;
growthf = dicke(z);
Mmin = log(Mmin);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
for (i=0; i<NSFR_low; i++){
log10_overdense_spline_SFR[i] = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
}
for (i=0;i<NSFR_high;i++) {
Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
for (i=0;i<NMTURN;i++){
Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min));
Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI));
}
#pragma omp parallel shared(log10_Nion_spline,growthf,Mmax,sigma2,log10_overdense_spline_SFR,Mturns,Mturns_MINI,\
Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,ln_10,log10_Nion_spline_MINI,Mcrit_atom,\
Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns[j],Alpha_star,\
Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES));
if(log10_Nion_spline[i+j*NSFR_low] < -40.){
log10_Nion_spline[i+j*NSFR_low] = -40.;
}
log10_Nion_spline[i+j*NSFR_low] *= ln_10;
log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\
Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES));
if(log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){
log10_Nion_spline_MINI[i+j*NSFR_low] = -40.;
}
log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10;
}
}
}
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
if(isfinite(log10_Nion_spline[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(log10_Nion_spline_MINI[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
#pragma omp parallel shared(Nion_spline,growthf,Mmin,Mmax,sigma2,Overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\
Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\
Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(
growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],
Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES
);
if(Nion_spline[i+j*NSFR_high]<0.) {
Nion_spline[i+j*NSFR_high]=pow(10.,-40.0);
}
Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(
growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],
Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,
Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES
);
if(Nion_spline_MINI[i+j*NSFR_high]<0.) {
Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
}
}
}
}
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
if(isfinite(Nion_spline[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(Nion_spline_MINI[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_Nion_General_spline_MINI_prev(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){
double growthf, sigma2;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
double overdense_small_high, overdense_small_low;
int i,j;
float ln_10;
if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
}
else {
overdense_small_high = max_density;
}
overdense_small_low = min_density;
ln_10 = log(10);
float MassBinLow;
int MassBin;
growthf = dicke(z);
Mmin = log(Mmin);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
for (i=0; i<NSFR_low; i++){
prev_log10_overdense_spline_SFR[i] = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
}
for (i=0;i<NSFR_high;i++) {
prev_Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
for (i=0;i<NMTURN;i++){
Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min));
Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI));
}
#pragma omp parallel shared(prev_log10_Nion_spline,growthf,Mmax,sigma2,prev_log10_overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\
Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_log10_Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\
Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
prev_log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns[j],\
Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES));
if(prev_log10_Nion_spline[i+j*NSFR_low] < -40.){
prev_log10_Nion_spline[i+j*NSFR_low] = -40.;
}
prev_log10_Nion_spline[i+j*NSFR_low] *= ln_10;
prev_log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\
pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\
Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES));
if(prev_log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){
prev_log10_Nion_spline_MINI[i+j*NSFR_low] = -40.;
}
prev_log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10;
}
}
}
for (i=0; i<NSFR_low; i++){
for (j=0; j<NMTURN; j++){
if(isfinite(prev_log10_Nion_spline[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(prev_log10_Nion_spline_MINI[i+j*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
#pragma omp parallel shared(prev_Nion_spline,growthf,Mmin,Mmax,sigma2,prev_Overdense_spline_SFR,Mturns,\
Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_Nion_spline_MINI,Mturns_MINI,\
Mcrit_atom,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
private(i,j) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
prev_Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,prev_Overdense_spline_SFR[i],\
Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
if(prev_Nion_spline[i+j*NSFR_high]<0.) {
prev_Nion_spline[i+j*NSFR_high]=pow(10.,-40.0);
}
prev_Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(growthf,Mmin,Mmax,sigma2,Deltac,\
prev_Overdense_spline_SFR[i],Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,\
Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES);
if(prev_Nion_spline_MINI[i+j*NSFR_high]<0.) {
prev_Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
}
}
}
}
for(i=0;i<NSFR_high;i++) {
for (j=0; j<NMTURN; j++){
if(isfinite(prev_Nion_spline[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline");
// Throw(ParameterError);
Throw(TableGenerationError);
}
if(isfinite(prev_Nion_spline_MINI[i+j*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_Nion_Ts_spline(
int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Alpha_esc,
float Fstar10, float Fesc10
){
int i;
float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar, Mlim_Fesc;
if (z_val == NULL){
z_val = calloc(Nbin,sizeof(double));
Nion_z_val = calloc(Nbin,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10);
#pragma omp parallel shared(z_val,Nion_z_val,zmin,zmax, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
Nion_z_val[i] = Nion_General(z_val[i], Mmin, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc);
}
}
for (i=0; i<Nbin; i++){
if(isfinite(Nion_z_val[i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
void initialise_Nion_Ts_spline_MINI(
int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10,
float Fesc10, float Fstar7_MINI, float Fesc7_MINI
){
int i,j;
float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar, Mlim_Fesc, Mlim_Fstar_MINI, Mlim_Fesc_MINI, Mcrit_atom_val;
if (z_val == NULL){
z_val = calloc(Nbin,sizeof(double));
Nion_z_val = calloc(Nbin,sizeof(double));
Nion_z_val_MINI = calloc(Nbin*NMTURN,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10);
Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));
Mlim_Fesc_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc7_MINI * pow(1e3, Alpha_esc));
float MassTurnover[NMTURN];
for (i=0;i<NMTURN;i++){
MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
}
#pragma omp parallel shared(z_val,Nion_z_val,Nbin,zmin,zmax,Mmin,Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,\
Nion_z_val_MINI,MassTurnover,Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI) \
private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
Mcrit_atom_val = atomic_cooling_threshold(z_val[i]);
Nion_z_val[i] = Nion_General(z_val[i], Mmin, Mcrit_atom_val, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc);
for (j=0; j<NMTURN; j++){
Nion_z_val_MINI[i+j*Nbin] = Nion_General_MINI(z_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, Alpha_esc, Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI);
}
}
}
for (i=0; i<Nbin; i++){
if(isfinite(Nion_z_val[i])==0) {
i = Nbin;
LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (j=0; j<NMTURN; j++){
if(isfinite(Nion_z_val_MINI[i+j*Nbin])==0){
j = NMTURN;
LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_SFRD_spline(int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Fstar10){
int i;
float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar;
if (z_X_val == NULL){
z_X_val = calloc(Nbin,sizeof(double));
SFRD_val = calloc(Nbin,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
#pragma omp parallel shared(z_X_val,SFRD_val,zmin,zmax, MassTurn, Alpha_star, Fstar10, Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
SFRD_val[i] = Nion_General(z_X_val[i], Mmin, MassTurn, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.);
}
}
for (i=0; i<Nbin; i++){
if(isfinite(SFRD_val[i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
void initialise_SFRD_spline_MINI(int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI){
int i,j;
float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL;
float Mlim_Fstar, Mlim_Fstar_MINI, Mcrit_atom_val;
if (z_X_val == NULL){
z_X_val = calloc(Nbin,sizeof(double));
SFRD_val = calloc(Nbin,sizeof(double));
SFRD_val_MINI = calloc(Nbin*NMTURN,sizeof(double));
}
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));
float MassTurnover[NMTURN];
for (i=0;i<NMTURN;i++){
MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
}
#pragma omp parallel shared(z_X_val,zmin,zmax,Nbin,SFRD_val,Mmin, Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\
SFRD_val_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \
private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<Nbin; i++){
z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
Mcrit_atom_val = atomic_cooling_threshold(z_X_val[i]);
SFRD_val[i] = Nion_General(z_X_val[i], Mmin, Mcrit_atom_val, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.);
for (j=0; j<NMTURN; j++){
SFRD_val_MINI[i+j*Nbin] = Nion_General_MINI(z_X_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, 0., Fstar7_MINI, 1.,Mlim_Fstar_MINI,0.);
}
}
}
for (i=0; i<Nbin; i++){
if(isfinite(SFRD_val[i])==0) {
i = Nbin;
LOG_ERROR("Detected either an infinite or NaN value in SFRD_val");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (j=0; j<NMTURN; j++){
if(isfinite(SFRD_val_MINI[i+j*Nbin])==0) {
j = NMTURN;
LOG_ERROR("Detected either an infinite or NaN value in SFRD_val_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_SFRD_Conditional_table(
int Nfilter, float min_density[], float max_density[], float growthf[], float R[],
float MassTurnover, float Alpha_star, float Fstar10, bool FAST_FCOLL_TABLES
){
double overdense_val;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION;
double overdense_small_high, overdense_small_low;
float Mmin,Mmax,Mlim_Fstar,sigma2;
int i,j,k,i_tot;
float ln_10;
ln_10 = log(10);
Mmin = MassTurnover/50.;
Mmax = RtoM(R[Nfilter-1]);
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mmin = log(Mmin);
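// The high-density table is linear in delta between CRIT_DENS_TRANSITION and Deltac;
// the low-density table (built per filter scale below) is log-spaced in (1 + delta)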
for (i=0; i<NSFR_high;i++) {
overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
float MassBinLow;
int MassBin;
for (j=0; j < Nfilter; j++) {
Mmax = RtoM(R[j]);
initialiseGL_Nion_Xray(NGL_SFR, MassTurnover/50., Mmax);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
if(min_density[j]*growthf[j] < -1.) {
overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
}
else {
overdense_small_low = min_density[j]*growthf[j];
}
overdense_small_high = max_density[j]*growthf[j];
if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION;
}
for (i=0; i<NSFR_low; i++) {
overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
overdense_low_table[i] = pow(10.,overdense_val);
}
#pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
log10_SFRD_z_low_table[j][i] = GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
if(fabs(log10_SFRD_z_low_table[j][i]) < 1e-38) {
log10_SFRD_z_low_table[j][i] = 1e-38;
}
log10_SFRD_z_low_table[j][i] = log10(log10_SFRD_z_low_table[j][i]);
log10_SFRD_z_low_table[j][i] += 10.0;
log10_SFRD_z_low_table[j][i] *= ln_10;
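// the three steps above store ln(SFRD * 1e10): take log10, shift by 10 dex, then scale by ln(10)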
}
}
for (i=0; i<NSFR_low; i++){
if(isfinite(log10_SFRD_z_low_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
#pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
SFRD_z_high_table[j][i] *= pow(10., 10.0);
}
}
for(i=0;i<NSFR_high;i++) {
if(isfinite(SFRD_z_high_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
void initialise_SFRD_Conditional_table_MINI(
int Nfilter, float min_density[], float max_density[], float growthf[], float R[],
float Mcrit_atom[], float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI, bool FAST_FCOLL_TABLES
){
double overdense_val;
double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION;
double overdense_small_high, overdense_small_low;
float Mmin,Mmax,Mlim_Fstar,sigma2,Mlim_Fstar_MINI;
int i,j,k,i_tot;
float ln_10;
ln_10 = log(10);
Mmin = global_params.M_MIN_INTEGRAL;
Mmax = RtoM(R[Nfilter-1]);
Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));
float MassTurnover[NMTURN];
for (i=0;i<NMTURN;i++){
MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
}
Mmin = log(Mmin);
for (i=0; i<NSFR_high;i++) {
overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
}
float MassBinLow;
int MassBin;
for (j=0; j < Nfilter; j++) {
Mmax = RtoM(R[j]);
initialiseGL_Nion_Xray(NGL_SFR, global_params.M_MIN_INTEGRAL, Mmax);
Mmax = log(Mmax);
MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
MassBinLow = MinMass + mass_bin_width*(float)MassBin;
sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
if(min_density[j]*growthf[j] < -1.) {
overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
}
else {
overdense_small_low = min_density[j]*growthf[j];
}
overdense_small_high = max_density[j]*growthf[j];
if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) {
overdense_small_high = global_params.CRIT_DENS_TRANSITION;
}
for (i=0; i<NSFR_low; i++) {
overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
overdense_low_table[i] = pow(10.,overdense_val);
}
#pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\
log10_SFRD_z_low_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI,ln_10) \
private(i,k) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for (i=0; i<NSFR_low; i++){
log10_SFRD_z_low_table[j][i] = log10(GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES));
if(log10_SFRD_z_low_table[j][i] < -50.){
log10_SFRD_z_low_table[j][i] = -50.;
}
log10_SFRD_z_low_table[j][i] += 10.0;
log10_SFRD_z_low_table[j][i] *= ln_10;
for (k=0; k<NMTURN; k++){
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover[k], Mcrit_atom[j],Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES));
if(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] < -50.){
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = -50.;
}
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] += 10.0;
log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] *= ln_10;
}
}
}
for (i=0; i<NSFR_low; i++){
if(isfinite(log10_SFRD_z_low_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (k=0; k<NMTURN; k++){
if(isfinite(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low])==0) {
LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
#pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,\
Mlim_Fstar,SFRD_z_high_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \
private(i,k) num_threads(user_params_ps->N_THREADS)
{
#pragma omp for
for(i=0;i<NSFR_high;i++) {
SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],\
Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
if (SFRD_z_high_table[j][i] < 1e-50){
SFRD_z_high_table[j][i] = 1e-50;
}
SFRD_z_high_table[j][i] *= pow(10., 10.0);
for (k=0; k<NMTURN; k++){
SFRD_z_high_table_MINI[j][i+k*NSFR_high] = Nion_ConditionalM_MINI(growthf[j],Mmin,Mmax,sigma2,Deltac,\
overdense_high_table[i],MassTurnover[k],Mcrit_atom[j],\
Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES);
if (SFRD_z_high_table_MINI[j][i+k*NSFR_high] < 1e-50){
SFRD_z_high_table_MINI[j][i+k*NSFR_high] = 1e-50;
}
}
}
}
for(i=0;i<NSFR_high;i++) {
if(isfinite(SFRD_z_high_table[j][i])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
// Throw(ParameterError);
Throw(TableGenerationError);
}
for (k=0; k<NMTURN; k++){
if(isfinite(SFRD_z_high_table_MINI[j][i+k*NSFR_high])==0) {
LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table_MINI");
// Throw(ParameterError);
Throw(TableGenerationError);
}
}
}
}
}
// The volume filling factor at a given redshift, Q(z), or find redshift at a given Q, z(Q).
//
// The evolution of Q can be written as
// dQ/dt = dn_{ion}/dt - Q/t_{rec},
// where n_{ion} is the cumulative number of ionizing photons per baryon. The averaged recombination time is given by
// t_{rec} ~ 0.93 Gyr * (C_{HII}/3)^-1 * (T_0/2e4 K)^0.7 * ((1+z)/7)^-3.
// We assume a clumping factor of C_{HII}=3 and an IGM temperature of T_0 = 2e4 K, following
// Section 2.1 of Kuhlen & Faucher-Giguère (2012) MNRAS, 423, 862 and references therein.
// 1) initialise interpolation table
// -> initialise_Q_value_spline(NoRec, M_TURN, ALPHA_STAR, ALPHA_ESC, F_STAR10, F_ESC10)
// NoRec = 0: Compute dQ/dt with the recombination time.
// NoRec = 1: Ignore recombination.
// 2) find Q value at a given z -> Q_at_z(z, &(Q))
// or find z at a given Q -> z_at_Q(Q, &(z)).
// 3) free memory allocation -> free_Q_value()
// Set up interpolation table for the volume filling factor, Q, at a given redshift z and redshift at a given Q.
int InitialisePhotonCons(struct UserParams *user_params, struct CosmoParams *cosmo_params,
struct AstroParams *astro_params, struct FlagOptions *flag_options)
{
/*
This is an API-level function for initialising the photon conservation.
*/
int status;
Try{ // this try wraps the whole function.
Broadcast_struct_global_PS(user_params,cosmo_params);
Broadcast_struct_global_UF(user_params,cosmo_params);
init_ps();
// To solve the differential equation, we use Euler's method.
// NOTE:
// (1) With the fiducial parameter set,
// when the Q value is < 0.9, the difference is less than 5% compared with the accurate calculation.
// When Q ~ 0.98, the difference is ~25%. To increase accuracy one can reduce the step size 'da', but this increases the computing time.
// (2) With the fiducial parameter set,
// the difference in the redshift where reionization ends (Q = 1) is ~0.2% compared with the accurate calculation.
float ION_EFF_FACTOR,M_MIN,M_MIN_z0,M_MIN_z1,Mlim_Fstar, Mlim_Fesc;
double a_start = 0.03, a_end = 1./(1. + global_params.PhotonConsEndCalibz); // Scale factors of 0.03 and 0.17 correspond to redshifts of ~32 and ~5.0, respectively.
double C_HII = 3., T_0 = 2e4;
double reduce_ratio = 1.003;
double Q0,Q1,Nion0,Nion1,Trec,da,a,z0,z1,zi,dadt,ans,delta_a,zi_prev,Q1_prev;
double *z_arr,*Q_arr;
int Nmax = 2000; // This is the number of steps; it is sufficient for 'da = 2e-3'. If 'da' is reduced, this number should be re-checked.
int cnt, nbin, i, istart;
int fail_condition, not_mono_increasing, num_fails;
int gsl_status;
z_arr = calloc(Nmax,sizeof(double));
Q_arr = calloc(Nmax,sizeof(double));
//set the minimum source mass
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10;
M_MIN = astro_params->M_TURN/50.;
Mlim_Fstar = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_STAR, astro_params->F_STAR10);
Mlim_Fesc = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_ESC, astro_params->F_ESC10);
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20);
}
else{
initialiseSigmaMInterpTable(M_MIN,1e20);
}
}
else {
ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR;
}
fail_condition = 1;
num_fails = 0;
// We are going to come up with the analytic curve for the photon non-conservation correction
// This can be somewhat numerically unstable and as such we increase the sampling until it works
// If it fails to produce a monotonically increasing curve (for Q as a function of z) after 10 attempts we crash out
while(fail_condition!=0) {
a = a_start;
if(num_fails < 3) {
da = 3e-3 - ((double)num_fails)*(1e-3);
}
else {
da = 1e-3 - ((double)num_fails - 2.)*(1e-4);
}
delta_a = 1e-7;
zi_prev = Q1_prev = 0.;
not_mono_increasing = 0;
if(num_fails>0) {
for(i=0;i<Nmax;i++) {
z_arr[i] = 0.;
Q_arr[i] = 0.;
}
}
cnt = 0;
Q0 = 0.;
while (a < a_end) {
zi = 1./a - 1.;
z0 = 1./(a+delta_a) - 1.;
z1 = 1./(a-delta_a) - 1.;
// Cumulative number of ionizing photons per baryon at z0 and z1 (the central difference below gives the emissivity)
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
Nion0 = ION_EFF_FACTOR*Nion_General(z0, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10,
Mlim_Fstar, Mlim_Fesc);
Nion1 = ION_EFF_FACTOR*Nion_General(z1, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR,
astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10,
Mlim_Fstar, Mlim_Fesc);
}
else {
//set the minimum source mass
if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM
M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 1.22);
M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 1.22);
}
else { // ionized IGM
M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 0.6);
M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 0.6);
}
if(M_MIN_z0 < M_MIN_z1) {
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z0),1e20);
}
else{
initialiseSigmaMInterpTable(M_MIN_z0,1e20);
}
}
else {
if(user_params->FAST_FCOLL_TABLES){
initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z1),1e20);
}
else{
initialiseSigmaMInterpTable(M_MIN_z1,1e20);
}
}
Nion0 = ION_EFF_FACTOR*FgtrM_General(z0,M_MIN_z0);
Nion1 = ION_EFF_FACTOR*FgtrM_General(z1,M_MIN_z1);
freeSigmaMInterpTable();
}
// With scale factor a, the above equation becomes dQ/da = dn_{ion}/da - (Q/t_{rec})*(dt/da)
if (!global_params.RecombPhotonCons) {
Q1 = Q0 + ((Nion0-Nion1)/2/delta_a)*da; // No Recombination
}
else {
dadt = Ho*sqrt(cosmo_params_ps->OMm/a + global_params.OMr/a/a + cosmo_params_ps->OMl*a*a); // da/dt = Ho*a*sqrt(OMm/a^3 + OMr/a^4 + OMl)
Trec = 0.93 * 1e9 * SperYR * pow(C_HII/3.,-1) * pow(T_0/2e4,0.7) * pow((1.+zi)/7.,-3);
Q1 = Q0 + ((Nion0-Nion1)/2./delta_a - Q0/Trec/dadt)*da;
}
// Curve is no longer monotonically increasing, we are going to have to exit and start again
if(Q1 < Q1_prev) {
not_mono_increasing = 1;
break;
}
zi_prev = zi;
Q1_prev = Q1;
z_arr[cnt] = zi;
Q_arr[cnt] = Q1;
cnt = cnt + 1;
if (Q1 >= 1.0) break; // if fully ionized, stop here.
// As the Q value increases, the bin size decreases gradually because a more accurate calculation is required.
if (da < 7e-5) da = 7e-5; // set minimum bin size.
else da = pow(da,reduce_ratio);
Q0 = Q1;
a = a + da;
}
// A check to see if we ended up with a monotonically increasing function
if(not_mono_increasing==0) {
fail_condition = 0;
}
else {
num_fails += 1;
if(num_fails>10) {
LOG_ERROR("Failed too many times.");
// Throw ParameterError;
Throw(PhotonConsError);
}
}
}
cnt = cnt - 1;
istart = 0;
for (i=1;i<cnt;i++){
if (Q_arr[i-1] == 0. && Q_arr[i] != 0.) istart = i-1;
}
nbin = cnt - istart;
N_analytic = nbin;
// initialise interpolation of Q as a function of z
z_Q = calloc(nbin,sizeof(double));
Q_value = calloc(nbin,sizeof(double));
Q_at_z_spline_acc = gsl_interp_accel_alloc ();
Q_at_z_spline = gsl_spline_alloc (gsl_interp_cspline, nbin);
for (i=0; i<nbin; i++){
z_Q[i] = z_arr[cnt-i];
Q_value[i] = Q_arr[cnt-i];
}
gsl_set_error_handler_off();
gsl_status = gsl_spline_init(Q_at_z_spline, z_Q, Q_value, nbin);
GSL_ERROR(gsl_status);
Zmin = z_Q[0];
Zmax = z_Q[nbin-1];
Qmin = Q_value[nbin-1];
Qmax = Q_value[0];
// initialise interpolation z as a function of Q
double *Q_z = calloc(nbin,sizeof(double));
double *z_value = calloc(nbin,sizeof(double));
z_at_Q_spline_acc = gsl_interp_accel_alloc ();
z_at_Q_spline = gsl_spline_alloc (gsl_interp_linear, nbin);
for (i=0; i<nbin; i++){
Q_z[i] = Q_value[nbin-1-i];
z_value[i] = z_Q[nbin-1-i];
}
gsl_status = gsl_spline_init(z_at_Q_spline, Q_z, z_value, nbin);
GSL_ERROR(gsl_status);
// gsl_spline_init copies its input arrays, so these temporaries can be freed (plugs a small leak)
free(Q_z);
free(z_value);
free(z_arr);
free(Q_arr);
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
freeSigmaMInterpTable();
}
LOG_DEBUG("Initialised PhotonCons.");
} // End of try
Catch(status){
return status;
}
return(0);
}
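// Two minimal sketches (illustrative only; not called anywhere) of the core numerics
// used in InitialisePhotonCons above.
// (1) The averaged recombination time in seconds, mirroring the Trec expression in the
// integration loop; SperYR is the seconds-per-year constant already used in this file.
static double example_trec_seconds(double z, double C_HII, double T_0)
{
    // t_rec ~ 0.93 Gyr * (C_HII/3)^-1 * (T_0/2e4 K)^0.7 * ((1+z)/7)^-3
    return 0.93 * 1e9 * SperYR * pow(C_HII / 3., -1.) * pow(T_0 / 2e4, 0.7) * pow((1. + z) / 7., -3.);
}
// (2) One explicit-Euler step for Q in the no-recombination branch: dn_ion/da is
// approximated by a central difference of Nion evaluated at a +/- delta_a.
static double example_euler_Q_step(double Q0, double Nion0, double Nion1, double delta_a, double da)
{
    double dnion_da = (Nion0 - Nion1) / (2. * delta_a); // central difference in scale factor
    return Q0 + dnion_da * da;                          // Q1 = Q0 + (dQ/da) * da
}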
// Function to construct the spline for the calibration curve of the photon non-conservation
int PhotonCons_Calibration(double *z_estimate, double *xH_estimate, int NSpline){
int status;
Try{
if(xH_estimate[NSpline-1] > 0.0 && xH_estimate[NSpline-2] > 0.0 && xH_estimate[NSpline-3] > 0.0 && xH_estimate[0] <= global_params.PhotonConsStart) {
initialise_NFHistory_spline(z_estimate,xH_estimate,NSpline);
}
}
Catch(status){
return status;
}
return(0);
}
// Function callable from Python to know at which redshift to start sampling the calibration curve (to minimise function calls)
int ComputeZstart_PhotonCons(double *zstart) {
int status;
double temp;
Try{
if((1.-global_params.PhotonConsStart) > Qmax) {
// It is possible that reionisation never even starts
// Just need to arbitrarily set a high redshift to perform the algorithm
temp = 20.;
}
else {
z_at_Q(1. - global_params.PhotonConsStart,&(temp));
// Increase the result by 10 per cent to cover instances where this isn't high enough
temp *= 1.1;
}
}
Catch(status){
return(status); // Use the status to determine if something went wrong.
}
*zstart = temp;
return(0);
}
void determine_deltaz_for_photoncons() {
int i, j, increasing_val, counter, smoothing_int;
double temp;
float z_cal, z_analytic, NF_sample, returned_value, NF_sample_min, gradient_analytic, z_analytic_at_endpoint, const_offset, z_analytic_2, smoothing_width;
float bin_width, delta_NF, val1, val2, extrapolated_value;
LOG_DEBUG("Determining deltaz for photon cons.");
// Number of points for determining the delta z correction of the photon non-conservation
N_NFsamples = 100;
// Determine the change in neutral fraction to calculate the gradient for the linear extrapolation of the photon non-conservation correction
delta_NF = 0.025;
// A width (in neutral-fraction data points) over which we average to avoid sharp features in the correction (removes some kinks)
// Effectively acts as a filtering step
smoothing_width = 35.;
// The photon non-conservation correction has a threshold (in terms of neutral fraction; global_params.PhotonConsEnd) at which we switch
// from using the exact correction between the calibrated (21cmFAST with all flag options off) and analytic expressions to an extrapolation.
// This threshold is required due to the behaviour of 21cmFAST at very low neutral fractions, which causes extreme behaviour when recombinations are on
// A lot of the steps and choices are not completely robust; they were chosen to smooth/average the data so that the resultant reionisation histories are smoother
// Determine the number of extrapolated points required, if required at all.
if(calibrated_NF_min < global_params.PhotonConsEnd) {
// We require extrapolation, set minimum point to the threshold, and extrapolate beyond.
NF_sample_min = global_params.PhotonConsEnd;
// Determine the number of extrapolation points (to better smooth the correction) between the threshold (global_params.PhotonConsEnd) and a
// point close to zero neutral fraction (set by global_params.PhotonConsAsymptoteTo)
// The choice ensures the spacing in neutral fraction between extrapolated points is similar to the cadence of the exact correction
if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - calibrated_NF_min)/( global_params.PhotonConsStart - NF_sample_min );
}
else {
N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - global_params.PhotonConsAsymptoteTo)/( global_params.PhotonConsStart - NF_sample_min );
}
N_extrapolated = (int)floor( N_extrapolated ) - 1; // Minus one as the zero point is added below
}
else {
// No extrapolation required, neutral fraction never reaches zero
NF_sample_min = calibrated_NF_min;
N_extrapolated = 0;
}
// Determine the bin width for the sampling of the neutral fraction for the correction
bin_width = ( global_params.PhotonConsStart - NF_sample_min )/((float)N_NFsamples - 1.);
// allocate memory for arrays required to determine the photon non-conservation correction
deltaz = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
deltaz_smoothed = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
NeutralFractions = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
// Go through and fill the data points (neutral fraction and corresponding delta z between the calibrated and analytic curves).
for(i=0;i<N_NFsamples;i++) {
NF_sample = NF_sample_min + bin_width*(float)i;
// Determine redshift given a neutral fraction for the calibration curve
z_at_NFHist(NF_sample,&(temp));
z_cal = temp;
// Determine redshift given a neutral fraction for the analytic curve
z_at_Q(1. - NF_sample,&(temp));
z_analytic = temp;
deltaz[i+1+N_extrapolated] = fabs( z_cal - z_analytic );
NeutralFractions[i+1+N_extrapolated] = NF_sample;
}
// Determining the end-point (lowest neutral fraction) for the photon non-conservation correction
if(calibrated_NF_min >= global_params.PhotonConsEnd) {
increasing_val = 0;
counter = 0;
// Check if all the values of delta z are increasing
for(i=0;i<(N_NFsamples-1);i++) {
if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
counter += 1;
}
}
// If all the values of delta z are increasing, then some of the smoothing of the correction done below cannot be performed
if(counter==(N_NFsamples-1)) {
increasing_val = 1;
}
// Since reionisation never completes in the calibration, we need to set an appropriate end-point for the correction
// Take some fraction of the previous point to determine the end-point
NeutralFractions[0] = 0.999*NF_sample_min;
if(increasing_val) {
// Values of delta z are always increasing with decreasing neutral fraction thus make the last point slightly larger
deltaz[0] = 1.001*deltaz[1];
}
else {
// Values of delta z are always decreasing with decreasing neutral fraction thus make the last point slightly smaller
deltaz[0] = 0.999*deltaz[1];
}
}
else {
// Ok, we are going to be extrapolating the photon non-conservation (delta z) beyond the threshold
// Construct a linear curve for the analytic function to extrapolate to the new endpoint
// The choice for doing so is to ensure the corrected reionisation history is mostly smooth, and doesn't
// artificially result in kinks due to switching between how the delta z should be calculated
z_at_Q(1. - (NeutralFractions[1+N_extrapolated] + delta_NF),&(temp));
z_analytic = temp;
z_at_Q(1. - NeutralFractions[1+N_extrapolated],&(temp));
z_analytic_2 = temp;
// determine the linear curve
// Multiplication by 1.1 is arbitrary but effectively smooths out most kinks observed in the resultant corrected reionisation histories
gradient_analytic = 1.1*( delta_NF )/( z_analytic - z_analytic_2 );
const_offset = ( NeutralFractions[1+N_extrapolated] + delta_NF ) - gradient_analytic * z_analytic;
// determine the extrapolation end point
if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
extrapolated_value = calibrated_NF_min;
}
else {
extrapolated_value = global_params.PhotonConsAsymptoteTo;
}
// calculate the delta z for the extrapolated end point
z_at_NFHist(extrapolated_value,&(temp));
z_cal = temp;
z_analytic_at_endpoint = ( extrapolated_value - const_offset )/gradient_analytic ;
deltaz[0] = fabs( z_cal - z_analytic_at_endpoint );
NeutralFractions[0] = extrapolated_value;
// If performing extrapolation, add in all the extrapolated points between the end-point and the threshold to end the correction (global_params.PhotonConsEnd)
for(i=0;i<N_extrapolated;i++) {
if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
NeutralFractions[i+1] = calibrated_NF_min + (NF_sample_min - calibrated_NF_min)*(float)(i+1)/((float)N_extrapolated + 1.);
}
else {
NeutralFractions[i+1] = global_params.PhotonConsAsymptoteTo + (NF_sample_min - global_params.PhotonConsAsymptoteTo)*(float)(i+1)/((float)N_extrapolated + 1.);
}
deltaz[i+1] = deltaz[0] + ( deltaz[1+N_extrapolated] - deltaz[0] )*(float)(i+1)/((float)N_extrapolated + 1.);
}
}
// We have added the extrapolated values; now check whether they are all increasing (again, to determine whether to try and smooth the corrected curve)
increasing_val = 0;
counter = 0;
for(i=0;i<(N_NFsamples-1);i++) {
if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
counter += 1;
}
}
if(counter==(N_NFsamples-1)) {
increasing_val = 1;
}
// For some models, the resultant delta z for extremely high neutral fractions ( > 0.95) seems to oscillate or sometimes drops in value.
// This goes through and checks if this occurs, and tries to smooth this out
// This doesn't occur very often, but can cause an artificial drop in the reionisation history (neutral fraction value) connecting the
// values before/after the photon non-conservation correction starts.
for(i=0;i<(N_NFsamples+N_extrapolated);i++) {
val1 = deltaz[i];
val2 = deltaz[i+1];
counter = 0;
// Check if we have a neutral fraction above 0.95, that the values are decreasing (val2 < val1), that we haven't sampled too many points (counter)
// and that the NF_sample_min is less than around 0.8. That is, if a reasonable fraction of the reionisation history is sampled.
while( NeutralFractions[i+1] > 0.95 && val2 < val1 && NF_sample_min < 0.8 && counter < 100) {
NF_sample = global_params.PhotonConsStart - 0.001*(counter+1);
// Determine redshift given a neutral fraction for the calibration curve
z_at_NFHist(NF_sample,&(temp));
z_cal = temp;
// Determine redshift given a neutral fraction for the analytic curve
z_at_Q(1. - NF_sample,&(temp));
z_analytic = temp;
// Determine the delta z
val2 = fabs( z_cal - z_analytic );
deltaz[i+1] = val2;
counter += 1;
// If after 100 samplings we couldn't get the value to increase (like it should), just modify it from the previous point.
if(counter==100) {
deltaz[i+1] = deltaz[i] * 1.01;
}
}
}
// Store the data in its intermediate state before averaging
for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {
deltaz_smoothed[i] = deltaz[i];
}
// If we are not increasing for all values, we can smooth out some features in delta z when connecting the extrapolated delta z values
// compared to those from the exact correction (i.e. when we cross the threshold).
if(!increasing_val) {
for(i=0;i<(N_NFsamples+N_extrapolated);i++) {
val1 = deltaz[0];
val2 = deltaz[i+1];
counter = 0;
// Try and find a point which can be used to smooth out any dip in delta z as a function of neutral fraction.
// It can be flat, then drop, then increase. This smooths over this drop (removes a kink in the resultant reionisation history).
// Choice of 75 is somewhat arbitrary
while(val2 < val1 && (counter < 75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated))) {
counter += 1;
val2 = deltaz[i+1+counter];
deltaz_smoothed[i+1] = ( val1 + deltaz[1+(i+1)+counter] )/2.;
}
if(counter==75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated)) {
deltaz_smoothed[i+1] = deltaz[i+1];
}
}
}
// Here we effectively filter over the delta z as a function of neutral fraction to try and minimise any possible kinks etc. in the functional curve.
for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {
// We are at the end-points, cannot smooth
if(i==0 || i==(N_NFsamples+N_extrapolated)) {
deltaz[i] = deltaz_smoothed[i];
}
else {
deltaz[i] = 0.;
// We are symmetrically smoothing, making sure we have the same number of data points either side of the point we are filtering over
// This determines the filter width when close to the edge of the data ranges
if( (i - (int)floor(smoothing_width/2.) ) < 0) {
smoothing_int = 2*( i ) + (int)((int)smoothing_width%2);
}
else if( (i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) > (N_NFsamples + N_extrapolated) ) {
smoothing_int = ((int)smoothing_width - 1) - 2*((i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) - (N_NFsamples + N_extrapolated) ) + (int)((int)smoothing_width%2);
}
else {
smoothing_int = (int)smoothing_width;
}
// Average (filter) over the delta z values to smooth the result
counter = 0;
for(j=0;j<(int)smoothing_width;j++) {
if(((i - (int)floor((float)smoothing_int/2.) + j)>=0) && ((i - (int)floor((float)smoothing_int/2.) + j) <= (N_NFsamples + N_extrapolated + 1)) && counter < smoothing_int ) {
deltaz[i] += deltaz_smoothed[i - (int)floor((float)smoothing_int/2.) + j];
counter += 1;
}
}
deltaz[i] /= (float)counter;
}
}
N_deltaz = N_NFsamples + N_extrapolated + 1;
// Now, we can construct the spline of the photon non-conservation correction (delta z as a function of neutral fraction)
deltaz_spline_for_photoncons_acc = gsl_interp_accel_alloc ();
deltaz_spline_for_photoncons = gsl_spline_alloc (gsl_interp_linear, N_NFsamples + N_extrapolated + 1);
gsl_set_error_handler_off();
int gsl_status;
gsl_status = gsl_spline_init(deltaz_spline_for_photoncons, NeutralFractions, deltaz, N_NFsamples + N_extrapolated + 1);
GSL_ERROR(gsl_status);
}
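// A minimal sketch of the edge-aware boxcar width used in the smoothing loop above
// (illustrative only; not called anywhere): near either end of the data the window is
// shrunk so the same number of samples lies on each side of index i. n_last is the
// last valid index and full_width the nominal (integer) filter width.
static int example_boxcar_width(int i, int n_last, int full_width)
{
    int half = full_width / 2;
    if (i - half < 0)
        return 2 * i + (full_width % 2); // left edge: only i samples available on the left
    if (i - half + (full_width - 1) > n_last)
        return (full_width - 1) - 2 * ((i - half + (full_width - 1)) - n_last) + (full_width % 2); // right edge
    return full_width; // interior: use the full window
}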
void adjust_redshifts_for_photoncons(
struct AstroParams *astro_params, struct FlagOptions *flag_options, float *redshift,
float *stored_redshift, float *absolute_delta_z
) {
int i, new_counter;
double temp;
float required_NF, adjusted_redshift, future_z, gradient_extrapolation, const_extrapolation, temp_redshift, check_required_NF;
LOG_DEBUG("Adjusting redshifts for photon cons.");
if(*redshift < global_params.PhotonConsEndCalibz) {
LOG_ERROR(
"You have passed a redshift (z = %f) that is lower than the enpoint of the photon non-conservation correction "\
"(global_params.PhotonConsEndCalibz = %f). If this behaviour is desired then set global_params.PhotonConsEndCalibz "\
"to a value lower than z = %f.",*redshift,global_params.PhotonConsEndCalibz,*redshift
);
// Throw(ParameterError);
Throw(PhotonConsError);
}
// Determine the neutral fraction (filling factor) of the analytic calibration expression given the current sampled redshift
Q_at_z(*redshift, &(temp));
required_NF = 1.0 - (float)temp;
// Find which redshift we need to sample in order for the calibration reionisation history to match the analytic expression
if(required_NF > global_params.PhotonConsStart) {
// We haven't started ionising yet, so keep redshifts the same
adjusted_redshift = *redshift;
*absolute_delta_z = 0.;
}
else if(required_NF<=global_params.PhotonConsEnd) {
// We have gone beyond the threshold for the end of the photon non-conservation correction
// Deemed to be roughly where the calibration curve starts to approach the analytic expression
if(FirstNF_Estimate <= 0. && required_NF <= 0.0) {
// Reionisation has already happened well before the calibration
adjusted_redshift = *redshift;
}
else {
// We have crossed the NF threshold for the photon conservation correction so now set to the delta z at the threshold
if(required_NF < global_params.PhotonConsAsymptoteTo) {
// This counts the number of times we have exceeded the extrapolated point and attempts to modify the delta z
// to try and make the function a little smoother
*absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, global_params.PhotonConsAsymptoteTo, deltaz_spline_for_photoncons_acc);
new_counter = 0;
temp_redshift = *redshift;
check_required_NF = required_NF;
// Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR
// In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling
while( check_required_NF < global_params.PhotonConsAsymptoteTo ) {
temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.);
Q_at_z(temp_redshift, &(temp));
check_required_NF = 1.0 - (float)temp;
new_counter += 1;
}
// Now adjust the final delta_z by some amount to smooth it over successive steps
if(deltaz[1] > deltaz[0]) {
*absolute_delta_z = pow( 0.96 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
else {
*absolute_delta_z = pow( 1.04 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
// Check if we go into the future (z < 0) and avoid it
adjusted_redshift = (*redshift) - (*absolute_delta_z);
if(adjusted_redshift < 0.0) {
adjusted_redshift = 0.0;
}
}
else {
*absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, required_NF, deltaz_spline_for_photoncons_acc);
adjusted_redshift = (*redshift) - (*absolute_delta_z);
}
}
}
else {
// Initialise the photon non-conservation correction curve
if(!photon_cons_allocated) {
determine_deltaz_for_photoncons();
photon_cons_allocated = true;
}
// We have exceeded even the end-point of the extrapolation
// Just smooth every subsequent point
// Note that this is deliberately tailored to light-cone quantities, but will still work with co-eval cubes,
// though it might produce some very minor discrepancies when comparing outputs.
if(required_NF < NeutralFractions[0]) {
new_counter = 0;
temp_redshift = *redshift;
check_required_NF = required_NF;
// Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR
// In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling
while( check_required_NF < NeutralFractions[0] ) {
temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.);
Q_at_z(temp_redshift, &(temp));
check_required_NF = 1.0 - (float)temp;
new_counter += 1;
}
if(new_counter > 5) {
LOG_WARNING(
"The photon non-conservation correction has employed an extrapolation for\n"\
"more than 5 consecutive snapshots. This can be unstable, thus please check "\
"resultant history. Parameters are:\n"
);
#if LOG_LEVEL >= LOG_WARNING
writeAstroParams(flag_options, astro_params);
#endif
}
// Now adjust the final delta_z by some amount to smooth it over successive steps
if(deltaz[1] > deltaz[0]) {
*absolute_delta_z = pow( 0.998 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
else {
*absolute_delta_z = pow( 1.002 , (new_counter - 1) + 1. ) * ( *absolute_delta_z );
}
// Check if we go into the future (z < 0) and avoid it
adjusted_redshift = (*redshift) - (*absolute_delta_z);
if(adjusted_redshift < 0.0) {
adjusted_redshift = 0.0;
}
}
else {
// Find the corresponding redshift for the calibration curve given the required neutral fraction (filling factor) from the analytic expression
*absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, (double)required_NF, deltaz_spline_for_photoncons_acc);
adjusted_redshift = (*redshift) - (*absolute_delta_z);
}
}
// keep the original sampled redshift
*stored_redshift = *redshift;
// This redshift snapshot now uses the modified redshift following the photon non-conservation correction
*redshift = adjusted_redshift;
}
void Q_at_z(double z, double *splined_value){
float returned_value;
if (z >= Zmax) {
*splined_value = 0.;
}
else if (z <= Zmin) {
*splined_value = 1.;
}
else {
returned_value = gsl_spline_eval(Q_at_z_spline, z, Q_at_z_spline_acc);
*splined_value = returned_value;
}
}
void z_at_Q(double Q, double *splined_value){
float returned_value;
if (Q < Qmin) {
LOG_ERROR("The minimum value of Q is %.4e",Qmin);
// Throw(ParameterError);
Throw(PhotonConsError);
}
else if (Q > Qmax) {
LOG_ERROR("The maximum value of Q is %.4e. Reionization ends at ~%.4f.",Qmax,Zmin);
LOG_ERROR("This error can occur if global_params.PhotonConsEndCalibz is close to "\
"the final sampled redshift. One can consider a lower value for "\
"global_params.PhotonConsEndCalibz to mitigate this");
// Throw(ParameterError);
Throw(PhotonConsError);
}
else {
returned_value = gsl_spline_eval(z_at_Q_spline, Q, z_at_Q_spline_acc);
*splined_value = returned_value;
}
}
void free_Q_value() {
gsl_spline_free (Q_at_z_spline);
gsl_interp_accel_free (Q_at_z_spline_acc);
gsl_spline_free (z_at_Q_spline);
gsl_interp_accel_free (z_at_Q_spline_acc);
}
void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline){
int i, counter, start_index, found_start_index;
// This takes in the data for the calibration curve for the photon non-conservation correction
counter = 0;
start_index = 0;
found_start_index = 0;
FinalNF_Estimate = NF_estimate[0];
FirstNF_Estimate = NF_estimate[NSpline-1];
// Determine the point in the data where it's no longer zero (basically to avoid too many zeros in the spline)
for(i=0;i<NSpline-1;i++) {
if(NF_estimate[i+1] > NF_estimate[i]) {
if(found_start_index == 0) {
start_index = i;
found_start_index = 1;
}
}
counter += 1;
}
counter = counter - start_index;
N_calibrated = (counter+1);
// Store the data points for determining the photon non-conservation correction
nf_vals = calloc((counter+1),sizeof(double));
z_vals = calloc((counter+1),sizeof(double));
calibrated_NF_min = 1.;
// Store the data, and determine the end point of the input data for estimating the extrapolated results
for(i=0;i<(counter+1);i++) {
nf_vals[i] = NF_estimate[start_index+i];
z_vals[i] = redshifts[start_index+i];
// At the extreme high redshift end, there can be numerical issues with the solution of the analytic expression
if(i>0) {
while(nf_vals[i] <= nf_vals[i-1]) {
nf_vals[i] += 0.000001;
}
}
if(nf_vals[i] < calibrated_NF_min) {
calibrated_NF_min = nf_vals[i];
}
}
NFHistory_spline_acc = gsl_interp_accel_alloc ();
// NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1));
NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1));
gsl_set_error_handler_off();
int gsl_status;
gsl_status = gsl_spline_init(NFHistory_spline, nf_vals, z_vals, (counter+1));
GSL_ERROR(gsl_status);
z_NFHistory_spline_acc = gsl_interp_accel_alloc ();
// z_NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1));
z_NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1));
gsl_status = gsl_spline_init(z_NFHistory_spline, z_vals, nf_vals, (counter+1));
GSL_ERROR(gsl_status);
}
void z_at_NFHist(double xHI_Hist, double *splined_value){
float returned_value;
returned_value = gsl_spline_eval(NFHistory_spline, xHI_Hist, NFHistory_spline_acc);
*splined_value = returned_value;
}
void NFHist_at_z(double z, double *splined_value){
float returned_value;
returned_value = gsl_spline_eval(z_NFHistory_spline, z, z_NFHistory_spline_acc);
*splined_value = returned_value;
}
int ObtainPhotonConsData(
double *z_at_Q_data, double *Q_data, int *Ndata_analytic, double *z_cal_data,
double *nf_cal_data, int *Ndata_calibration,
double *PhotonCons_NFdata, double *PhotonCons_deltaz, int *Ndata_PhotonCons) {
int i;
*Ndata_analytic = N_analytic;
*Ndata_calibration = N_calibrated;
*Ndata_PhotonCons = N_deltaz;
for(i=0;i<N_analytic;i++) {
z_at_Q_data[i] = z_Q[i];
Q_data[i] = Q_value[i];
}
for(i=0;i<N_calibrated;i++) {
z_cal_data[i] = z_vals[i];
nf_cal_data[i] = nf_vals[i];
}
for(i=0;i<N_deltaz;i++) {
PhotonCons_NFdata[i] = NeutralFractions[i];
PhotonCons_deltaz[i] = deltaz[i];
}
return(0);
}
void FreePhotonConsMemory() {
LOG_DEBUG("Freeing some photon cons memory.");
free(deltaz);
free(deltaz_smoothed);
free(NeutralFractions);
free(z_Q);
free(Q_value);
free(nf_vals);
free(z_vals);
free_Q_value();
gsl_spline_free (NFHistory_spline);
gsl_interp_accel_free (NFHistory_spline_acc);
gsl_spline_free (z_NFHistory_spline);
gsl_interp_accel_free (z_NFHistory_spline_acc);
gsl_spline_free (deltaz_spline_for_photoncons);
gsl_interp_accel_free (deltaz_spline_for_photoncons_acc);
LOG_DEBUG("Done Freeing photon cons memory.");
photon_cons_allocated = false;
}
void FreeTsInterpolationTables(struct FlagOptions *flag_options) {
LOG_DEBUG("Freeing some interpolation table memory.");
freeSigmaMInterpTable();
if (flag_options->USE_MASS_DEPENDENT_ZETA) {
free(z_val); z_val = NULL;
free(Nion_z_val);
free(z_X_val); z_X_val = NULL;
free(SFRD_val);
if (flag_options->USE_MINI_HALOS){
free(Nion_z_val_MINI);
free(SFRD_val_MINI);
}
}
else{
free(FgtrM_1DTable_linear);
}
LOG_DEBUG("Done Freeing interpolation table memory.");
interpolation_tables_allocated = false;
}
|
convolution_1x1_pack8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8_avx(const Mat& kernel, Mat& weight_data_pack8, int num_input, int num_output)
{
// src = kw-kh-inch-outch
// dst = 8b-8a-kw-kh-inch/8a-outch/8b
Mat weight_data_r2 = kernel.reshape(1, num_input, num_output);
weight_data_pack8.create(1, num_input / 8, num_output / 8, (size_t)4 * 64, 64);
for (int q = 0; q + 7 < num_output; q += 8)
{
const Mat k0 = weight_data_r2.channel(q);
const Mat k1 = weight_data_r2.channel(q + 1);
const Mat k2 = weight_data_r2.channel(q + 2);
const Mat k3 = weight_data_r2.channel(q + 3);
const Mat k4 = weight_data_r2.channel(q + 4);
const Mat k5 = weight_data_r2.channel(q + 5);
const Mat k6 = weight_data_r2.channel(q + 6);
const Mat k7 = weight_data_r2.channel(q + 7);
Mat g0 = weight_data_pack8.channel(q / 8);
for (int p = 0; p + 7 < num_input; p += 8)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k04 = k0.row(p + 4);
const float* k05 = k0.row(p + 5);
const float* k06 = k0.row(p + 6);
const float* k07 = k0.row(p + 7);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k14 = k1.row(p + 4);
const float* k15 = k1.row(p + 5);
const float* k16 = k1.row(p + 6);
const float* k17 = k1.row(p + 7);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k24 = k2.row(p + 4);
const float* k25 = k2.row(p + 5);
const float* k26 = k2.row(p + 6);
const float* k27 = k2.row(p + 7);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k34 = k3.row(p + 4);
const float* k35 = k3.row(p + 5);
const float* k36 = k3.row(p + 6);
const float* k37 = k3.row(p + 7);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k44 = k4.row(p + 4);
const float* k45 = k4.row(p + 5);
const float* k46 = k4.row(p + 6);
const float* k47 = k4.row(p + 7);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k54 = k5.row(p + 4);
const float* k55 = k5.row(p + 5);
const float* k56 = k5.row(p + 6);
const float* k57 = k5.row(p + 7);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k64 = k6.row(p + 4);
const float* k65 = k6.row(p + 5);
const float* k66 = k6.row(p + 6);
const float* k67 = k6.row(p + 7);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
const float* k74 = k7.row(p + 4);
const float* k75 = k7.row(p + 5);
const float* k76 = k7.row(p + 6);
const float* k77 = k7.row(p + 7);
float* g00 = g0.row(p / 8);
g00[0] = k00[0];
g00[1] = k10[0];
g00[2] = k20[0];
g00[3] = k30[0];
g00[4] = k40[0];
g00[5] = k50[0];
g00[6] = k60[0];
g00[7] = k70[0];
g00 += 8;
g00[0] = k01[0];
g00[1] = k11[0];
g00[2] = k21[0];
g00[3] = k31[0];
g00[4] = k41[0];
g00[5] = k51[0];
g00[6] = k61[0];
g00[7] = k71[0];
g00 += 8;
g00[0] = k02[0];
g00[1] = k12[0];
g00[2] = k22[0];
g00[3] = k32[0];
g00[4] = k42[0];
g00[5] = k52[0];
g00[6] = k62[0];
g00[7] = k72[0];
g00 += 8;
g00[0] = k03[0];
g00[1] = k13[0];
g00[2] = k23[0];
g00[3] = k33[0];
g00[4] = k43[0];
g00[5] = k53[0];
g00[6] = k63[0];
g00[7] = k73[0];
g00 += 8;
g00[0] = k04[0];
g00[1] = k14[0];
g00[2] = k24[0];
g00[3] = k34[0];
g00[4] = k44[0];
g00[5] = k54[0];
g00[6] = k64[0];
g00[7] = k74[0];
g00 += 8;
g00[0] = k05[0];
g00[1] = k15[0];
g00[2] = k25[0];
g00[3] = k35[0];
g00[4] = k45[0];
g00[5] = k55[0];
g00[6] = k65[0];
g00[7] = k75[0];
g00 += 8;
g00[0] = k06[0];
g00[1] = k16[0];
g00[2] = k26[0];
g00[3] = k36[0];
g00[4] = k46[0];
g00[5] = k56[0];
g00[6] = k66[0];
g00[7] = k76[0];
g00 += 8;
g00[0] = k07[0];
g00[1] = k17[0];
g00[2] = k27[0];
g00[3] = k37[0];
g00[4] = k47[0];
g00[5] = k57[0];
g00[6] = k67[0];
g00[7] = k77[0];
g00 += 8;
}
}
}
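// A scalar reference for the packing above (illustrative sketch, assuming a dense
// [outch][inch] source and a dense destination; the real Mat may pad rows/channels).
// For each 8x8 input/output channel tile, weights are stored input-channel-major,
// with the 8 output-channel weights of one input channel kept contiguous so a single
// 8-wide load in the GEMM below fetches one weight column.
static void conv1x1_pack8_reference(const float* src, float* dst, int num_input, int num_output)
{
    for (int q = 0; q + 7 < num_output; q += 8)    // output-channel tile
        for (int p = 0; p + 7 < num_input; p += 8) // input-channel tile
            for (int pp = 0; pp < 8; pp++)         // input channel within the tile
                for (int qq = 0; qq < 8; qq++)     // output channel within the tile
                    *dst++ = src[(q + qq) * num_input + (p + pp)];
}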
static void conv1x1s1_sgemm_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
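// The spatial dimension (size = w*h) is split into tiles of 12, then 8, 4, 2 and 1
// pixels; each tmp channel holds one tile with its inch*8 input lanes stored
// contiguously, which is the layout the GEMM loops below consume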
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
{
int nn_size = size / 12;
int remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
__m256 _r8 = _mm256_loadu_ps(img0 + 64);
__m256 _r9 = _mm256_loadu_ps(img0 + 72);
__m256 _r10 = _mm256_loadu_ps(img0 + 80);
__m256 _r11 = _mm256_loadu_ps(img0 + 88);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
_mm256_storeu_ps(tmpptr + 64, _r8);
_mm256_storeu_ps(tmpptr + 72, _r9);
_mm256_storeu_ps(tmpptr + 80, _r10);
_mm256_storeu_ps(tmpptr + 88, _r11);
tmpptr += 96;
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
__m256 _r4 = _mm256_loadu_ps(img0 + 32);
__m256 _r5 = _mm256_loadu_ps(img0 + 40);
__m256 _r6 = _mm256_loadu_ps(img0 + 48);
__m256 _r7 = _mm256_loadu_ps(img0 + 56);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
_mm256_storeu_ps(tmpptr + 32, _r4);
_mm256_storeu_ps(tmpptr + 40, _r5);
_mm256_storeu_ps(tmpptr + 48, _r6);
_mm256_storeu_ps(tmpptr + 56, _r7);
tmpptr += 64;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
__m256 _r2 = _mm256_loadu_ps(img0 + 16);
__m256 _r3 = _mm256_loadu_ps(img0 + 24);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
_mm256_storeu_ps(tmpptr + 16, _r2);
_mm256_storeu_ps(tmpptr + 24, _r3);
tmpptr += 32;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
__m256 _r1 = _mm256_loadu_ps(img0 + 8);
_mm256_storeu_ps(tmpptr, _r0);
_mm256_storeu_ps(tmpptr + 8, _r1);
tmpptr += 16;
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 8;
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(img0);
_mm256_storeu_ps(tmpptr, _r0);
tmpptr += 8;
img0 += bottom_blob.cstep * 8;
}
}
}
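// GEMM: for each 8-channel output group, walk the packed input tile and the packed
// kernel together; each scalar input lane is broadcast across an 8-wide register and
// FMA'd against the matching 8-wide weight column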
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
__m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f);
float* outptr = out;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
__m256 _sum8 = _bias0;
__m256 _sum9 = _bias0;
__m256 _sum10 = _bias0;
__m256 _sum11 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
__m256 _val80 = _mm256_broadcast_ss(tmpptr + 64);
__m256 _val81 = _mm256_broadcast_ss(tmpptr + 65);
__m256 _val82 = _mm256_broadcast_ss(tmpptr + 66);
__m256 _val83 = _mm256_broadcast_ss(tmpptr + 67);
__m256 _val84 = _mm256_broadcast_ss(tmpptr + 68);
__m256 _val85 = _mm256_broadcast_ss(tmpptr + 69);
__m256 _val86 = _mm256_broadcast_ss(tmpptr + 70);
__m256 _val87 = _mm256_broadcast_ss(tmpptr + 71);
__m256 _val90 = _mm256_broadcast_ss(tmpptr + 72);
__m256 _val91 = _mm256_broadcast_ss(tmpptr + 73);
__m256 _val92 = _mm256_broadcast_ss(tmpptr + 74);
__m256 _val93 = _mm256_broadcast_ss(tmpptr + 75);
__m256 _val94 = _mm256_broadcast_ss(tmpptr + 76);
__m256 _val95 = _mm256_broadcast_ss(tmpptr + 77);
__m256 _val96 = _mm256_broadcast_ss(tmpptr + 78);
__m256 _val97 = _mm256_broadcast_ss(tmpptr + 79);
_sum8 = _mm256_fmadd_ps(_w0, _val80, _sum8);
_sum8 = _mm256_fmadd_ps(_w1, _val81, _sum8);
_sum8 = _mm256_fmadd_ps(_w2, _val82, _sum8);
_sum8 = _mm256_fmadd_ps(_w3, _val83, _sum8);
_sum8 = _mm256_fmadd_ps(_w4, _val84, _sum8);
_sum8 = _mm256_fmadd_ps(_w5, _val85, _sum8);
_sum8 = _mm256_fmadd_ps(_w6, _val86, _sum8);
_sum8 = _mm256_fmadd_ps(_w7, _val87, _sum8);
_sum9 = _mm256_fmadd_ps(_w0, _val90, _sum9);
_sum9 = _mm256_fmadd_ps(_w1, _val91, _sum9);
_sum9 = _mm256_fmadd_ps(_w2, _val92, _sum9);
_sum9 = _mm256_fmadd_ps(_w3, _val93, _sum9);
_sum9 = _mm256_fmadd_ps(_w4, _val94, _sum9);
_sum9 = _mm256_fmadd_ps(_w5, _val95, _sum9);
_sum9 = _mm256_fmadd_ps(_w6, _val96, _sum9);
_sum9 = _mm256_fmadd_ps(_w7, _val97, _sum9);
__m256 _val100 = _mm256_broadcast_ss(tmpptr + 80);
__m256 _val101 = _mm256_broadcast_ss(tmpptr + 81);
__m256 _val102 = _mm256_broadcast_ss(tmpptr + 82);
__m256 _val103 = _mm256_broadcast_ss(tmpptr + 83);
__m256 _val104 = _mm256_broadcast_ss(tmpptr + 84);
__m256 _val105 = _mm256_broadcast_ss(tmpptr + 85);
__m256 _val106 = _mm256_broadcast_ss(tmpptr + 86);
__m256 _val107 = _mm256_broadcast_ss(tmpptr + 87);
__m256 _val110 = _mm256_broadcast_ss(tmpptr + 88);
__m256 _val111 = _mm256_broadcast_ss(tmpptr + 89);
__m256 _val112 = _mm256_broadcast_ss(tmpptr + 90);
__m256 _val113 = _mm256_broadcast_ss(tmpptr + 91);
__m256 _val114 = _mm256_broadcast_ss(tmpptr + 92);
__m256 _val115 = _mm256_broadcast_ss(tmpptr + 93);
__m256 _val116 = _mm256_broadcast_ss(tmpptr + 94);
__m256 _val117 = _mm256_broadcast_ss(tmpptr + 95);
_sum10 = _mm256_fmadd_ps(_w0, _val100, _sum10);
_sum10 = _mm256_fmadd_ps(_w1, _val101, _sum10);
_sum10 = _mm256_fmadd_ps(_w2, _val102, _sum10);
_sum10 = _mm256_fmadd_ps(_w3, _val103, _sum10);
_sum10 = _mm256_fmadd_ps(_w4, _val104, _sum10);
_sum10 = _mm256_fmadd_ps(_w5, _val105, _sum10);
_sum10 = _mm256_fmadd_ps(_w6, _val106, _sum10);
_sum10 = _mm256_fmadd_ps(_w7, _val107, _sum10);
_sum11 = _mm256_fmadd_ps(_w0, _val110, _sum11);
_sum11 = _mm256_fmadd_ps(_w1, _val111, _sum11);
_sum11 = _mm256_fmadd_ps(_w2, _val112, _sum11);
_sum11 = _mm256_fmadd_ps(_w3, _val113, _sum11);
_sum11 = _mm256_fmadd_ps(_w4, _val114, _sum11);
_sum11 = _mm256_fmadd_ps(_w5, _val115, _sum11);
_sum11 = _mm256_fmadd_ps(_w6, _val116, _sum11);
_sum11 = _mm256_fmadd_ps(_w7, _val117, _sum11);
tmpptr += 96;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
_mm256_storeu_ps(outptr + 64, _sum8);
_mm256_storeu_ps(outptr + 72, _sum9);
_mm256_storeu_ps(outptr + 80, _sum10);
_mm256_storeu_ps(outptr + 88, _sum11);
outptr += 96;
}
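// Remainder tiles: after the 12-wide blocks above, finish the row in blocks
// of 8, 4, 2 and 1 columns. The tmp.channel(...) expressions below locate
// the packed block that holds column i under this 12/8/4/2/1 tiling.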
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
__m256 _sum4 = _bias0;
__m256 _sum5 = _bias0;
__m256 _sum6 = _bias0;
__m256 _sum7 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
__m256 _val40 = _mm256_broadcast_ss(tmpptr + 32);
__m256 _val41 = _mm256_broadcast_ss(tmpptr + 33);
__m256 _val42 = _mm256_broadcast_ss(tmpptr + 34);
__m256 _val43 = _mm256_broadcast_ss(tmpptr + 35);
__m256 _val44 = _mm256_broadcast_ss(tmpptr + 36);
__m256 _val45 = _mm256_broadcast_ss(tmpptr + 37);
__m256 _val46 = _mm256_broadcast_ss(tmpptr + 38);
__m256 _val47 = _mm256_broadcast_ss(tmpptr + 39);
__m256 _val50 = _mm256_broadcast_ss(tmpptr + 40);
__m256 _val51 = _mm256_broadcast_ss(tmpptr + 41);
__m256 _val52 = _mm256_broadcast_ss(tmpptr + 42);
__m256 _val53 = _mm256_broadcast_ss(tmpptr + 43);
__m256 _val54 = _mm256_broadcast_ss(tmpptr + 44);
__m256 _val55 = _mm256_broadcast_ss(tmpptr + 45);
__m256 _val56 = _mm256_broadcast_ss(tmpptr + 46);
__m256 _val57 = _mm256_broadcast_ss(tmpptr + 47);
_sum4 = _mm256_fmadd_ps(_w0, _val40, _sum4);
_sum4 = _mm256_fmadd_ps(_w1, _val41, _sum4);
_sum4 = _mm256_fmadd_ps(_w2, _val42, _sum4);
_sum4 = _mm256_fmadd_ps(_w3, _val43, _sum4);
_sum4 = _mm256_fmadd_ps(_w4, _val44, _sum4);
_sum4 = _mm256_fmadd_ps(_w5, _val45, _sum4);
_sum4 = _mm256_fmadd_ps(_w6, _val46, _sum4);
_sum4 = _mm256_fmadd_ps(_w7, _val47, _sum4);
_sum5 = _mm256_fmadd_ps(_w0, _val50, _sum5);
_sum5 = _mm256_fmadd_ps(_w1, _val51, _sum5);
_sum5 = _mm256_fmadd_ps(_w2, _val52, _sum5);
_sum5 = _mm256_fmadd_ps(_w3, _val53, _sum5);
_sum5 = _mm256_fmadd_ps(_w4, _val54, _sum5);
_sum5 = _mm256_fmadd_ps(_w5, _val55, _sum5);
_sum5 = _mm256_fmadd_ps(_w6, _val56, _sum5);
_sum5 = _mm256_fmadd_ps(_w7, _val57, _sum5);
__m256 _val60 = _mm256_broadcast_ss(tmpptr + 48);
__m256 _val61 = _mm256_broadcast_ss(tmpptr + 49);
__m256 _val62 = _mm256_broadcast_ss(tmpptr + 50);
__m256 _val63 = _mm256_broadcast_ss(tmpptr + 51);
__m256 _val64 = _mm256_broadcast_ss(tmpptr + 52);
__m256 _val65 = _mm256_broadcast_ss(tmpptr + 53);
__m256 _val66 = _mm256_broadcast_ss(tmpptr + 54);
__m256 _val67 = _mm256_broadcast_ss(tmpptr + 55);
__m256 _val70 = _mm256_broadcast_ss(tmpptr + 56);
__m256 _val71 = _mm256_broadcast_ss(tmpptr + 57);
__m256 _val72 = _mm256_broadcast_ss(tmpptr + 58);
__m256 _val73 = _mm256_broadcast_ss(tmpptr + 59);
__m256 _val74 = _mm256_broadcast_ss(tmpptr + 60);
__m256 _val75 = _mm256_broadcast_ss(tmpptr + 61);
__m256 _val76 = _mm256_broadcast_ss(tmpptr + 62);
__m256 _val77 = _mm256_broadcast_ss(tmpptr + 63);
_sum6 = _mm256_fmadd_ps(_w0, _val60, _sum6);
_sum6 = _mm256_fmadd_ps(_w1, _val61, _sum6);
_sum6 = _mm256_fmadd_ps(_w2, _val62, _sum6);
_sum6 = _mm256_fmadd_ps(_w3, _val63, _sum6);
_sum6 = _mm256_fmadd_ps(_w4, _val64, _sum6);
_sum6 = _mm256_fmadd_ps(_w5, _val65, _sum6);
_sum6 = _mm256_fmadd_ps(_w6, _val66, _sum6);
_sum6 = _mm256_fmadd_ps(_w7, _val67, _sum6);
_sum7 = _mm256_fmadd_ps(_w0, _val70, _sum7);
_sum7 = _mm256_fmadd_ps(_w1, _val71, _sum7);
_sum7 = _mm256_fmadd_ps(_w2, _val72, _sum7);
_sum7 = _mm256_fmadd_ps(_w3, _val73, _sum7);
_sum7 = _mm256_fmadd_ps(_w4, _val74, _sum7);
_sum7 = _mm256_fmadd_ps(_w5, _val75, _sum7);
_sum7 = _mm256_fmadd_ps(_w6, _val76, _sum7);
_sum7 = _mm256_fmadd_ps(_w7, _val77, _sum7);
tmpptr += 64;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
_mm256_storeu_ps(outptr + 32, _sum4);
_mm256_storeu_ps(outptr + 40, _sum5);
_mm256_storeu_ps(outptr + 48, _sum6);
_mm256_storeu_ps(outptr + 56, _sum7);
outptr += 64;
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
__m256 _sum2 = _bias0;
__m256 _sum3 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
__m256 _val20 = _mm256_broadcast_ss(tmpptr + 16);
__m256 _val21 = _mm256_broadcast_ss(tmpptr + 17);
__m256 _val22 = _mm256_broadcast_ss(tmpptr + 18);
__m256 _val23 = _mm256_broadcast_ss(tmpptr + 19);
__m256 _val24 = _mm256_broadcast_ss(tmpptr + 20);
__m256 _val25 = _mm256_broadcast_ss(tmpptr + 21);
__m256 _val26 = _mm256_broadcast_ss(tmpptr + 22);
__m256 _val27 = _mm256_broadcast_ss(tmpptr + 23);
__m256 _val30 = _mm256_broadcast_ss(tmpptr + 24);
__m256 _val31 = _mm256_broadcast_ss(tmpptr + 25);
__m256 _val32 = _mm256_broadcast_ss(tmpptr + 26);
__m256 _val33 = _mm256_broadcast_ss(tmpptr + 27);
__m256 _val34 = _mm256_broadcast_ss(tmpptr + 28);
__m256 _val35 = _mm256_broadcast_ss(tmpptr + 29);
__m256 _val36 = _mm256_broadcast_ss(tmpptr + 30);
__m256 _val37 = _mm256_broadcast_ss(tmpptr + 31);
_sum2 = _mm256_fmadd_ps(_w0, _val20, _sum2);
_sum2 = _mm256_fmadd_ps(_w1, _val21, _sum2);
_sum2 = _mm256_fmadd_ps(_w2, _val22, _sum2);
_sum2 = _mm256_fmadd_ps(_w3, _val23, _sum2);
_sum2 = _mm256_fmadd_ps(_w4, _val24, _sum2);
_sum2 = _mm256_fmadd_ps(_w5, _val25, _sum2);
_sum2 = _mm256_fmadd_ps(_w6, _val26, _sum2);
_sum2 = _mm256_fmadd_ps(_w7, _val27, _sum2);
_sum3 = _mm256_fmadd_ps(_w0, _val30, _sum3);
_sum3 = _mm256_fmadd_ps(_w1, _val31, _sum3);
_sum3 = _mm256_fmadd_ps(_w2, _val32, _sum3);
_sum3 = _mm256_fmadd_ps(_w3, _val33, _sum3);
_sum3 = _mm256_fmadd_ps(_w4, _val34, _sum3);
_sum3 = _mm256_fmadd_ps(_w5, _val35, _sum3);
_sum3 = _mm256_fmadd_ps(_w6, _val36, _sum3);
_sum3 = _mm256_fmadd_ps(_w7, _val37, _sum3);
tmpptr += 32;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
_mm256_storeu_ps(outptr + 16, _sum2);
_mm256_storeu_ps(outptr + 24, _sum3);
outptr += 32;
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
__m256 _sum0 = _bias0;
__m256 _sum1 = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val00 = _mm256_broadcast_ss(tmpptr);
__m256 _val01 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val02 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val03 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val04 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val05 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val06 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val07 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _val10 = _mm256_broadcast_ss(tmpptr + 8);
__m256 _val11 = _mm256_broadcast_ss(tmpptr + 9);
__m256 _val12 = _mm256_broadcast_ss(tmpptr + 10);
__m256 _val13 = _mm256_broadcast_ss(tmpptr + 11);
__m256 _val14 = _mm256_broadcast_ss(tmpptr + 12);
__m256 _val15 = _mm256_broadcast_ss(tmpptr + 13);
__m256 _val16 = _mm256_broadcast_ss(tmpptr + 14);
__m256 _val17 = _mm256_broadcast_ss(tmpptr + 15);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum0 = _mm256_fmadd_ps(_w0, _val00, _sum0);
_sum0 = _mm256_fmadd_ps(_w1, _val01, _sum0);
_sum0 = _mm256_fmadd_ps(_w2, _val02, _sum0);
_sum0 = _mm256_fmadd_ps(_w3, _val03, _sum0);
_sum0 = _mm256_fmadd_ps(_w4, _val04, _sum0);
_sum0 = _mm256_fmadd_ps(_w5, _val05, _sum0);
_sum0 = _mm256_fmadd_ps(_w6, _val06, _sum0);
_sum0 = _mm256_fmadd_ps(_w7, _val07, _sum0);
_sum1 = _mm256_fmadd_ps(_w0, _val10, _sum1);
_sum1 = _mm256_fmadd_ps(_w1, _val11, _sum1);
_sum1 = _mm256_fmadd_ps(_w2, _val12, _sum1);
_sum1 = _mm256_fmadd_ps(_w3, _val13, _sum1);
_sum1 = _mm256_fmadd_ps(_w4, _val14, _sum1);
_sum1 = _mm256_fmadd_ps(_w5, _val15, _sum1);
_sum1 = _mm256_fmadd_ps(_w6, _val16, _sum1);
_sum1 = _mm256_fmadd_ps(_w7, _val17, _sum1);
tmpptr += 16;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum0);
_mm256_storeu_ps(outptr + 8, _sum1);
outptr += 16;
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
__m256 _sum = _bias0;
const float* kptr = (const float*)kernel + p * inch * 64;
for (int q = 0; q < inch; q++)
{
__m256 _val0 = _mm256_broadcast_ss(tmpptr);
__m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
__m256 _val2 = _mm256_broadcast_ss(tmpptr + 2);
__m256 _val3 = _mm256_broadcast_ss(tmpptr + 3);
__m256 _val4 = _mm256_broadcast_ss(tmpptr + 4);
__m256 _val5 = _mm256_broadcast_ss(tmpptr + 5);
__m256 _val6 = _mm256_broadcast_ss(tmpptr + 6);
__m256 _val7 = _mm256_broadcast_ss(tmpptr + 7);
__m256 _w0 = _mm256_loadu_ps(kptr);
__m256 _w1 = _mm256_loadu_ps(kptr + 8);
__m256 _w2 = _mm256_loadu_ps(kptr + 16);
__m256 _w3 = _mm256_loadu_ps(kptr + 24);
__m256 _w4 = _mm256_loadu_ps(kptr + 32);
__m256 _w5 = _mm256_loadu_ps(kptr + 40);
__m256 _w6 = _mm256_loadu_ps(kptr + 48);
__m256 _w7 = _mm256_loadu_ps(kptr + 56);
_sum = _mm256_fmadd_ps(_w0, _val0, _sum);
_sum = _mm256_fmadd_ps(_w1, _val1, _sum);
_sum = _mm256_fmadd_ps(_w2, _val2, _sum);
_sum = _mm256_fmadd_ps(_w3, _val3, _sum);
_sum = _mm256_fmadd_ps(_w4, _val4, _sum);
_sum = _mm256_fmadd_ps(_w5, _val5, _sum);
_sum = _mm256_fmadd_ps(_w6, _val6, _sum);
_sum = _mm256_fmadd_ps(_w7, _val7, _sum);
tmpptr += 8;
kptr += 64;
}
_mm256_storeu_ps(outptr, _sum);
outptr += 8;
}
}
}
static void conv1x1s2_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
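// Stride-2 1x1 convolution: copy every second pixel of every second row into
// a shrunken blob, then reuse the stride-1 packed sgemm path on it.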
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
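// Floats to skip after each output row: the unread tail of the current input
// row plus one whole skipped row (stride 2 in h), times 8 floats per pack.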
const int tailstep = (w - 2 * outw + w) * 8;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
__m256 _v = _mm256_loadu_ps(r0);
_mm256_storeu_ps(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8_avx(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
omp_utils.h | /**
 * \file omp_utils.h
* \brief OpenMP utilities
*/
#ifndef OMP_UTILS_H
#define OMP_UTILS_H
#ifdef USEOPENMP
#include <omp.h>
#endif // USEOPENMP
namespace NBody
{
#ifdef USEOPENMP
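/// Returns the number of threads available to an OpenMP parallel region,
/// measured by opening a region and querying omp_get_num_threads() inside it.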
static int get_available_threads()
{
int nthreads = 1;
#pragma omp parallel
#pragma omp single
{
nthreads = omp_get_num_threads();
}
return nthreads;
}
#else
static int get_available_threads()
{
return 1;
}
#endif // USEOPENMP
#ifdef USEOPENMP
/// Returns whether OpenMP nesting is enabled or not
static bool _omp_get_nested()
{
#if _OPENMP >= 200805
return omp_get_max_active_levels() > 1;
#else
return omp_get_nested();
#endif
}
/// Set OpenMP to enabled (or not) nested parallelism
static void _omp_set_nested(bool enable)
{
static constexpr int MAX_OPENMP_ACTIVE_LEVELS = 20;
#if _OPENMP >= 200805
omp_set_max_active_levels(enable ? MAX_OPENMP_ACTIVE_LEVELS : 1);
#else
omp_set_nested(int(enable));
#endif
}
#endif // USEOPENMP
#ifdef USEOPENMP
/**
* A class that enables OpenMP nested calls upon construction, if requested,
* and restores the previous behavior upon destruction.
*/
class OmpNestedEnabler
{
public:
OmpNestedEnabler(bool enable)
: nested_previously_enabled(_omp_get_nested()),
_available_threads(get_available_threads())
{
if (nested_previously_enabled || _available_threads == 1) {
return;
}
_omp_set_nested(enable);
}
~OmpNestedEnabler()
{
_omp_set_nested(nested_previously_enabled);
}
int available_threads()
{
return _available_threads;
}
private:
bool nested_previously_enabled;
int _available_threads;
};
#else
class OmpNestedEnabler
{
public:
OmpNestedEnabler(bool enable)
{
}
int available_threads()
{
return get_available_threads();
}
};
#endif
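/**
 * Illustrative usage sketch (an assumption, not part of the original header);
 * work() stands in for any callable that may open its own parallel region:
 *
 *   {
 *       OmpNestedEnabler nested(true); // enable nesting for this scope
 *       #pragma omp parallel for num_threads(nested.available_threads())
 *       for (int i = 0; i < n; i++) work(i); // inner regions may now nest
 *   } // destructor restores the previous nesting setting
 */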
}
#endif // OMP_UTILS_H |
residual_criteria.h | //    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi
//
#if !defined(KRATOS_RESIDUAL_CRITERIA )
#define KRATOS_RESIDUAL_CRITERIA
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
#include "includes/define.h"
#include "utilities/constraint_utilities.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualCriteria
* @ingroup KratosCore
 * @brief This is a convergence criterion that employs the residual as the measure of convergence
* @details The reactions from the RHS are not computed in the residual
* @author Riccardo Rossi
*/
template<class TSparseSpace,
class TDenseSpace
>
class ResidualCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualCriteria );
/// The definition of the base ConvergenceCriteria
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
/// The definition of the current class
typedef ResidualCriteria< TSparseSpace, TDenseSpace > ClassType;
/// The data type
typedef typename BaseType::TDataType TDataType;
/// The dofs array type
typedef typename BaseType::DofsArrayType DofsArrayType;
/// The sparse matrix type
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
/// The dense vector type
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// Definition of the IndexType
typedef std::size_t IndexType;
/// Definition of the size type
typedef std::size_t SizeType;
///@}
///@name Life Cycle
///@{
//* Constructor.
explicit ResidualCriteria()
: BaseType()
{
}
/**
* @brief Default constructor. (with parameters)
* @param ThisParameters The configuration parameters
*/
explicit ResidualCriteria(Kratos::Parameters ThisParameters)
: BaseType()
{
// Validate and assign defaults
ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
this->AssignSettings(ThisParameters);
this->mActualizeRHSIsNeeded = true;
}
//* Constructor.
explicit ResidualCriteria(
TDataType NewRatioTolerance,
TDataType AlwaysConvergedNorm)
: BaseType(),
mRatioTolerance(NewRatioTolerance),
mAlwaysConvergedNorm(AlwaysConvergedNorm)
{
this->mActualizeRHSIsNeeded = true;
}
//* Copy constructor.
explicit ResidualCriteria( ResidualCriteria const& rOther )
:BaseType(rOther)
,mRatioTolerance(rOther.mRatioTolerance)
,mInitialResidualNorm(rOther.mInitialResidualNorm)
,mCurrentResidualNorm(rOther.mCurrentResidualNorm)
,mAlwaysConvergedNorm(rOther.mAlwaysConvergedNorm)
,mReferenceDispNorm(rOther.mReferenceDispNorm)
{
this->mActualizeRHSIsNeeded = true;
}
//* Destructor.
~ResidualCriteria() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Create method
* @param ThisParameters The configuration parameters
*/
typename BaseType::Pointer Create(Parameters ThisParameters) const override
{
return Kratos::make_shared<ClassType>(ThisParameters);
}
/**
 * @brief Criteria that need to be checked after obtaining the solution
* @details Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
const SizeType size_b = TSparseSpace::Size(rb);
if (size_b != 0) { //if we are solving for something
SizeType size_residual;
CalculateResidualNorm(rModelPart, mCurrentResidualNorm, size_residual, rDofSet, rb);
TDataType ratio = 0.0;
if(mInitialResidualNorm < std::numeric_limits<TDataType>::epsilon()) {
ratio = 0.0;
} else {
ratio = mCurrentResidualNorm/mInitialResidualNorm;
}
const TDataType float_size_residual = static_cast<TDataType>(size_residual);
const TDataType absolute_norm = (mCurrentResidualNorm/float_size_residual);
KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Initial residual norm = " << mInitialResidualNorm << "; Current residual norm = " << mCurrentResidualNorm << "]" << std::endl;
KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Obtained ratio = " << ratio << "; Expected ratio = " << mRatioTolerance << "; Absolute norm = " << absolute_norm << "; Expected norm = " << mAlwaysConvergedNorm << "]" << std::endl;
rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio;
rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm;
if (ratio <= mRatioTolerance || absolute_norm < mAlwaysConvergedNorm) {
KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << "Convergence is achieved" << std::endl;
return true;
} else {
return false;
}
} else {
return true;
}
}
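// The test above declares convergence when ratio = ||r|| / ||r0|| is at most
// mRatioTolerance, or when the size-averaged norm ||r|| / num_dofs drops
// below mAlwaysConvergedNorm, whichever is satisfied first.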
/**
* @brief This function initialize the convergence criteria
* @param rModelPart Reference to the ModelPart containing the problem. (unused)
*/
void Initialize(ModelPart& rModelPart) override
{
BaseType::Initialize(rModelPart);
KRATOS_ERROR_IF(rModelPart.IsDistributed() && rModelPart.NumberOfMasterSlaveConstraints() > 0) << "This Criteria does not yet support constraints in MPI!" << std::endl;
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
BaseType::InitializeSolutionStep(rModelPart, rDofSet, rA, rDx, rb);
// Filling mActiveDofs when MPC exist
if (rModelPart.NumberOfMasterSlaveConstraints() > 0) {
ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}
SizeType size_residual;
CalculateResidualNorm(rModelPart, mInitialResidualNorm, size_residual, rDofSet, rb);
}
/**
* @brief This function finalizes the solution step
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param A System matrix (unused)
* @param Dx Vector of results (variations on nodal variables)
* @param b RHS vector (residual + reactions)
*/
void FinalizeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
BaseType::FinalizeSolutionStep(rModelPart, rDofSet, rA, rDx, rb);
}
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
Parameters default_parameters = Parameters(R"(
{
"name" : "residual_criteria",
"residual_absolute_tolerance" : 1.0e-4,
"residual_relative_tolerance" : 1.0e-9
})");
// Getting base class default parameters
const Parameters base_default_parameters = BaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/**
* @brief Returns the name of the class as used in the settings (snake_case format)
* @return The name of the class
*/
static std::string Name()
{
return "residual_criteria";
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualCriteria";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method computes the norm of the residual
* @details It checks if the dof is fixed
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rResidualSolutionNorm The norm of the residual
* @param rDofNum The number of DoFs
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rb RHS vector (residual + reactions)
*/
virtual void CalculateResidualNorm(
ModelPart& rModelPart,
TDataType& rResidualSolutionNorm,
SizeType& rDofNum,
DofsArrayType& rDofSet,
const TSystemVectorType& rb
)
{
// Initialize
TDataType residual_solution_norm = TDataType();
SizeType dof_num = 0;
// Auxiliary values
TDataType residual_dof_value = 0.0;
const auto it_dof_begin = rDofSet.begin();
const int number_of_dof = static_cast<int>(rDofSet.size());
// Loop over Dofs
if (rModelPart.NumberOfMasterSlaveConstraints() > 0) {
#pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm, dof_num)
for (int i = 0; i < number_of_dof; i++) {
auto it_dof = it_dof_begin + i;
const IndexType dof_id = it_dof->EquationId();
if (mActiveDofs[dof_id] == 1) {
residual_dof_value = TSparseSpace::GetValue(rb,dof_id);
residual_solution_norm += std::pow(residual_dof_value, 2);
dof_num++;
}
}
} else {
#pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm, dof_num)
for (int i = 0; i < number_of_dof; i++) {
auto it_dof = it_dof_begin + i;
if (!it_dof->IsFixed()) {
const IndexType dof_id = it_dof->EquationId();
residual_dof_value = TSparseSpace::GetValue(rb,dof_id);
residual_solution_norm += std::pow(residual_dof_value, 2);
dof_num++;
}
}
}
rDofNum = dof_num;
rResidualSolutionNorm = std::sqrt(residual_solution_norm);
}
/**
* @brief This method assigns settings to member variables
* @param ThisParameters Parameters that are assigned to the member variables
*/
void AssignSettings(const Parameters ThisParameters) override
{
BaseType::AssignSettings(ThisParameters);
mAlwaysConvergedNorm = ThisParameters["residual_absolute_tolerance"].GetDouble();
mRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble();
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
TDataType mRatioTolerance; /// The ratio threshold for the norm of the residual
TDataType mInitialResidualNorm; /// The reference norm of the residual
TDataType mCurrentResidualNorm; /// The current norm of the residual
TDataType mAlwaysConvergedNorm; /// The absolute value threshold for the norm of the residual
TDataType mReferenceDispNorm; /// The norm at the beginning of the iterations
std::vector<int> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class ResidualCriteria
///@}
///@name Type Definitions
///@{
///@}
} // namespace Kratos.
#endif // KRATOS_RESIDUAL_CRITERIA defined
|
par_relax_more.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* a few more relaxation schemes: Chebyshev, FCF-Jacobi, CG -
* these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
*
*****************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "float.h"
/******************************************************************************
*
* use Gershgorin discs to estimate smallest and largest eigenvalues
* A is assumed to be symmetric
* For SPD matrix, it returns [0, max_eig = max (aii + ri)],
* ri is radius of disc centered at a_ii
* For SND matrix, it returns [min_eig = min (aii - ri), 0]
*
* scale > 0: compute eigen estimate of D^{-1/2}*A*D^{-1/2}, where
* D = diag(A) for SPD matrix, D = -diag(A) for SND
*
* scale = 1: The algorithm is performed on D^{-1}*A, since it
* has the same eigenvalues as D^{-1/2}*A*D^{-1/2}
* scale = 2: The algorithm is performed on D^{-1/2}*A*D^{-1/2} (TODO)
*
*****************************************************************************/
HYPRE_Int
hypre_ParCSRMaxEigEstimateHost( hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal? */
HYPRE_Real *max_eig,
HYPRE_Real *min_eig )
{
HYPRE_Int A_num_rows = hypre_ParCSRMatrixNumRows(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(A));
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(A));
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(A));
HYPRE_Real *diag = NULL;
HYPRE_Int i, j;
HYPRE_Real e_max, e_min;
HYPRE_Real send_buf[2], recv_buf[2];
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
if (scale > 1)
{
diag = hypre_TAlloc(HYPRE_Real, A_num_rows, memory_location);
}
for (i = 0; i < A_num_rows; i++)
{
HYPRE_Real a_ii = 0.0, r_i = 0.0, lower, upper;
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
if (A_diag_j[j] == i)
{
a_ii = A_diag_data[j];
}
else
{
r_i += hypre_abs(A_diag_data[j]);
}
}
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
r_i += hypre_abs(A_offd_data[j]);
}
lower = a_ii - r_i;
upper = a_ii + r_i;
if (scale == 1)
{
lower /= hypre_abs(a_ii);
upper /= hypre_abs(a_ii);
}
if (i)
{
e_max = hypre_max(e_max, upper);
e_min = hypre_min(e_min, lower);
}
else
{
e_max = upper;
e_min = lower;
}
}
send_buf[0] = -e_min;
send_buf[1] = e_max;
/* get e_min e_max across procs */
hypre_MPI_Allreduce(send_buf, recv_buf, 2, HYPRE_MPI_REAL, hypre_MPI_MAX,
hypre_ParCSRMatrixComm(A));
e_min = -recv_buf[0];
e_max = recv_buf[1];
/* return */
if ( hypre_abs(e_min) > hypre_abs(e_max) )
{
*min_eig = e_min;
*max_eig = hypre_min(0.0, e_max);
}
else
{
*min_eig = hypre_max(e_min, 0.0);
*max_eig = e_max;
}
hypre_TFree(diag, memory_location);
return hypre_error_flag;
}
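/*
 * Worked example (illustrative, not from the original file): for an interior
 * row of the 1D Laplacian stencil [-1 2 -1], a_ii = 2 and r_i = 2, so the
 * Gershgorin disc gives [0, 4]; with scale = 1 the bounds become [0, 2],
 * matching the spectrum of D^{-1}*A for this matrix.
 */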
/**
* @brief Estimates the max eigenvalue using infinity norm. Will determine
* whether or not to use host or device internally
*
* @param[in] A Matrix to relax with
 * @param[in] scale Whether to scale by the diagonal
 * @param[out] max_eig Estimated max eigenvalue
 * @param[out] min_eig Estimated min eigenvalue
*/
HYPRE_Int
hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Real *max_eig,
HYPRE_Real *min_eig)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPushRange("ParCSRMaxEigEstimate");
#endif
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
HYPRE_Int ierr = 0;
if (exec == HYPRE_EXEC_HOST)
{
ierr = hypre_ParCSRMaxEigEstimateHost(A, scale, max_eig, min_eig);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
else
{
ierr = hypre_ParCSRMaxEigEstimateDevice(A, scale, max_eig, min_eig);
}
#endif
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPopRange();
#endif
return ierr;
}
/**
* @brief Uses CG to get the eigenvalue estimate. Will determine whether to use
* host or device internally
*
* @param[in] A Matrix to relax with
* @param[in] scale Gets the eigenvalue est of D^{-1/2} A D^{-1/2}
* @param[in] max_iter Maximum number of iterations for CG
* @param[out] max_eig Estimated max eigenvalue
* @param[out] min_eig Estimated min eigenvalue
*/
HYPRE_Int
hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int max_iter,
HYPRE_Real *max_eig,
HYPRE_Real *min_eig)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPushRange("ParCSRMaxEigEstimateCG");
#endif
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(A));
HYPRE_Int ierr = 0;
if (exec == HYPRE_EXEC_HOST)
{
ierr = hypre_ParCSRMaxEigEstimateCGHost(A, scale, max_iter, max_eig, min_eig);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
else
{
ierr = hypre_ParCSRMaxEigEstimateCGDevice(A, scale, max_iter, max_eig, min_eig);
}
#endif
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPopRange();
#endif
return ierr;
}
/**
* @brief Uses CG to get the eigenvalue estimate on the host
*
* @param[in] A Matrix to relax with
* @param[in] scale Gets the eigenvalue est of D^{-1/2} A D^{-1/2}
* @param[in] max_iter Maximum number of iterations for CG
* @param[out] max_eig Estimated max eigenvalue
* @param[out] min_eig Estimated min eigenvalue
*/
HYPRE_Int
hypre_ParCSRMaxEigEstimateCGHost( hypre_ParCSRMatrix *A, /* matrix to relax with */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int max_iter,
HYPRE_Real *max_eig,
HYPRE_Real *min_eig )
{
HYPRE_Int i, j, err;
hypre_ParVector *p;
hypre_ParVector *s;
hypre_ParVector *r;
hypre_ParVector *ds;
hypre_ParVector *u;
HYPRE_Real *tridiag = NULL;
HYPRE_Real *trioffd = NULL;
HYPRE_Real lambda_max ;
HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
HYPRE_Real lambda_min;
HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
HYPRE_Int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
/* check the size of A - don't iterate more than the size */
HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);
if (size < (HYPRE_BigInt) max_iter)
{
max_iter = (HYPRE_Int) size;
}
/* create some temp vectors: p, s, r , ds, u*/
r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(r);
p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p);
s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(s);
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(u);
/* point to local data */
s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
/* make room for tri-diag matrix */
tridiag = hypre_CTAlloc(HYPRE_Real, max_iter + 1, HYPRE_MEMORY_HOST);
trioffd = hypre_CTAlloc(HYPRE_Real, max_iter + 1, HYPRE_MEMORY_HOST);
for (i = 0; i < max_iter + 1; i++)
{
tridiag[i] = 0;
trioffd[i] = 0;
}
/* set residual to random */
hypre_ParVectorSetRandomValues(r, 1);
if (scale)
{
hypre_CSRMatrixExtractDiagonal(hypre_ParCSRMatrixDiag(A), ds_data, 4);
}
else
{
/* set ds to 1 */
hypre_ParVectorSetConstantValues(ds, 1.0);
}
/* gamma = <r,Cr> */
gamma = hypre_ParVectorInnerProd(r, p);
/* for the initial filling of the tridiag matrix */
beta = 1.0;
i = 0;
while (i < max_iter)
{
/* s = C*r */
/* TO DO: C = diag scale */
hypre_ParVectorCopy(r, s);
/*gamma = <r,Cr> */
gamma_old = gamma;
gamma = hypre_ParVectorInnerProd(r, s);
if (gamma < HYPRE_REAL_EPSILON)
{
break;
}
if (i == 0)
{
beta = 1.0;
/* p_0 = C*r */
hypre_ParVectorCopy(s, p);
}
else
{
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta * p_data[j];
}
}
if (scale)
{
/* s = D^{-1/2}A*D^{-1/2}*p */
for (j = 0; j < local_size; j++)
{
u_data[j] = ds_data[j] * p_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
for (j = 0; j < local_size; j++)
{
s_data[j] = ds_data[j] * s_data[j];
}
}
else
{
/* s = A*p */
hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
}
/* <s,p> */
sdotp = hypre_ParVectorInnerProd(s, p);
/* alpha = gamma / <s,p> */
alpha = gamma / sdotp;
/* get tridiagonal matrix */
alphainv = 1.0 / alpha;
tridiag[i + 1] = alphainv;
tridiag[i] *= beta;
tridiag[i] += alphainv;
trioffd[i + 1] = alphainv;
trioffd[i] *= sqrt(beta);
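/* The updates above assemble the Lanczos tridiagonal matrix from the CG
   coefficients: T(i,i) = 1/alpha_i + beta_i/alpha_{i-1} and
   T(i,i+1) = sqrt(beta_{i+1})/alpha_i. Its extreme eigenvalues
   approximate those of the (optionally scaled) operator. */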
/* x = x + alpha*p */
/* don't need */
/* r = r - alpha*s */
hypre_ParVectorAxpy(-alpha, s, r);
i++;
}
/* eispack routine - eigenvalues return in tridiag and ordered*/
hypre_LINPACKcgtql1(&i, tridiag, trioffd, &err);
lambda_max = tridiag[i - 1];
lambda_min = tridiag[0];
/* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
/* hypre_printf("linpack min eig est = %g\n", lambda_min);*/
hypre_TFree(tridiag, HYPRE_MEMORY_HOST);
hypre_TFree(trioffd, HYPRE_MEMORY_HOST);
hypre_ParVectorDestroy(r);
hypre_ParVectorDestroy(s);
hypre_ParVectorDestroy(p);
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(u);
/* return */
*max_eig = lambda_max;
*min_eig = lambda_min;
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation
Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of iteratively determining)
variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1
variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1
ratio indicates the fraction of the whole spectrum to use (so .5
means half, and .1 means 10 percent)
*******************************************************************************/
HYPRE_Int
hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
hypre_ParVector *f, /* right-hand side */
HYPRE_Real max_eig,
HYPRE_Real min_eig,
HYPRE_Real fraction,
HYPRE_Int order, /* polynomial order */
HYPRE_Int scale, /* scale by diagonal?*/
HYPRE_Int variant,
hypre_ParVector *u, /* initial/updated approximation */
hypre_ParVector *v, /* temporary vector */
hypre_ParVector *r /*another temp vector */)
{
HYPRE_Real *coefs = NULL;
HYPRE_Real *ds_data = NULL;
hypre_ParVector *tmp_vec = NULL;
hypre_ParVector *orig_u_vec = NULL;
hypre_ParCSRRelax_Cheby_Setup(A, max_eig, min_eig, fraction, order, scale, variant, &coefs,
&ds_data);
orig_u_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize_v2(orig_u_vec, hypre_ParCSRMatrixMemoryLocation(A));
if (scale)
{
tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize_v2(tmp_vec, hypre_ParCSRMatrixMemoryLocation(A));
}
hypre_ParCSRRelax_Cheby_Solve(A, f, ds_data, coefs, order, scale, variant, u, v, r, orig_u_vec,
tmp_vec);
hypre_TFree(ds_data, hypre_ParCSRMatrixMemoryLocation(A));
hypre_TFree(coefs, HYPRE_MEMORY_HOST);
hypre_ParVectorDestroy(orig_u_vec);
hypre_ParVectorDestroy(tmp_vec);
return hypre_error_flag;
}
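#if 0
/* Illustrative sketch, not part of the original file: pair a CG eigenvalue
   estimate with one pass of diagonally scaled Chebyshev relaxation. A, f, u,
   v and r are assumed to be already-initialized hypre objects; the parameter
   choices below are examples, not recommendations. */
{
   HYPRE_Real max_eig, min_eig;
   hypre_ParCSRMaxEigEstimateCG(A, 1, 10, &max_eig, &min_eig);
   hypre_ParCSRRelax_Cheby(A, f, max_eig, min_eig,
                           0.3 /* fraction of spectrum */, 2 /* order */,
                           1 /* scale */, 0 /* variant */, u, v, r);
}
#endif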
/*--------------------------------------------------------------------------
* CG Smoother
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRRelax_CG( HYPRE_Solver solver,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u,
HYPRE_Int num_its)
{
HYPRE_PCGSetMaxIter(solver, num_its); /* max iterations */
HYPRE_PCGSetTol(solver, 0.0); /* convergence tolerance (run all iterations) */
HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);
#if 0
{
HYPRE_Int myid;
HYPRE_Int num_iterations;
HYPRE_Real final_res_norm;
hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
HYPRE_PCGGetNumIterations(solver, &num_iterations);
HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
if (myid == 0)
{
hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
}
}
#endif
return hypre_error_flag;
}
/* tql1.f --
this is the eispack translation - from Barry Smith in Petsc
Note that this routine always uses real numbers (not complex) even
if the underlying matrix is Hermitian. This is because the Lanczos
process applied to Hermitian matrices always produces a real,
symmetric tridiagonal matrix.
*/
HYPRE_Int
hypre_LINPACKcgtql1(HYPRE_Int *n, HYPRE_Real *d, HYPRE_Real *e, HYPRE_Int *ierr)
{
/* System generated locals */
HYPRE_Int i__1, i__2;
HYPRE_Real d__1, d__2, c_b10 = 1.0;
/* Local variables */
HYPRE_Real c, f, g, h;
HYPRE_Int i, j, l, m;
HYPRE_Real p, r, s, c2, c3 = 0.0;
HYPRE_Int l1, l2;
HYPRE_Real s2 = 0.0;
HYPRE_Int ii;
HYPRE_Real dl1, el1;
HYPRE_Int mml;
HYPRE_Real tst1, tst2;
/* THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/* NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/* WILKINSON. */
/* HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */
/* THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/* TRIDIAGONAL MATRIX BY THE QL METHOD. */
/* ON INPUT */
/* N IS THE ORDER OF THE MATRIX. */
/* D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */
/* E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/* IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. */
/* ON OUTPUT */
/* D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN */
/* ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/* ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/* THE SMALLEST EIGENVALUES. */
/* E HAS BEEN DESTROYED. */
/* IERR IS SET TO */
/* ZERO FOR NORMAL RETURN, */
/* J IF THE J-TH EIGENVALUE HAS NOT BEEN */
/* DETERMINED AFTER 30 ITERATIONS. */
/* CALLS CGPTHY FOR DSQRT(A*A + B*B) . */
/* QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/* MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/
/* THIS VERSION DATED AUGUST 1983. */
/* ------------------------------------------------------------------
*/
HYPRE_Real ds;
--e;
--d;
*ierr = 0;
if (*n == 1)
{
goto L1001;
}
i__1 = *n;
for (i = 2; i <= i__1; ++i)
{
e[i - 1] = e[i];
}
f = 0.;
tst1 = 0.;
e[*n] = 0.;
i__1 = *n;
for (l = 1; l <= i__1; ++l)
{
j = 0;
h = (d__1 = d[l], fabs(d__1)) + (d__2 = e[l], fabs(d__2));
if (tst1 < h)
{
tst1 = h;
}
/* .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
i__2 = *n;
for (m = l; m <= i__2; ++m)
{
tst2 = tst1 + (d__1 = e[m], fabs(d__1));
if (tst2 == tst1)
{
goto L120;
}
/* .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
/* THROUGH THE BOTTOM OF THE LOOP .......... */
}
L120:
if (m == l)
{
goto L210;
}
L130:
if (j == 30)
{
goto L1000;
}
++j;
/* .......... FORM SHIFT .......... */
l1 = l + 1;
l2 = l1 + 1;
g = d[l];
p = (d[l1] - g) / (e[l] * 2.);
r = hypre_LINPACKcgpthy(&p, &c_b10);
ds = 1.0;
if (p < 0.0) { ds = -1.0; }
d[l] = e[l] / (p + ds * r);
d[l1] = e[l] * (p + ds * r);
dl1 = d[l1];
h = g - d[l];
if (l2 > *n)
{
goto L145;
}
i__2 = *n;
for (i = l2; i <= i__2; ++i)
{
d[i] -= h;
}
L145:
f += h;
/* .......... QL TRANSFORMATION .......... */
p = d[m];
c = 1.;
c2 = c;
el1 = e[l1];
s = 0.;
mml = m - l;
/* .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
i__2 = mml;
for (ii = 1; ii <= i__2; ++ii)
{
c3 = c2;
c2 = c;
s2 = s;
i = m - ii;
g = c * e[i];
h = c * p;
r = hypre_LINPACKcgpthy(&p, &e[i]);
e[i + 1] = s * r;
s = e[i] / r;
c = p / r;
p = c * d[i] - s * g;
d[i + 1] = h + s * (c * g + s * d[i]);
}
p = -s * s2 * c3 * el1 * e[l] / dl1;
e[l] = s * p;
d[l] = c * p;
tst2 = tst1 + (d__1 = e[l], fabs(d__1));
if (tst2 > tst1)
{
goto L130;
}
L210:
p = d[l] + f;
/* .......... ORDER EIGENVALUES .......... */
if (l == 1)
{
goto L250;
}
/* .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
i__2 = l;
for (ii = 2; ii <= i__2; ++ii)
{
i = l + 2 - ii;
if (p >= d[i - 1])
{
goto L270;
}
d[i] = d[i - 1];
}
L250:
i = 1;
L270:
d[i] = p;
}
goto L1001;
/* .......... SET ERROR -- NO CONVERGENCE TO AN */
/* EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
*ierr = l;
L1001:
return 0;
} /* cgtql1_ */
HYPRE_Real
hypre_LINPACKcgpthy(HYPRE_Real *a, HYPRE_Real *b)
{
/* System generated locals */
HYPRE_Real ret_val, d__1, d__2, d__3;
/* Local variables */
HYPRE_Real p, r, s, t, u;
/* FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW */
/* Computing MAX */
d__1 = fabs(*a), d__2 = fabs(*b);
p = hypre_max(d__1, d__2);
if (!p)
{
goto L20;
}
/* Computing MIN */
d__2 = fabs(*a), d__3 = fabs(*b);
/* Computing 2nd power */
d__1 = hypre_min(d__2, d__3) / p;
r = d__1 * d__1;
L10:
t = r + 4.;
if (t == 4.)
{
goto L20;
}
s = r / t;
u = s * 2. + 1.;
p = u * p;
/* Computing 2nd power */
d__1 = s / u;
r = d__1 * d__1 * r;
goto L10;
L20:
ret_val = p;
return ret_val;
} /* cgpthy_ */
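/* For example, with a = 3 and b = 4 the iteration converges to 5.0 while only
   squaring the ratio min/max (here (3/4)^2), so no intermediate quantity can
   overflow even when a or b is close to the floating-point maximum. */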
|