source | c |
|---|---|
csr_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_PROFILE
HYPRE_Real time_begin = hypre_MPI_Wtime();
#endif
#if defined(HYPRE_USING_CUDA) /* CUDA */
#ifdef HYPRE_BIGINT
HYPRE_Int ierr = hypre_CSRMatrixMatvecDeviceBIGINT(alpha, A, x, beta, b, y, offset);
#else
HYPRE_Int ierr = hypre_CSRMatrixMatvecDevice(0, alpha, A, x, beta, b, y, offset);
#endif
#elif defined(HYPRE_USING_DEVICE_OPENMP) /* OMP 4.5 */
HYPRE_Int ierr = hypre_CSRMatrixMatvecOutOfPlaceOOMP(0, alpha, A, x, beta, b, y, offset);
#else /* CPU */
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset;
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A) - offset;
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
/*HYPRE_Int num_nnz = hypre_CSRMatrixNumNonzeros(A);*/
HYPRE_Int *A_rownnz = hypre_CSRMatrixRownnz(A);
HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *b_data = hypre_VectorData(b) + offset;
HYPRE_Complex *y_data = hypre_VectorData(y) + offset;
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int b_size = hypre_VectorSize(b) - offset;
HYPRE_Int y_size = hypre_VectorSize(y) - offset;
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
/*HYPRE_Int idxstride_b = hypre_VectorIndexStride(b);
HYPRE_Int vecstride_b = hypre_VectorVectorStride(b);*/
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp, tempx;
HYPRE_Int i, j, jj, m, ierr=0;
HYPRE_Real xpar=0.7;
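/* xpar is a heuristic threshold: the compressed rownnz loop below is taken
   only when fewer than ~70% of the rows are nonempty, i.e. when
   num_rownnz < 0.7*num_rows. */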
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
hypre_assert( num_vectors == hypre_VectorNumVectors(b) );
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size || num_rows != b_size)
ierr = 2;
if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = beta*b_data[i];
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
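/* Factoring alpha out as y = alpha*(A*x + (beta/alpha)*b) lets the inner
   loops accumulate plain dot products; alpha is applied once at the end.
   alpha == 0 was handled above, so the division here is safe. */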
/* use rownnz pointer to do the A*x multiplication when num_rownnz is smaller than num_rows */
if (num_rownnz < xpar*(num_rows) || num_vectors > 1)
{
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = b_data[i]*temp;
}
}
else
{
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] = b_data[i];
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
if (num_rownnz < xpar*(num_rows))
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,m,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rownnz; i++)
{
m = A_rownnz[i];
/*
* for (jj = A_i[m]; jj < A_i[m+1]; jj++)
* {
* j = A_j[jj];
* y_data[m] += A_data[jj] * x_data[j];
* } */
if ( num_vectors==1 )
{
tempx = 0;
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[A_j[jj]];
y_data[m] += tempx;
}
else
for ( j=0; j<num_vectors; ++j )
{
tempx = 0;
for (jj = A_i[m]; jj < A_i[m+1]; jj++)
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
y_data[ j*vecstride_y + m*idxstride_y] += tempx;
}
}
}
else // num_vectors > 1
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jj,tempx) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (j = 0; j < num_vectors; ++j)
{
tempx = 0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[ j*vecstride_x + A_j[jj]*idxstride_x ];
}
y_data[ j*vecstride_y + i*idxstride_y ] += tempx;
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows*num_vectors; i++)
y_data[i] *= alpha;
}
}
else
{ // JSP: this is currently the only path optimized
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,tempx)
#endif
{
HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
hypre_assert(iBegin <= iEnd);
hypre_assert(iBegin >= 0 && iBegin <= num_rows);
hypre_assert(iEnd >= 0 && iEnd <= num_rows);
if (0 == temp)
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = 0.0;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*A*x
} // temp == 0
else if (-1 == temp) // beta == -alpha
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x - y
else if (-1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x + y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x - y)
} // temp == -1
else if (1 == temp)
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x + y
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x - y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x + y)
}
else
{
if (1 == alpha) // JSP: a common path
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = A*x + temp*y
else if (-1 == alpha)
{
for (i = iBegin; i < iEnd; i++)
{
tempx = -b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx -= A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = tempx;
}
} // y = -A*x - temp*y
else
{
for (i = iBegin; i < iEnd; i++)
{
tempx = b_data[i]*temp;
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
tempx += A_data[jj] * x_data[A_j[jj]];
}
y_data[i] = alpha*tempx;
}
} // y = alpha*(A*x + temp*y)
} // temp != 0 && temp != -1 && temp != 1
} // omp parallel
}
if (x == y)
{
hypre_SeqVectorDestroy(x_tmp);
}
#endif /* CPU */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATVEC] += hypre_MPI_Wtime() - time_begin;
#endif
return ierr;
}
HYPRE_Int
hypre_CSRMatrixMatvec( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
return hypre_CSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y, 0);
}
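/* Usage sketch (illustrative only, not part of the library): given a CSR
   matrix A and size-compatible seq vectors x and y that the caller has
   already built, compute y = 2*A*x + 0.5*y. The nonzero return value is
   purely informational and flags size mismatches as described above. */
#if 0
   HYPRE_Int info = hypre_CSRMatrixMatvec(2.0, A, x, 0.5, y);
#endif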
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvecT
*
* This version is using a different (more efficient) threading scheme
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y )
{
#if defined(HYPRE_USING_CUDA) /* CUDA */
HYPRE_Int ierr = hypre_CSRMatrixMatvecDevice(1, alpha, A, x, beta, y, y, 0 );
#elif defined(HYPRE_USING_DEVICE_OPENMP) /* OMP 4.5 */
HYPRE_Int ierr = hypre_CSRMatrixMatvecOutOfPlaceOOMP(1, alpha, A, x, beta, y, y, 0);
#else /* CPU */
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(x);
HYPRE_Int idxstride_y = hypre_VectorIndexStride(y);
HYPRE_Int vecstride_y = hypre_VectorVectorStride(y);
HYPRE_Int idxstride_x = hypre_VectorIndexStride(x);
HYPRE_Int vecstride_x = hypre_VectorVectorStride(x);
HYPRE_Complex temp;
HYPRE_Complex *y_data_expand;
HYPRE_Int my_thread_num = 0, offset = 0;
HYPRE_Int i, j, jv, jj;
HYPRE_Int num_threads;
HYPRE_Int ierr = 0;
hypre_Vector *x_tmp = NULL;
/*---------------------------------------------------------------------
* Check for size compatibility. MatvecT returns ierr = 1 if
* length of X doesn't equal the number of rows of A,
* ierr = 2 if the length of Y doesn't equal the number of
* columns of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in MatvecT, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
hypre_assert( num_vectors == hypre_VectorNumVectors(y) );
if (num_rows != x_size)
ierr = 1;
if (num_cols != y_size)
ierr = 2;
if (num_rows != x_size && num_cols != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= beta;
return ierr;
}
if (x == y)
{
x_tmp = hypre_SeqVectorCloneDeep(x);
x_data = hypre_VectorData(x_tmp);
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A^T*x
*-----------------------------------------------------------------*/
num_threads = hypre_NumThreads();
if (num_threads > 1)
{
y_data_expand = hypre_CTAlloc(HYPRE_Complex, num_threads*y_size, HYPRE_MEMORY_HOST);
if ( num_vectors==1 )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,jj,j,my_thread_num,offset)
#endif
{
my_thread_num = hypre_GetThreadNum();
offset = y_size*my_thread_num;
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data_expand[offset + j] += A_data[jj] * x_data[i];
}
}
/* implied barrier (for threads)*/
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < y_size; i++)
{
for (j = 0; j < num_threads; j++)
{
y_data[i] += y_data_expand[j*y_size + i];
}
}
} /* end parallel threaded region */
}
else
{
/* multiple vector case is not threaded */
for (i = 0; i < num_rows; i++)
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x];
}
}
}
}
hypre_TFree(y_data_expand, HYPRE_MEMORY_HOST);
}
else
{
for (i = 0; i < num_rows; i++)
{
if ( num_vectors==1 )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[j] += A_data[jj] * x_data[i];
}
}
else
{
for ( jv=0; jv<num_vectors; ++jv )
{
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
{
j = A_j[jj];
y_data[ j*idxstride_y + jv*vecstride_y ] +=
A_data[jj] * x_data[ i*idxstride_x + jv*vecstride_x ];
}
}
}
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_cols*num_vectors; i++)
{
y_data[i] *= alpha;
}
}
if (x == y) hypre_SeqVectorDestroy(x_tmp);
#endif
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec_FF
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatvec_FF( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *y,
HYPRE_Int *CF_marker_x,
HYPRE_Int *CF_marker_y,
HYPRE_Int fpt )
{
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *y_data = hypre_VectorData(y);
HYPRE_Int x_size = hypre_VectorSize(x);
HYPRE_Int y_size = hypre_VectorSize(y);
HYPRE_Complex temp;
HYPRE_Int i, jj;
HYPRE_Int ierr = 0;
/*---------------------------------------------------------------------
* Check for size compatibility. Matvec returns ierr = 1 if
* length of X doesn't equal the number of columns of A,
* ierr = 2 if the length of Y doesn't equal the number of rows
* of A, and ierr = 3 if both are true.
*
* Because temporary vectors are often used in Matvec, none of
* these conditions terminates processing, and the ierr flag
* is informational only.
*--------------------------------------------------------------------*/
if (num_cols != x_size)
ierr = 1;
if (num_rows != y_size)
ierr = 2;
if (num_cols != x_size && num_rows != y_size)
ierr = 3;
/*-----------------------------------------------------------------------
* Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
*-----------------------------------------------------------------------*/
if (alpha == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= beta;
return ierr;
}
/*-----------------------------------------------------------------------
* y = (beta/alpha)*y
*-----------------------------------------------------------------------*/
temp = beta / alpha;
if (temp != 1.0)
{
if (temp == 0.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] = 0.0;
}
else
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= temp;
}
}
/*-----------------------------------------------------------------
* y += A*x
*-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,jj) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
{
if (CF_marker_x[i] == fpt)
{
temp = y_data[i];
for (jj = A_i[i]; jj < A_i[i+1]; jj++)
if (CF_marker_y[A_j[jj]] == fpt) temp += A_data[jj] * x_data[A_j[jj]];
y_data[i] = temp;
}
}
/*-----------------------------------------------------------------
* y = alpha*y
*-----------------------------------------------------------------*/
if (alpha != 1.0)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows; i++)
if (CF_marker_x[i] == fpt) y_data[i] *= alpha;
}
return ierr;
}
#if defined(HYPRE_USING_CUDA)
HYPRE_Int
hypre_CSRMatrixMatvecDevice( HYPRE_Int trans,
HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_BIGINT
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR: hypre_CSRMatvecDevice should not be called when bigint is enabled!");
#else
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle);
cusparseMatDescr_t descr = hypre_HandleCusparseMatDescr(hypre_handle);
hypre_CSRMatrixPrefetch(A, HYPRE_MEMORY_DEVICE);
hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
hypre_SeqVectorPrefetch(b, HYPRE_MEMORY_DEVICE);
if (b != y)
{
hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
}
if (b != y)
{
HYPRE_THRUST_CALL( copy_n, b->data, y->size-offset, y->data );
}
if (x == y)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
}
// TODO
if (offset != 0)
{
hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n");
}
hypre_assert(offset == 0);
if (trans)
{
HYPRE_Complex *csc_a = hypre_TAlloc(HYPRE_Complex, A->num_nonzeros, HYPRE_MEMORY_DEVICE);
HYPRE_Int *csc_j = hypre_TAlloc(HYPRE_Int, A->num_nonzeros, HYPRE_MEMORY_DEVICE);
HYPRE_Int *csc_i = hypre_TAlloc(HYPRE_Int, A->num_cols+1, HYPRE_MEMORY_DEVICE);
HYPRE_CUSPARSE_CALL( cusparseDcsr2csc(handle, A->num_rows, A->num_cols, A->num_nonzeros,
A->data, A->i, A->j, csc_a, csc_j, csc_i,
CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO) );
HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
A->num_cols, A->num_rows, A->num_nonzeros,
&alpha, descr,
csc_a, csc_i, csc_j,
x->data, &beta, y->data) );
hypre_TFree(csc_a, HYPRE_MEMORY_DEVICE);
hypre_TFree(csc_i, HYPRE_MEMORY_DEVICE);
hypre_TFree(csc_j, HYPRE_MEMORY_DEVICE);
}
else
{
HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
A->num_rows-offset, A->num_cols, A->num_nonzeros,
&alpha, descr,
A->data, A->i+offset, A->j,
x->data, &beta, y->data+offset) );
}
hypre_SyncCudaComputeStream(hypre_handle);
#endif
return hypre_error_flag;
}
HYPRE_Int
hypre_CSRMatrixMatvecDeviceBIGINT( HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
#ifdef HYPRE_BIGINT
#error "TODO BigInt"
#endif
return 0;
}
#endif
|
GB_binop__isgt_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__isgt_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint8)
// A*D function (colscale): GB (_AxD__isgt_uint8)
// D*A function (rowscale): GB (_DxB__isgt_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint8)
// C=scalar+B GB (_bind1st__isgt_uint8)
// C=scalar+B' GB (_bind1st_tran__isgt_uint8)
// C=A+scalar GB (_bind2nd__isgt_uint8)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
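// Note: ISGT is an "IS" comparator, so unlike GrB_GT_UINT8 (which returns
// bool) it stores the 0/1 result in the operand type itself, here uint8_t.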
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT8 || GxB_NO_ISGT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isgt_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
BRKGA.h | /**
* BRKGA.h
*
* This template class encapsulates a Biased Random-key Genetic Algorithm for minimization problems
* with K independent Populations stored in two vectors of Population, current and previous.
* It supports multi-threading via OpenMP, and implements the following key methods:
*
* - BRKGA() constructor: initializes the populations with parameters described below.
* - evolve() operator: evolve each Population following the BRKGA methodology. This method
* supports OpenMP to evolve up to K independent Populations in parallel.
* Please note that double Decoder::decode(...) MUST be thread-safe.
*
* Required parameters:
* - n: number of genes in each chromosome
* - p: number of elements in each population
* - pe: pct of elite items into each population
* - pm: pct of mutants introduced at each generation into the population
* - rhoe: probability that an offspring inherits the allele of its elite parent
*
* Optional parameters:
* - K: number of independent Populations (set to 1 if not supplied)
* - MAX_THREADS: number of threads to perform parallel decoding (set to 1 if not supplied)
* WARNING: Decoder::decode() MUST be thread-safe if MAX_THREADS > 1!
*
* The following objects are required upon declaration:
* RNG: random number generator that implements the methods below.
* - RNG(unsigned long seed) to initialize a new RNG with 'seed'
* - double rand() to return a double precision random deviate in range [0,1)
* - unsigned long randInt() to return a >=32-bit unsigned random deviate in range [0, 2^32 - 1]
* - unsigned long randInt(N) to return an unsigned random deviate in range [0, N] with N < 2^32
*
* Decoder: problem-specific decoder that implements any of the decode methods outlined below. When
* compiling and linking BRKGA with -fopenmp (i.e., with multithreading support via
* OpenMP), the method must be thread-safe.
* - double decode(const vector< double >& chromosome) const, if you don't want to change
* chromosomes inside the framework, or
* - double decode(vector< double >& chromosome) const, if you'd like to update a chromosome.
* WARNING: even though both methods use const correctness to enforce that they are thread safe
* the use of mutable within the Decoder class could void such a feature! In other
* words, DO NOT use mutable within the decoder.
*
* Created on : Jun 22, 2010 by rtoso
* Last update: Sep 15, 2011 by rtoso
* Authors : Rodrigo Franco Toso <rtoso@cs.rutgers.edu>
* Mauricio G.C. Resende <mgcr@research.att.com>
*
* The MIT License (MIT)
*
* Copyright (c) 2018
* Rodrigo Franco Toso (rfrancotoso@gmail.com) and
* Mauricio G.C. Resende
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef BRKGA_H
#define BRKGA_H
#include <omp.h>
#include <algorithm>
#include <exception>
#include <stdexcept>
#include "Population.h"
template< class Decoder, class RNG >
class BRKGA {
public:
/*
* Default constructor
* Required hyperparameters:
* - n: number of genes in each chromosome
* - p: number of elements in each population
* - pe: pct of elite items into each population
* - pm: pct of mutants introduced at each generation into the population
* - rhoe: probability that an offspring inherits the allele of its elite parent
*
* Optional parameters:
* - K: number of independent Populations
* - MAX_THREADS: number of threads to perform parallel decoding
* WARNING: Decoder::decode() MUST be thread-safe; safe if implemented as
* + double Decoder::decode(std::vector< double >& chromosome) const
*/
BRKGA(unsigned n, unsigned p, double pe, double pm, double rhoe, const Decoder& refDecoder,
RNG& refRNG, unsigned K = 1, unsigned MAX_THREADS = 1) throw(std::range_error);
/**
* Destructor
*/
~BRKGA();
/**
* Resets all populations with brand new keys
*/
void reset();
/**
* Evolve the current populations following the guidelines of BRKGAs
* @param generations number of generations to evolve (must be nonzero)
*/
void evolve(unsigned generations = 1);
/**
* Exchange elite-solutions between the populations
* @param M number of elite chromosomes to select from each population
*/
void exchangeElite(unsigned M) throw(std::range_error);
/**
* Returns the current population
*/
const Population& getPopulation(unsigned k = 0) const;
/**
* Returns the chromosome with best fitness so far among all populations
*/
const std::vector< double >& getBestChromosome() const;
/**
* Returns the best fitness found so far among all populations
*/
double getBestFitness() const;
// Return copies to the internal parameters:
unsigned getN() const;
unsigned getP() const;
unsigned getPe() const;
unsigned getPm() const;
unsigned getPo() const;
double getRhoe() const;
unsigned getK() const;
unsigned getMAX_THREADS() const;
private:
// I don't see any reason to pimpl the internal methods and data, so here they are:
// Hyperparameters:
const unsigned n; // number of genes in the chromosome
const unsigned p; // number of elements in the population
const unsigned pe; // number of elite items in the population
const unsigned pm; // number of mutants introduced at each generation into the population
const double rhoe; // probability that an offspring inherits the allele of its elite parent
// Templates:
RNG& refRNG; // reference to the random number generator
const Decoder& refDecoder; // reference to the problem-dependent Decoder
// Parallel populations parameters:
const unsigned K; // number of independent parallel populations
const unsigned MAX_THREADS; // number of threads for parallel decoding
// Data:
std::vector< Population* > previous; // previous populations
std::vector< Population* > current; // current populations
// Local operations:
void initialize(const unsigned i); // initialize current population 'i' with random keys
void evolution(Population& curr, Population& next);
bool isRepeated(const std::vector< double >& chrA, const std::vector< double >& chrB) const;
};
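/*
 * Minimal usage sketch (illustrative; 'MyDecoder' and 'MTRand' are assumed
 * user-supplied types satisfying the Decoder and RNG requirements above,
 * and are not part of this header):
 *
 *     MTRand rng(272818);                // RNG seeded as required above
 *     MyDecoder decoder;                 // decode() must be thread-safe
 *     BRKGA< MyDecoder, MTRand > ga(n, 256, 0.20, 0.10, 0.70, decoder, rng, 3, 4);
 *     for(unsigned g = 1; g <= 100; ++g) {
 *         ga.evolve();
 *         if(g % 20 == 0) { ga.exchangeElite(2); }  // migrate 2 elites among the 3 populations
 *     }
 *     const double best = ga.getBestFitness();
 */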
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::BRKGA(unsigned _n, unsigned _p, double _pe, double _pm, double _rhoe,
const Decoder& decoder, RNG& rng, unsigned _K, unsigned MAX) throw(std::range_error) :
n(_n), p(_p), pe(unsigned(_pe * p)), pm(unsigned(_pm * p)), rhoe(_rhoe), refRNG(rng),
refDecoder(decoder), K(_K), MAX_THREADS(MAX), previous(K, 0), current(K, 0) {
// Error check:
using std::range_error;
if(n == 0) { throw range_error("Chromosome size equals zero."); }
if(p == 0) { throw range_error("Population size equals zero."); }
if(pe == 0) { throw range_error("Elite-set size equals zero."); }
if(pe > p) { throw range_error("Elite-set size greater than population size (pe > p)."); }
if(pm > p) { throw range_error("Mutant-set size (pm) greater than population size (p)."); }
if(pe + pm > p) { throw range_error("elite + mutant sets greater than population size (p)."); }
if(K == 0) { throw range_error("Number of parallel populations cannot be zero."); }
// Initialize and decode each chromosome of the current population, then copy to previous:
for(unsigned i = 0; i < K; ++i) {
// Allocate:
current[i] = new Population(n, p);
// Initialize:
initialize(i);
// Then just copy to previous:
previous[i] = new Population(*current[i]);
}
}
template< class Decoder, class RNG >
BRKGA< Decoder, RNG >::~BRKGA() {
for(unsigned i = 0; i < K; ++i) { delete current[i]; delete previous[i]; }
}
template< class Decoder, class RNG >
const Population& BRKGA< Decoder, RNG >::getPopulation(unsigned k) const {
#ifdef RANGECHECK
if(k >= K) { throw std::range_error("Invalid population identifier."); }
#endif
return (*current[k]);
}
template< class Decoder, class RNG >
double BRKGA< Decoder, RNG >::getBestFitness() const {
double best = current[0]->fitness[0].first;
for(unsigned i = 1; i < K; ++i) {
if(current[i]->fitness[0].first < best) { best = current[i]->fitness[0].first; }
}
return best;
}
template< class Decoder, class RNG >
const std::vector< double >& BRKGA< Decoder, RNG >::getBestChromosome() const {
unsigned bestK = 0;
for(unsigned i = 1; i < K; ++i) {
if( current[i]->getBestFitness() < current[bestK]->getBestFitness() ) { bestK = i; }
}
return current[bestK]->getChromosome(0); // The top one :-)
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::reset() {
for(unsigned i = 0; i < K; ++i) { initialize(i); }
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::evolve(unsigned generations) {
#ifdef RANGECHECK
if(generations == 0) { throw std::range_error("Cannot evolve for 0 generations."); }
#endif
for(unsigned i = 0; i < generations; ++i) {
for(unsigned j = 0; j < K; ++j) {
evolution(*current[j], *previous[j]); // First evolve the population (curr, next)
std::swap(current[j], previous[j]); // Swap so 'current' points to the newly evolved population
}
}
}
template< class Decoder, class RNG >
void BRKGA< Decoder, RNG >::exchangeElite(unsigned M) throw(std::range_error) {
#ifdef RANGECHECK
if(M == 0 || M >= p) { throw std::range_error("M cannot be zero or >= p."); }
#endif
for(unsigned i = 0; i < K; ++i) {
// Population i will receive some elite members from each Population j below:
unsigned dest = p - 1; // Last chromosome of i (will be updated below)
for(unsigned j = 0; j < K; ++j) {
if(j == i) { continue; }
// Copy the M best of Population j into Population i:
for(unsigned m = 0; m < M; ++m) {
// Copy the m-th best of Population j into the 'dest'-th position of Population i:
const std::vector< double >& bestOfJ = current[j]->getChromosome(m);
std::copy(bestOfJ.begin(), bestOfJ.end(), current[i]->getChromosome(dest).begin());
current[i]->fitness[dest].first = current[j]->fitness[m].first;
--dest;
}
}
}
for(int j = 0; j < int(K); ++j) { current[j]->sortFitness(); }
}
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::initialize(const unsigned i) {
for(unsigned j = 0; j < p; ++j) {
for(unsigned k = 0; k < n; ++k) { (*current[i])(j, k) = refRNG.rand(); }
}
// DAYVSON: edit here
// Decode:
#ifdef _OPENMP
#pragma omp parallel for num_threads(MAX_THREADS)
#endif
for(int j = 0; j < int(p); ++j) {
current[i]->setFitness(j, refDecoder.decode((*current[i])(j)) );
}
// Sort:
current[i]->sortFitness();
}
template< class Decoder, class RNG >
inline void BRKGA< Decoder, RNG >::evolution(Population& curr, Population& next) {
// We will now write every chromosome of 'next', iterating with 'i':
unsigned i = 0; // Iterate chromosome by chromosome
unsigned j = 0; // Iterate allele by allele
// 1. The 'pe' best chromosomes are maintained, so we just copy these into 'next':
while(i < pe) {
for(j = 0 ; j < n; ++j) { next(i,j) = curr(curr.fitness[i].second, j); }
next.fitness[i].first = curr.fitness[i].first;
next.fitness[i].second = i;
++i;
}
// 2. We'll mate 'p - pe - pm' pairs; initially, i = pe, so we need to iterate until i < p - pm:
while(i < p - pm) {
// Select an elite parent:
const unsigned eliteParent = (refRNG.randInt(pe - 1));
// Select a non-elite parent:
const unsigned noneliteParent = pe + (refRNG.randInt(p - pe - 1));
// Mate:
for(j = 0; j < n; ++j) {
const unsigned& sourceParent = ((refRNG.rand() < rhoe) ? eliteParent : noneliteParent);
next(i, j) = curr(curr.fitness[sourceParent].second, j);
}
++i;
}
// 3. We'll introduce 'pm' mutants:
while(i < p) {
for(j = 0; j < n; ++j) { next(i, j) = refRNG.rand(); }
++i;
}
// Time to compute fitness, in parallel:
#ifdef _OPENMP
#pragma omp parallel for num_threads(MAX_THREADS)
#endif
for(int i = int(pe); i < int(p); ++i) {
next.setFitness( i, refDecoder.decode(next.population[i]) );
}
// Now we must sort 'next' by fitness, since things might have changed:
next.sortFitness();
}
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getN() const { return n; }
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getP() const { return p; }
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPe() const { return pe; }
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPm() const { return pm; }
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getPo() const { return p - pe - pm; }
template< class Decoder, class RNG >
double BRKGA<Decoder, RNG>::getRhoe() const { return rhoe; }
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getK() const { return K; }
template< class Decoder, class RNG >
unsigned BRKGA<Decoder, RNG>::getMAX_THREADS() const { return MAX_THREADS; }
#endif
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* default to 0 so the stencil loop is safely skipped if sizes are not supplied */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 32;
tile_size[3] = 2048;
tile_size[4] = -1;
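/* The four entries are the tile extents for the tiled loop dimensions
   (apparently time, z, y, x, matching the 24/24/32/2048 bounds in the
   CLooG nest below); the trailing -1 terminates the list. */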
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) {
for (t4=max(max(max(0,ceild(3*t1-511,512)),ceild(24*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(12*t1+Nx+21,2048)),floord(24*t2+Nx+20,2048)),floord(32*t3+Nx+28,2048)),floord(24*t1-24*t2+Nz+Nx+19,2048));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),2048*t4+2046),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
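/* For reference, the tiled nest above performs the same updates as this
   naive sketch (kept disabled): an order-1 7-point stencil with one
   variable coefficient per neighbor, double-buffered over time. */
#if 0
for (t = 0; t < Nt-1; t++)
  for (i = 1; i < Nz-1; i++)
    for (j = 1; j < Ny-1; j++)
      for (k = 1; k < Nx-1; k++)
        A[(t+1)%2][i][j][k] =
            coef[0][i][j][k] * A[t%2][i][j][k]
          + coef[1][i][j][k] * A[t%2][i-1][j][k]
          + coef[2][i][j][k] * A[t%2][i][j-1][k]
          + coef[3][i][j][k] * A[t%2][i][j][k-1]
          + coef[4][i][j][k] * A[t%2][i+1][j][k]
          + coef[5][i][j][k] * A[t%2][i][j+1][k]
          + coef[6][i][j][k] * A[t%2][i][j][k+1];
#endif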
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
analyze.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% AAA N N AAA L Y Y ZZZZZ EEEEE %
% A A NN N A A L Y Y ZZ E %
% AAAAA N N N AAAAA L Y ZZZ EEE %
% A A N NN A A L Y ZZ E %
% A A N N A A LLLLL Y ZZZZZ EEEEE %
% %
% Analyze An Image %
% %
% Software Design %
% Bill Corbis %
% December 1998 %
% %
% %
% Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
*/
/*
Include declarations.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include <math.h>
#include "magick/MagickCore.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% a n a l y z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% analyzeImage() computes the brightness and saturation mean, standard
% deviation, kurtosis, and skewness, and stores these values as attributes
% of the image.
%
% The format of the analyzeImage method is:
%
%      size_t analyzeImage(Image **images,const int argc,
%        const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o images: the address of a structure of type Image.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
*/
ModuleExport size_t analyzeImage(Image **images,const int argc,
const char **argv,ExceptionInfo *exception)
{
char
text[MaxTextExtent];
double
area,
brightness,
brightness_mean,
brightness_standard_deviation,
brightness_kurtosis,
brightness_skewness,
brightness_sum_x,
brightness_sum_x2,
brightness_sum_x3,
brightness_sum_x4,
hue,
saturation,
saturation_mean,
saturation_standard_deviation,
saturation_kurtosis,
saturation_skewness,
saturation_sum_x,
saturation_sum_x2,
saturation_sum_x3,
saturation_sum_x4;
Image
*image;
assert(images != (Image **) NULL);
assert(*images != (Image *) NULL);
assert((*images)->signature == MagickSignature);
(void) argc;
(void) argv;
image=(*images);
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
{
CacheView
*image_view;
ssize_t
y;
MagickBooleanType
status;
brightness_sum_x=0.0;
brightness_sum_x2=0.0;
brightness_sum_x3=0.0;
brightness_sum_x4=0.0;
brightness_mean=0.0;
brightness_standard_deviation=0.0;
brightness_kurtosis=0.0;
brightness_skewness=0.0;
saturation_sum_x=0.0;
saturation_sum_x2=0.0;
saturation_sum_x3=0.0;
saturation_sum_x4=0.0;
saturation_mean=0.0;
saturation_standard_deviation=0.0;
saturation_kurtosis=0.0;
saturation_skewness=0.0;
area=0.0;
status=MagickTrue;
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* The moment sums and area are reduction targets, and the HSB temporaries
     must be thread-private; otherwise concurrently processed rows race. */
  #pragma omp parallel for schedule(dynamic,4) shared(status) \
    private(hue,saturation,brightness) \
    reduction(+:area,brightness_sum_x,brightness_sum_x2,brightness_sum_x3, \
      brightness_sum_x4,saturation_sum_x,saturation_sum_x2,saturation_sum_x3, \
      saturation_sum_x4)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ConvertRGBToHSB(GetPixelRed(p),GetPixelGreen(p),GetPixelBlue(p),
&hue,&saturation,&brightness);
brightness*=QuantumRange;
brightness_sum_x+=brightness;
brightness_sum_x2+=brightness*brightness;
brightness_sum_x3+=brightness*brightness*brightness;
brightness_sum_x4+=brightness*brightness*brightness*brightness;
saturation*=QuantumRange;
saturation_sum_x+=saturation;
saturation_sum_x2+=saturation*saturation;
saturation_sum_x3+=saturation*saturation*saturation;
saturation_sum_x4+=saturation*saturation*saturation*saturation;
area++;
p++;
}
}
image_view=DestroyCacheView(image_view);
if (area <= 0.0)
break;
brightness_mean=brightness_sum_x/area;
(void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_mean);
(void) SetImageProperty(image,"filter:brightness:mean",text);
brightness_standard_deviation=sqrt(brightness_sum_x2/area-(brightness_sum_x/
area*brightness_sum_x/area));
(void) FormatLocaleString(text,MaxTextExtent,"%g",
brightness_standard_deviation);
(void) SetImageProperty(image,"filter:brightness:standard-deviation",text);
if (brightness_standard_deviation != 0)
brightness_kurtosis=(brightness_sum_x4/area-4.0*brightness_mean*
brightness_sum_x3/area+6.0*brightness_mean*brightness_mean*
brightness_sum_x2/area-3.0*brightness_mean*brightness_mean*
brightness_mean*brightness_mean)/(brightness_standard_deviation*
brightness_standard_deviation*brightness_standard_deviation*
brightness_standard_deviation)-3.0;
(void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_kurtosis);
(void) SetImageProperty(image,"filter:brightness:kurtosis",text);
if (brightness_standard_deviation != 0)
brightness_skewness=(brightness_sum_x3/area-3.0*brightness_mean*
brightness_sum_x2/area+2.0*brightness_mean*brightness_mean*
brightness_mean)/(brightness_standard_deviation*
brightness_standard_deviation*brightness_standard_deviation);
(void) FormatLocaleString(text,MaxTextExtent,"%g",brightness_skewness);
(void) SetImageProperty(image,"filter:brightness:skewness",text);
saturation_mean=saturation_sum_x/area;
(void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_mean);
(void) SetImageProperty(image,"filter:saturation:mean",text);
saturation_standard_deviation=sqrt(saturation_sum_x2/area-(saturation_sum_x/
area*saturation_sum_x/area));
(void) FormatLocaleString(text,MaxTextExtent,"%g",
saturation_standard_deviation);
(void) SetImageProperty(image,"filter:saturation:standard-deviation",text);
if (saturation_standard_deviation != 0)
saturation_kurtosis=(saturation_sum_x4/area-4.0*saturation_mean*
saturation_sum_x3/area+6.0*saturation_mean*saturation_mean*
saturation_sum_x2/area-3.0*saturation_mean*saturation_mean*
saturation_mean*saturation_mean)/(saturation_standard_deviation*
saturation_standard_deviation*saturation_standard_deviation*
saturation_standard_deviation)-3.0;
(void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_kurtosis);
(void) SetImageProperty(image,"filter:saturation:kurtosis",text);
if (saturation_standard_deviation != 0)
saturation_skewness=(saturation_sum_x3/area-3.0*saturation_mean*
saturation_sum_x2/area+2.0*saturation_mean*saturation_mean*
saturation_mean)/(saturation_standard_deviation*
saturation_standard_deviation*saturation_standard_deviation);
(void) FormatLocaleString(text,MaxTextExtent,"%g",saturation_skewness);
(void) SetImageProperty(image,"filter:saturation:skewness",text);
}
return(MagickImageFilterSignature);
}
|
convolution-2d.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
   Can also be used to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
  /* NOTE: #P1..#P3 are autotuning placeholders (schedule kind, chunk size,
     thread count); they are intentionally left unexpanded here and must be
     substituted with concrete values before this file will compile. */
  #pragma omp parallel for private(j) collapse(2) schedule(#P1, #P2) num_threads(#P3)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
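/* The kernel above applies this fixed 3x3 stencil mask at every interior
   point (rows correspond to i-1, i, i+1; columns to j-1, j, j+1):
        0.2   0.5  -0.8
       -0.3   0.6  -0.9
        0.4   0.7   0.1 */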
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
histogram.h | #pragma once
#include <sstream>
#include "util/pretty_print.h"
template<typename T>
vector<int32_t> core_val_histogram(int n, T &core, bool is_print = false) {
Timer histogram_timer;
// core-value histogram
int max_core_val = 0;
vector<int32_t> histogram;
#pragma omp parallel
{
#pragma omp for reduction(max:max_core_val)
for (auto u = 0; u < n; u++) {
max_core_val = max(max_core_val, core[u]);
}
#pragma omp single
{
log_info("max value: %d", max_core_val);
histogram = vector<int32_t>(max_core_val + 1, 0);
}
vector<int32_t> local_histogram(histogram.size());
#pragma omp for
for (auto u = 0; u < n; u++) {
auto core_val = core[u];
local_histogram[core_val]++;
}
        // local_histogram is fully counted at this point; each thread merges
        // its own copy into the shared histogram.
for (auto i = 0; i < local_histogram.size(); i++) {
#pragma omp atomic
histogram[i] += local_histogram[i];
}
}
if (is_print) {
if (histogram.size() < 400) {
stringstream ss;
ss << pretty_print_array(&histogram.front(), histogram.size());
log_info("values histogram: %s", ss.str().c_str());
} else {
{
stringstream ss;
ss << pretty_print_array(&histogram.front(), 100);
log_info("first100 values histogram: %s", ss.str().c_str());
}
{
stringstream ss;
ss << pretty_print_array(&histogram.front() + histogram.size() - 100, 100);
log_info("last100 values histogram: %s", ss.str().c_str());
}
}
}
log_info("Histogram Time: %.9lf s", histogram_timer.elapsed());
auto &bins = histogram;
auto bin_cnt = 0;
int64_t acc = 0;
auto thresh = n / 10;
auto last = 0;
for (auto i = 0; i < histogram.size(); i++) {
if (bins[i] > 0) {
bin_cnt++;
acc += bins[i];
if (acc > thresh || i == histogram.size() - 1) {
log_info("bin[%d - %d]: %'lld", last, i, acc);
last = i + 1;
acc = 0;
}
}
}
log_info("Reversed Bins...");
last = histogram.size() - 1;
acc = 0;
for (int32_t i = histogram.size() - 1; i > -1; i--) {
if (bins[i] > 0) {
bin_cnt++;
acc += bins[i];
if (acc > thresh || i == 0) {
log_info("bin[%d - %d]: %'lld", i, last, acc);
                last = i - 1; // the next (lower) bin ends just below i in the reversed scan
acc = 0;
}
}
}
log_info("total bin counts: %d", bin_cnt);
return histogram;
} |
pr4.c | //Write an OpenMP program to print the execution environment information: the number of processors, number of threads, etc.
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
int main (int argc, char *argv[])
{
int nthreads, tid, procs, maxt, inpar, dynamic, nested;
/* Start parallel region */
#pragma omp parallel private(nthreads, tid)
{
/* Obtain thread number */
tid = omp_get_thread_num();
/* Only master thread does this */
if (tid == 0)
{
printf("Thread %d getting environment info...\n", tid);
/* Get environment information */
procs = omp_get_num_procs();
nthreads = omp_get_num_threads();
maxt = omp_get_max_threads();
inpar = omp_in_parallel();
dynamic = omp_get_dynamic();
nested = omp_get_nested();
/* Print environment information */
printf("Number of processors = %d\n", procs);
printf("Number of threads = %d\n", nthreads);
printf("Max threads = %d\n", maxt);
printf("In parallel? = %d\n", inpar);
printf("Dynamic threads enabled? = %d\n", dynamic);
printf("Nested parallelism supported? = %d\n", nested);
}
} /* Done */
  return 0;
}
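/* Build/run sketch (assumes a compiler with OpenMP support, e.g. GCC):
     gcc -fopenmp pr4.c -o pr4
     OMP_NUM_THREADS=4 ./pr4
   Note that omp_get_num_threads() is only meaningful inside the parallel
   region; called from serial code it returns 1. */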
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
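  /* For example (an illustrative instantiation, not used verbatim below):
     with OP = mshadow_op::minus, MissingRValueOp yields
     out[i] = lhs[i] - 0 for rows present only in the lhs operand, while
     MissingLValueOp yields out[i] = 0 - rhs[i] for rows present only in
     the rhs operand. */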
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
public:
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
private:
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
RspRspOp<LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
// lhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
}
// rhs grad
if (req[1] != kNullOp) {
RspRspOp<ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
// rhs in-place
RspRspOp<op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
std::is_same<mshadow_op::left, ROP>::value;
CHECK(supported_ops)
<< "Only backward for mul is supported (LOP should be right, ROP should be left)";
const NDArray& out_grad = inputs[0];
const NDArray& lhs_in = inputs[1];
const NDArray& rhs_in = inputs[2];
const NDArray& lhs_grad = outputs[0];
const NDArray& rhs_grad = outputs[1];
const bool reverse = (outputs[0].storage_type() == kCSRStorage);
if (reverse) {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()}, {req[1]},
{rhs_grad.data()});
} else {
DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()}, {req[0]},
{lhs_grad.data()});
}
}
public:
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename OP>
static void RspRspOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void CsrCsrOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
/*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
template<typename xpu, typename OP>
static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
* Note: for csr, it dispatches to fallback other than csr, csr -> csr
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
* \brief Allow one of the inputs to be dense and produce a dense output,
* for rsp inputs only support when both inputs are rsp type.
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2);
CHECK_EQ(out_attrs->size(), 1);
const auto lhs_stype = (*in_attrs)[0];
const auto rhs_stype = (*in_attrs)[1];
bool dispatched = false;
const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns ... -> dns
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp, ... -> rsp
dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr, ... -> csr
dispatched = storage_type_assign(out_attrs, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
(lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
// dense, csr -> dense / csr, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
// dense, rsp -> dense / rsp, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void ComputeInt(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
if (outputs[0].type_flag_ == mshadow::kBool) {
LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
}
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeLogic(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<bool>(),
inputs[0].dptr<DType>(),
inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] == kNullOp) return;
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
if (size != 0) {
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
}
});
});
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
(out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
} else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kRowSparseStorage);
const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
      // More than one dense input is not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
RspRspOp<OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kCSRStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. The op must map a zero input to a zero output.
DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> _, rsp. The op must map a zero input to a zero output.
DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto out_grad_stype = inputs[0].storage_type();
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
}
if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
(lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
out_grad_stype == kDefaultStorage) {
// dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<mxnet::alm::FChangeLayout>("FChangeLayout", ElemwiseChangeLayout) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
    When inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result
* FInferStorageType attr is not set using this macro.
* By default DefaultStorageType is used.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
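// Illustrative access pattern (hypothetical variable names): repeated lookups
// for the same FileID are served by the single-element cache and never touch
// the DenseMap:
//   FileNullabilityMap NullabilityMap;
//   NullabilityMap[FID].SawTypeNullability = true;     // miss: pulled into cache
//   bool Seen = NullabilityMap[FID].SawTypeNullability; // hit: cache answers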
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical wrt to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_last = kind_pointer
};
public:
SYCLIntegrationHeader(DiagnosticsEngine &Diag, bool UnnamedLambdaSupport,
Sema &S);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(const StringRef &MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(StringRef KernelName, QualType KernelNameType,
StringRef KernelStableName, SourceLocation Loc,
bool IsESIMD);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
/// Registers a specialization constant to emit info for it into the header.
void addSpecConstant(StringRef IDName, QualType IDType);
/// Note which free functions (this_id, this_item, etc) are called within the
/// kernel
void setCallsThisId(bool B);
void setCallsThisItem(bool B);
void setCallsThisNDItem(bool B);
void setCallsThisGroup(bool B);
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// there are four free functions the kernel may call (this_id, this_item,
// this_nd_item, this_group)
struct KernelCallsSYCLFreeFunction {
bool CallsThisId;
bool CallsThisItem;
bool CallsThisNDItem;
bool CallsThisGroup;
};
// Kernel invocation descriptor
struct KernelDesc {
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
SourceLocation KernelLocation;
/// Whether this kernel is an ESIMD one.
bool IsESIMDKernel;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
// Whether kernel calls any of the SYCL free functions (this_item(),
// this_id(), etc)
KernelCallsSYCLFreeFunction FreeFunctionCalls;
KernelDesc() = default;
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
using SpecConstID = std::pair<QualType, std::string>;
/// Keeps specialization constants met in the translation unit. Maps spec
/// constant's ID type to generated unique name. Duplicates are removed at
/// integration header emission time.
llvm::SmallVector<SpecConstID, 4> SpecConsts;
/// Whether header is generated with unnamed lambda support
bool UnnamedLambdaSupport;
Sema &S;
};
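// Illustrative emission sequence (hypothetical values), following the member
// documentation above: one startKernel/addParamDesc.../endKernel group per
// kernel invocation descriptor.
//   H.startKernel(Name, NameType, StableName, Loc, /*IsESIMD=*/false);
//   H.addParamDesc(SYCLIntegrationHeader::kind_std_layout, /*Info=*/4,
//                  /*Offset=*/0);
//   H.endKernel();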
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
  /// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
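// A hypothetical usage sketch (illustrative only; Tok and CastTy are
// placeholders, not real call-site names): the parser records the expected
// type at a token, then later queries it with the same token location.
//
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterTypeCast(Tok, CastTy);   // e.g. after parsing '(T)'
//   QualType Expected = PreferredType.get(Tok); // CastTy; null for other locs
//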
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in the overload below for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate it here
/// because that allows us not to duplicate the constants in clang code,
/// which we would otherwise have to do since we can't directly use the llvm
/// constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
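// For reference, MaximumAlignment is 1u << 29 == 536870912.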
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
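// For example (illustrative): '#pragma pack(push, 8)' corresponds to
// PSK_Push_Set, '#pragma pack(pop)' to PSK_Pop, and '#pragma pack()' to
// PSK_Reset.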
// #pragma pack and align.
class AlignPackInfo {
public:
// `Native` represents the default align mode, which may vary based on the
// platform.
enum Mode : unsigned char { Native, Natural, Packed, Mac68k };
// #pragma pack info constructor
AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
: PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
assert(Num == PackNumber && "The pack number has been truncated.");
}
// #pragma align info constructor
AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
: PackAttr(false), AlignMode(M),
PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}
explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}
AlignPackInfo() : AlignPackInfo(Native, false) {}
// When an AlignPackInfo itself cannot be used, this returns a 32-bit
// integer encoding of it. The result should only be passed to
// AlignPackInfo::getFromRawEncoding; it should not be inspected directly.
static uint32_t getRawEncoding(const AlignPackInfo &Info) {
std::uint32_t Encoding{};
if (Info.IsXLStack())
Encoding |= IsXLMask;
Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
if (Info.IsPackAttr())
Encoding |= PackAttrMask;
Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
return Encoding;
}
static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
bool IsXL = static_cast<bool>(Encoding & IsXLMask);
AlignPackInfo::Mode M =
static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
int PackNumber = (Encoding & PackNumMask) >> 4;
if (Encoding & PackAttrMask)
return AlignPackInfo(M, PackNumber, IsXL);
return AlignPackInfo(M, IsXL);
}
bool IsPackAttr() const { return PackAttr; }
bool IsAlignAttr() const { return !PackAttr; }
Mode getAlignMode() const { return AlignMode; }
unsigned getPackNumber() const { return PackNumber; }
bool IsPackSet() const {
// #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
// attribute on a decl.
return PackNumber != UninitPackVal && PackNumber != 0;
}
bool IsXLStack() const { return XLStack; }
bool operator==(const AlignPackInfo &Info) const {
return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
Info.XLStack);
}
bool operator!=(const AlignPackInfo &Info) const {
return !(*this == Info);
}
private:
/// \brief True if this is a pragma pack attribute,
/// not a pragma align attribute.
bool PackAttr;
/// \brief The alignment mode that is in effect.
Mode AlignMode;
/// \brief The pack number of the stack.
unsigned char PackNumber;
/// \brief True if it is a XL #pragma align/pack stack.
bool XLStack;
/// \brief Uninitialized pack value.
static constexpr unsigned char UninitPackVal = -1;
// Masks to encode and decode an AlignPackInfo.
static constexpr uint32_t IsXLMask{0x0000'0001};
static constexpr uint32_t AlignModeMask{0x0000'0006};
static constexpr uint32_t PackAttrMask{0x0000'0008};
static constexpr uint32_t PackNumMask{0x0000'01F0};
};
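// Worked example of the encoding above (illustrative): an AlignPackInfo
// constructed as AlignPackInfo(AlignPackInfo::Packed, /*Num=*/8,
// /*IsXL=*/false) encodes as
//
//   (0 << 0 /*XL*/) | (2 /*Packed*/ << 1) | 0x8 /*pack attr*/ |
//   (8 /*pack number*/ << 4) == 0x8C,
//
// and getFromRawEncoding(0x8C) reconstructs an equal AlignPackInfo.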
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
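// Illustrative mapping from pragmas to Act() calls on a
// PragmaStack<AlignPackInfo> (a sketch; the real call sites are Sema's
// pragma handlers, and Loc/Value stand in for the actual arguments):
//
//   #pragma pack(push, r1, 8) -> Act(Loc, PSK_Push_Set, "r1", Value);
//   #pragma pack(pop, r1)     -> Act(Loc, PSK_Pop, "r1", Value);  // Value unused
//   #pragma pack()            -> Act(Loc, PSK_Reset, "", Value);  // Value unused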
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This
/// allows us to associate a raw vector type with one of the ext_vector type
/// names. This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members and the locations of delete-expressions
/// for which it could not be proven whether they mismatch with the
/// new-expression used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
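// A minimal usage sketch (hypothetical caller; assumes the usual
// sema::DelayedDiagnosticPool constructor taking the parent pool): collect
// diagnostics into a local pool while parsing speculatively, then discard
// them on failure.
//
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... access/deprecation diagnostics now accumulate in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State); // drop, do not emit
//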
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
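// Typical shape of use (a sketch; NewDC is a placeholder DeclContext):
//
//   {
//     Sema::ContextRAII SavedContext(S, NewDC);
//     // ... S.CurContext is NewDC here ...
//   } // previous context, 'this' type, and scope indices restored
//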
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
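// Sketch of the intended pattern (illustrative; Ctor and UseLoc are
// placeholders):
//
//   SynthesizedFunctionScope Scope(S, Ctor);
//   Scope.addContextNote(UseLoc); // "while defining..." note on diagnostics
//   // ... synthesize and attach the function body ...
//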
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold 'respondsToSelector:'.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
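// Illustrative use at a compound-statement boundary (a sketch):
//
//   {
//     FPFeaturesStateRAII SaveFP(S); // snapshots CurFPFeatures and the
//                                    // FpPragmaStack value
//     // ... parse statements that may contain FP pragmas ...
//   } // both restored here
//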
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
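// Hypothetical usage sketch (recurseDeeply is a placeholder):
//
//   S.runWithSufficientStackSpace(Loc, [&] { recurseDeeply(); });
//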
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. ImmediateDiagBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class ImmediateDiagBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
// in that case anyway.
ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;
~ImmediateDiagBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First clear the diagnostic
// builder itself so it won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template <typename T>
friend const ImmediateDiagBuilder &
operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const ImmediateDiagBuilder &operator<<(T &&V) const {
const DiagnosticBuilder &BaseDiag = *this;
BaseDiag << std::move(V);
return *this;
}
};
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second << std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].second.AddFixItHint(Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error-level diagnostic immediate? This is used to determine
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether an uncompilable error has occurred. This includes errors that
/// happen in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E = nullptr);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
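///
/// For example, for a free function the attribute argument must carry one
/// parameter selector per parameter (illustrative):
/// \code
///   void moveTo(int x, int y)
///       __attribute__((swift_name("move(toX:y:)")));  // well-formed
///   void moveTo2(int x, int y)
///       __attribute__((swift_name("move(toX:)")));    // diagnosed: arity
/// \endcode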
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
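/// Convenience overload that builds a BoundTypeDiagnoser from \p DiagID and
/// the trailing arguments. A minimal usage sketch (the diagnostic ID here is
/// illustrative):
/// \code
///   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
///     return true; // T was incomplete and has already been diagnosed.
/// \endcode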
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
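///
/// Illustrative case (hypothetical names):
/// \code
///   template <typename T> struct X { static int arr[]; };
///   template <typename T> int X<T>::arr[4];
///   // For an expression referring to X<int>::arr, getCompletedType
///   // triggers instantiation of the out-of-line definition, completing
///   // the type from 'int []' to 'int [4]'.
/// \endcode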
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it appeared in an
/// evaluated context, such as when building a type for decltype(auto).
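///
/// \code
///   int &f();
///   decltype(f()) a = f();   // operand of decltype is unevaluated
///   decltype(auto) b = f();  // initializer is evaluated; callers pass
///                            // AsUnevaluated=false when building this
/// \endcode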
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
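///
/// For example (names are illustrative):
/// \code
///   template <typename T> struct Wrapper : T {
///     UnknownFromBase member;  // MSVC: assumed to be a type found in T;
///                              // lookup is retried at instantiation.
///   };
/// \endcode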
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
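///
/// For example (illustrative):
/// \code
///   x * y;  // a declaration of 'y' if 'x' classifies as a type (NC_Type),
///           // a multiplication expression if it is a non-type (NC_NonType)
/// \endcode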
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
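///
/// For example, under ARC (assumption: Objective-C ARC mode):
/// \code
///   union U { __strong id obj; int i; };  // non-trivial to initialize,
///                                         // copy, and destruct
///   void f(union U u);  // diagnosed with NTCUC_FunctionParam
/// \endcode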
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
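///
/// \code
///   constexpr int size() { return 4; }
///   int buffer[size()];  // the body of size() is needed while parsing the
///                        // array bound, so it cannot be delayed
/// \endcode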
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note that the values of these enumerators
/// correspond to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject / error out in case
/// of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
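///
/// For example (illustrative):
/// \code
///   template <typename T> struct Outer { template <typename U> void f(U); };
///   template <typename T> template <typename U> void Outer<T>::f(U u) {}
///   // While parsing the out-of-line definition, names must resolve first
///   // in the innermost template parameter scope (U), then through
///   // Outer<T> and its template parameters (T), in that order.
/// \endcode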
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
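///
/// For example, an attribute written directly on a declaration
/// (AP_Explicit, weight 0) yields a lower final priority than one applied
/// via '#pragma clang attribute' (AP_PragmaClangAttribute, weight 1), so
/// the explicitly written attribute wins when both target the same platform.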
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Return the newly-created attribute if one was
/// added, null otherwise.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
SYCLIntelLoopFuseAttr *
mergeSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
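/// Attempt to produce an implicit conversion sequence from \p From to
/// \p ToType. To illustrate AllowedExplicit (names are hypothetical):
/// \code
///   struct S { explicit S(int); explicit operator bool(); };
///   // AllowedExplicit::None:        neither explicit member is usable.
///   // AllowedExplicit::Conversions: 'operator bool' is usable; the
///   //                               explicit constructor is not.
///   // AllowedExplicit::All:         both are usable.
/// \endcode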
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool IsStringInit(Expr *Init, const ArrayType *AT);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
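///
/// \code
///   const int &r = 42;             // OK: temporary lifetime-extended to 'r'
///   const int &f() { return 42; }  // cannot be extended: dangling return
/// \endcode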
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
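// Usage sketch (illustrative; CaseExpr and CondType are hypothetical values
// from a caller): convert a case-label expression to the switch condition
// type as a converted constant expression.
//
//   llvm::APSInt CaseVal;
//   ExprResult Converted = S.CheckConvertedConstantExpression(
//       CaseExpr, CondType, CaseVal, Sema::CCEK_CaseValue);
//   if (Converted.isInvalid())
//     return StmtError();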
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
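// Condensed sketch of wiring a concrete diagnoser into
// PerformContextualImplicitConversion (illustrative only; the diagnostic ID
// below is a placeholder, and the remaining pure virtuals inherited from
// ContextualImplicitConverter would also need overriding):
//
//   struct IntDiagnoser : Sema::ICEConvertDiagnoser {
//     IntDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder) << T; // placeholder ID
//     }
//     // diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     // diagnoseAmbiguous, noteAmbiguous, diagnoseConversion elided here.
//   } Diagnoser;
//   ExprResult R = S.PerformContextualImplicitConversion(Loc, E, Diagnoser);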
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit a 'note' diagnostic for the specific overload candidate.
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit, as a series of 'note' diagnostics, all of the templates and
// non-templates identified by the expression E.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or nullptr if all of them were satisfied.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
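// Usage sketch (illustrative; Callee, CallLoc, and CallArgs are
// hypothetical): a non-null result names the first enable_if condition
// that failed, whose message a caller would typically attach to a
// diagnostic.
//
//   if (EnableIfAttr *Failed = S.CheckEnableIf(Callee, CallLoc, CallArgs)) {
//     llvm::StringRef Why = Failed->getMessage();
//     (void)Why;
//   }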
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
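// Usage sketch (illustrative): bail out when the address cannot be taken,
// letting the routine emit the diagnostic itself.
//
//   if (!S.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc))
//     return ExprError();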
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R (A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
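// Usage sketch (illustrative; all arguments are values the for-range
// building code would already have prepared):
//
//   ExprResult BeginCall;
//   Sema::ForRangeStatus Status = S.BuildForRangeBeginEndCall(
//       Loc, RangeLoc, NameInfo, MemberLookup, &Candidates, Range,
//       &BeginCall);
//   if (Status == Sema::FRS_NoViableFunction) {
//     // Fall back, e.g. to non-member begin()/end() found via ADL.
//   }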
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
bool AllowRecovery = false);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Lookup of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Lookup of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
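// Usage sketch (illustrative): a simple unqualified lookup for a tag name.
//
//   NamedDecl *D = S.LookupSingleName(CurScope, Name, NameLoc,
//                                     Sema::LookupTagName);
//   if (!D) {
//     // Absent, ambiguous, or overloaded; use LookupName with a
//     // LookupResult to distinguish those cases.
//   }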
bool LookupBuiltin(LookupResult &R);
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
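// Usage sketch (illustrative; CurS, R, and ArgTys prepared by the caller):
// dispatch on the kind of literal operator that was found.
//
//   switch (S.LookupLiteralOperator(CurS, R, ArgTys,
//                                   /*AllowRaw=*/true,
//                                   /*AllowTemplate=*/true,
//                                   /*AllowStringTemplate=*/false,
//                                   /*DiagnoseMissing=*/true)) {
//   case Sema::LOLR_Cooked:   /* build the literal and call it */ break;
//   case Sema::LOLR_Raw:      /* pass the token spelling as a string */ break;
//   case Sema::LOLR_Template: /* pass characters as template args */ break;
//   default: break;
//   }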
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to the CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
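// Usage sketch (illustrative): decide whether a diagnostic against FD can
// be emitted immediately or must be deferred.
//
//   if (S.getEmissionStatus(FD) == Sema::FunctionEmissionStatus::Emitted) {
//     // FD is known to be emitted for this target; diagnose immediately.
//   }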
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device
// check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
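// Usage sketch for the Filter parameter (illustrative): accept only
// correction combinations that rebuild to a pointer-typed expression.
//
//   ExprResult Fixed = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Rebuilt) -> ExprResult {
//         if (!Rebuilt->getType()->isPointerType())
//           return ExprError(); // Reject; try another combination.
//         return Rebuilt;
//       });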
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for the nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration exactly matches that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// nullptr. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) that an atomic property may not have a user-declared setter
/// without a user-declared getter, or vice versa.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks the methods implemented in a
/// category against those implemented in its primary class, warning each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns the default address space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// the parameter CheckTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
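// Usage sketch (illustrative): keep a compound-statement scope open for
// exactly the duration of processing the statement's children.
//
//   {
//     Sema::CompoundScopeRAII BodyScope(S);
//     // ...act on each sub-statement...
//   } // ActOnFinishOfCompoundStmt() runs here.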
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
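// Usage sketch (illustrative; the condition is hypothetical): pop the
// function scope on every path except the one that hands ownership off.
//
//   Sema::FunctionScopeRAII PopOnExit(S);
//   if (SomeoneElseWillPopTheScope)
//     PopOnExit.disable();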
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and the body is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
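/// Illustrative sketch of the full-control overload (variable names are
/// placeholders): perform an explicit by-reference capture, diagnosing
/// failures and capturing through all enclosing scopes:
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = S.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_ExplicitByRef, EllipsisLoc,
///       /*BuildAndDiagnose=*/true, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode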
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but the diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
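/// Illustrative sketch ('diag::warn_example' is a stand-in, not a real
/// diagnostic ID): warn about runtime behavior only if the call proves
/// reachable once the function body has been parsed.
/// \code
///   S.DiagRuntimeBehavior(TheCall->getExprLoc(), TheCall,
///                         S.PDiag(diag::warn_example) << ArgExpr);
/// \endcode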
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation = false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
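/// Source-level form these handle (C11 generic selection; illustrative):
/// \code
///   const char *Kind = _Generic(1.0f,
///                               int: "int",
///                               float: "float",
///                               default: "other");
/// \endcode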
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for an OpenMP iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
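/// Source-level form this models (OpenMP 5.0 iterator modifier;
/// illustrative, with 'n', 'a', and 'work' as placeholders):
/// \code
///   #pragma omp task depend(iterator(i = 0 : n : 2), in : a[i])
///   work(a);
/// \endcode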
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
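/// For example (illustrative): with a smart-pointer-like class, an
/// erroneous '.' access may be retried as '->' via these extra args:
/// \code
///   struct Ptr { struct Obj { int field; } *operator->(); };
///   Ptr p;
///   int n = p.field; // error: no member 'field' in 'Ptr'; retried as p->field
/// \endcode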
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
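/// For example (illustrative):
/// \code
///   struct S { struct { int b[4]; } a; };
///   unsigned long Off = __builtin_offsetof(struct S, a.b[2]);
/// \endcode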
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
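/// Source-level form being checked (Microsoft extension; illustrative,
/// with 'N::f' as a placeholder name):
/// \code
///   __if_exists(N::f) {
///     // compiled only when N::f can be found
///   }
/// \endcode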
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
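/// For example (illustrative), defaulting 'operator<=>' over an int member
/// requires 'std::strong_ordering' and corresponds to DefaultedOperator:
/// \code
///   #include <compare>
///   struct P { int x; auto operator<=>(const P &) const = default; };
/// \endcode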
/// Look up the specified comparison category type in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The comparison category type corresponding to the specified
/// Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
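/// Illustrative usage sketch ('S', 'Loc', and 'Dtor' are placeholders):
/// fold each callee's specification into the collected data, then read
/// off the computed specification.
/// \code
///   Sema::ImplicitExceptionSpecification Spec(S);
///   Spec.CalledDecl(Loc, Dtor); // e.g. a base subobject's destructor
///   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();
/// \endcode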
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
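/// Source-level forms these model (illustrative):
/// \code
///   template <typename... Ts> auto sum(Ts... ts) {
///     return (ts + ...);     // unary right fold
///   }
///   template <typename... Ts> auto sum0(Ts... ts) {
///     return (0 + ... + ts); // binary left fold
///   }
/// \endcode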
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
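/// Illustrative sketch ('S' and 'RD' are placeholders): temporarily permit
/// 'this' while processing a late-parsed piece of a const member function:
/// \code
///   Sema::CXXThisScopeRAII ThisScope(
///       S, RD, Qualifiers::fromCVRMask(Qualifiers::Const));
///   // within this scope, 'this' has the matching const-qualified type
/// \endcode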
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true on failure, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
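/// For example (illustrative; 'II', 'IdLoc', 'CCLoc', 'CurScope', and 'SS'
/// are placeholders): after the parser consumes "ident::", package the
/// pieces and hand them to semantic analysis:
/// \code
///   Sema::NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);
///   bool Failed = S.ActOnCXXNestedNameSpecifier(
///       CurScope, IdInfo, /*EnteringContext=*/false, SS);
/// \endcode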
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number the lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture, applying any
/// implicit conversions (such as an lvalue-to-rvalue conversion) when the
/// initializer is not being used to initialize a reference.
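///
/// For example (an illustrative snippet):
/// \code
/// int n = 0;
/// auto byVal = [m = n] {};  // lvalue-to-rvalue conversion applied to 'n'
/// auto byRef = [&r = n] {}; // initializes a reference; no such conversion
/// \endcode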
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name-lookup purposes for a lambda init-capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
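///
/// For example (an illustrative snippet):
/// \code
/// auto l = [](int x) { return x; };
/// int (*fp)(int) = l; // the conversion function's result type is
///                     // 'int (*)(int)'
/// \endcode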
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it sets up
/// the information needed for IR generation to produce the real body of the
/// function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
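///
/// For example (an illustrative snippet):
/// \code
/// template<typename T> requires sizeof(T) == 4   // error: 'sizeof(T) == 4'
/// void f();                                      // is not a primary-expression
/// template<typename T> requires (sizeof(T) == 4) // OK
/// void g();
/// \endcode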
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
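///
/// For example (an illustrative snippet):
/// \code
/// template<typename T> concept Small = sizeof(T) <= 4;
/// template<typename T> concept SmallAndAligned =
///     Small<T> && alignof(T) <= 4;
/// // A declaration constrained by SmallAndAligned is at least as
/// // constrained as one constrained by Small, because its normalized
/// // constraints include (and thus subsume) Small's.
/// \endcode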
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 is not at least as constrained as D2, but would
/// have been if a pair of atomic constraints involved had been declared in
/// a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator.
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a base class specifier and, if well-formed,
/// build a CXXBaseSpecifier for it.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
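///
/// For example (an illustrative snippet):
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; }; // OK: 'D*' is covariant with 'B*'
/// \endcode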
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
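// A hedged usage sketch; the diagnostic ID and selector pairing below are
// illustrative rather than a documented contract:
//   RequireNonAbstractType(Loc, ReturnType, diag::err_abstract_type_in_decl,
//                          AbstractReturnType);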
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
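///
/// A hedged usage sketch against LookupTemplateName (declared below); the
/// variable names are illustrative:
/// \code
/// // After a 'template' keyword, the name must be a template:
/// LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
///                    MemberOfUnknownSpecialization,
///                    RequiredTemplateKind(TemplateKWLoc));
/// // In a context that unconditionally requires a template name:
/// LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
///                    MemberOfUnknownSpecialization,
///                    RequiredTemplateKind(TemplateNameIsRequired));
/// \endcode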
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
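///
/// For example (an illustrative snippet):
/// \code
/// template<typename T> struct X { X(T); };
/// X(int) -> X<int>; // 'X' names a deduction guide here
/// \endcode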
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
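///
/// For example (an illustrative snippet):
/// \code
/// template<typename T, typename U = int> struct X {};
/// X<float> x; // Converted receives {float, int}; the defaulted
///             // argument for U is substituted during checking
/// \endcode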
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
/// A requires-clause.
UPPC_RequiresClause,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
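///
/// For example (an illustrative snippet):
/// \code
/// template<typename ...Ts> void f(Ts ...ts);
/// template<typename ...Ts> void g() {
///   f(Ts());    // error: 'Ts' is an unexpanded parameter pack
///   f(Ts()...); // OK: the pack is expanded
/// }
/// \endcode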
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
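// Illustrative sketch (not part of the original header): expanding a pattern
// whose parameter packs all have the same, known length.
//
// \code
//   template<typename ...Ts, typename ...Us>
//   struct X { std::tuple<std::pair<Ts, Us>...> Data; };
//   // Ts = {int, char}, Us = {float, double}: ShouldExpand is set and
//   // NumExpansions becomes 2.
//   // Ts = {int}, Us = {float, double}: an error is reported, because the
//   // packs in the pattern have different lengths.
// \endcode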
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
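// Illustrative sketch (not part of the original header): 'sizeof...' can be
// answered from the substituted pack size without expanding the pack.
//
// \code
//   template<typename ...Ts> constexpr unsigned Count = sizeof...(Ts);
//   static_assert(Count<int, char> == 2, "two elements in the pack");
// \endcode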
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
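// Illustrative sketch (not part of the original header): source-level
// examples that would map onto some of the result codes above.
//
// \code
//   template<typename T> void f(T, T);
//   f(1, 2);   // TDK_Success: T deduced as int
//   f(1, 'a'); // TDK_Inconsistent: T deduced as both int and char
//
//   template<typename T> void g();
//   g();       // TDK_Incomplete: no value deduced for T
//
//   template<typename T> void h(T);
//   h(1, 2);   // TDK_TooManyArguments
// \endcode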
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
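// Illustrative sketch (not part of the original header): DeduceAutoType is
// the engine behind declarations such as the following, writing the deduced
// type into \c Result.
//
// \code
//   auto x = 42;   // deduced as 'int'
//   auto &r = x;   // deduced as 'int &'
//   auto f() { return x; } // return type deduced via DeduceReturnType
// \endcode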
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing.
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
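// Illustrative sketch (not part of the original header): each active context
// corresponds to one "in instantiation of ..." note in diagnostics, e.g.:
//
// \code
//   template<typename T> struct A { using type = typename T::type; };
//   A<int> a; // error inside A<int>, with the note
//             // "in instantiation of template class 'A<int>' requested here"
// \endcode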
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
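// Usage sketch (an assumption, not from the original header): select the
// I-th element of every parameter pack while transforming a pattern once per
// expansion; the previous index is restored on scope exit.
//
// \code
//   for (unsigned I = 0; I != *NumExpansions; ++I) {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//     // ... transform the pattern for the I-th expansion ...
//   }
// \endcode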
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
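// Usage sketch (an assumption, not from the original header): push an
// instantiation record before instantiating an entity, and bail out if the
// depth limit was exceeded or the specialization is already in flight.
//
// \code
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid() || Inst.isAlreadyInstantiating())
//     return true;
//   // ... perform the instantiation; the destructor pops the record ...
// \endcode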
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr]p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
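// Usage sketch (an assumption, not from the original header): wrap a
// speculative substitution in a trap so errors are recorded rather than
// emitted, then query the trap afterwards.
//
// \code
//   SFINAETrap Trap(*this);
//   ExprResult E = SubstExpr(Pattern, TemplateArgs);
//   if (E.isInvalid() || Trap.hasErrorOccurred())
//     return TDK_SubstitutionFailure;
// \endcode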
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
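// Usage sketch (an assumption, not from the original header): both eager
// instantiation scopes follow the same save / perform / restore protocol
// around a region that may enqueue instantiations of its own.
//
// \code
//   {
//     LocalEagerInstantiationScope Scope(*this);
//     // ... code that may queue local implicit instantiations ...
//     Scope.perform(); // flush what was queued within this scope
//   } // the destructor restores the previously pending queue
// \endcode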
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
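// Usage sketch (an assumption, not from the original header): record each
// parameter's info in order, then fetch a pointer that is null whenever every
// entry is trivial.
//
// \code
//   ExtParameterInfoBuilder ParamInfos;
//   for (unsigned I = 0; I != NumParams; ++I)
//     ParamInfos.set(I, Proto->getExtParameterInfo(I));
//   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
// \endcode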
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
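// Illustrative sketch (not part of the original header): when instantiating
// the body below, SubstExprs expands 'args...' so that Outputs receives one
// substituted expression per pack element.
//
// \code
//   template<typename ...Ts> void g(Ts ...args) {
//     f(args...); // with Ts = {int, char}, Outputs gets two expressions
//   }
// \endcode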
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// Optional arguments; the number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
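// Illustrative sketch (not part of the original header): the three message
// kinds as they appear in source.
//
// \code
//   [super init];       // ObjCSuperMessage
//   [myObject doWork];  // ObjCInstanceMessage
//   [MyClass alloc];    // ObjCClassMessage
// \endcode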
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaAlignPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on well-formed '\#pragma clang __debug dump II'.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
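// A minimal sketch of how these flags are typically toggled from source,
// assuming no fast-math flags on the command line:
//
//   #pragma float_control(precise, off) // sets reassociate/nsz/reciprocal/
//                                       // approx-func; isPreciseFPEnabled()
//                                       // then returns false
//   #pragma float_control(precise, on)  // restores precise semantics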
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
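// Illustrative source range tracked by OptimizeOffPragmaLocation:
//
//   #pragma clang optimize off
//   void slowDebugHelper();   // hypothetical; receives 'optnone' via
//                             // AddRangeBasedOptnone below
//   #pragma clang optimize on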
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
ExprResult &Result);
template <typename AttrType>
void AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelSYCLSingleArgFunctionAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
template <typename AttrType>
void addIntelSYCLTripleArgFunctionAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
void addSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD,
bool CheckValueDependent = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() is not
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// extension name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
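// A minimal sketch of the intended flow, assuming the parser has just seen
// '#pragma OPENCL EXTENSION my_ext : begin' ('my_ext' is a hypothetical
// extension name): it calls setCurrentOpenCLExtension("my_ext");
// declarations parsed until the matching ': end' are tagged via the
// setCurrentOpenCLExtensionForType/Decl helpers below; later uses while the
// extension is disabled are diagnosed through checkOpenCLDisabledDecl and
// checkOpenCLDisabledTypeDeclSpec.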
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Source locations of the nested '#pragma omp declare target' directives.
SmallVector<SourceLocation, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled because its owning extension
/// is disabled, and emits diagnostics if so.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo for the surrounding scope, if any.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
return OMPDeclareVariantScopes.empty() ? nullptr
: OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// Return true if we are currently inside an `omp begin/end declare variant`
/// scope.
bool isInOpenMPDeclareVariantScope() const {
return !OMPDeclareVariantScopes.empty();
}
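// Illustrative source for the scope tracked here:
//
//   #pragma omp begin declare variant match(device = {arch(x86_64)})
//   int foo();   // hypothetical; renamed with the context's NameSuffix
//   #pragma omp end declare variant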
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \p D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and, if it is,
/// mark the loop control variable used in \p Init for loop initialization as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
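// Illustrative effect: given '#pragma omp for' followed by
// 'for (int i = 0; i < n; ++i)', the loop control variable 'i' seen in Init
// is treated as private by default ('n' is a hypothetical variable).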
/// Called on a correct id-expression from '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<StringRef> Assumptions,
bool SkippedClauses);
/// Check if there is an active scoped `omp begin assumes` directive.
bool isInOpenMPAssumeScope() const { return !OMPAssumeScoped.empty(); }
/// Check if there is an active global `omp assumes` directive.
bool hasGlobalOpenMPAssumes() const { return !OMPAssumeGlobal.empty(); }
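// Illustrative inputs: '#pragma omp assumes no_openmp_routines' populates
// OMPAssumeGlobal, while a '#pragma omp begin assumes ...' /
// '#pragma omp end assumes' pair pushes and pops OMPAssumeScoped.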
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
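// The Start/CombinerStart/CombinerEnd/InitializerStart/InitializerEnd/End
// sequence above corresponds to parsing a directive such as the following
// (illustrative; 'merge' and 'MyVec' are hypothetical names):
//
//   #pragma omp declare reduction(merge : MyVec : omp_out += omp_in) initializer(omp_priv = MyVec())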
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of a declare target region, i.e. '#pragma omp declare
/// target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end
/// declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on a correct id-expression from '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
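// Illustrative region tracked by DeclareTargetNesting:
//
//   #pragma omp declare target
//   int DeviceVisibleGlobal;   // hypothetical declaration
//   #pragma omp end declare target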
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
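// A minimal sketch of the expected bracketing in a caller (hypothetical
// parser code; error handling omitted):
//
//   ActOnOpenMPRegionStart(llvm::omp::OMPD_parallel, getCurScope());
//   StmtResult AStmt = parseAssociatedStatement(); // hypothetical helper
//   StmtResult Dir = ActOnOpenMPRegionEnd(AStmt, Clauses);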
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks the '\#pragma omp declare variant' variant function and the
/// original function after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None if the function and variant function are not compatible with
/// the pragma; otherwise, the pair of the original function and the variant
/// reference expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
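// Parameter mapping for an illustrative 'schedule(monotonic: dynamic, 4)':
// M1/M1Loc describe the 'monotonic' modifier, M2 is unspecified,
// Kind/KindLoc describe the 'dynamic' kind, and ChunkSize is the
// expression '4'.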
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
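// For an illustrative 'reduction(inscan, + : sum)': Modifier carries
// 'inscan', ReductionId names the '+' operator, and VarList holds the
// reference to the (hypothetical) variable 'sum'.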
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result has the given value kind VK (an rvalue by default).
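///
/// A usage sketch, not taken from this header (E is a hypothetical Expr*
/// already known to have integral type):
///
///   // Coerce E to bool, merging into an existing implicit cast if present.
///   E = ImpCastExprToType(E, Context.BoolTy, CK_IntegralToBoolean).get();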
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
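///
/// A usage sketch (CallLoc, FDecl, Proto, and Args are hypothetical values
/// a caller would already have in hand):
///
///   SmallVector<Expr *, 8> AllArgs;
///   if (GatherArgumentsForCall(CallLoc, FDecl, Proto, /*FirstParam=*/0,
///                              Args, AllArgs, VariadicFunction))
///     return ExprError(); // GatherArgumentsForCall returns true on error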
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If the operands aren't both arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
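//
// A sketch of the typical call pattern when checking a binary operator
// (LHS and RHS are hypothetical ExprResult operands):
//
//   QualType ResultTy =
//       UsualArithmeticConversions(LHS, RHS, Loc, ACK_Arithmetic);
//   if (LHS.isInvalid() || RHS.isInvalid() || ResultTy.isNull())
//     return QualType();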
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointers types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointers types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointers types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointers types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright; it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
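///
/// A sketch of the common pattern (ConvTy, Loc, and the operand types are
/// hypothetical values produced by an assignment check):
///
///   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType,
///                                RHS.get(), AA_Assigning))
///     return QualType();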
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
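///
/// A sketch pairing this check with DiagnoseAssignmentResult (LHSType, RHS,
/// and Loc are hypothetical):
///
///   AssignConvertType ConvTy =
///       CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (RHS.isInvalid() ||
///       DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                                RHS.get()->getType(), RHS.get(),
///                                AA_Assigning))
///     return ExprError();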
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
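/// A sketch of how a statement action might build and consume a condition
/// (S, Loc, and Cond are hypothetical values from the parser):
///
///   ConditionResult CR =
///       ActOnCondition(S, Loc, Cond, ConditionKind::Boolean);
///   if (CR.isInvalid())
///     return StmtError();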
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted expression, or an invalid ExprResult on error
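///
/// A usage sketch (IfLoc and Cond are hypothetical):
///
///   ExprResult Converted = CheckBooleanCondition(IfLoc, Cond);
///   if (Converted.isInvalid())
///     return StmtError();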
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Converts the expression to bool (C++); the
/// result is an invalid ExprResult if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. The result is an invalid
/// ExprResult on failure. Can optionally return the value of the expression.
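///
/// A usage sketch (E is a hypothetical expression expected to be an ICE):
///
///   llvm::APSInt Value;
///   ExprResult ICE = VerifyIntegerConstantExpression(E, &Value);
///   if (ICE.isInvalid())
///     return ExprError();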
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// The result is an invalid ExprResult on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD) {
return targetDiag(Loc, PD.getDiagID()) << PD;
}
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas are host device functions by default, unless they have an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p VD satisfy CUDA restrictions. In case of
/// error, emits the appropriate diagnostic and invalidates \p VD.
///
/// CUDA allows only empty constructors as initializers for global variables
/// (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are
/// implicitly static in CUDA). One exception is that CUDA allows constant
/// initializers for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
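// A sketch of how the target-specific builtin checkers above typically use
// these helpers (the argument index and bounds are hypothetical):
//
//   // Require the second argument to be a constant in [0, 31].
//   if (SemaBuiltinConstantArgRange(TheCall, /*ArgNum=*/1, /*Low=*/0,
//                                   /*High=*/31))
//     return true;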
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL Kernels here and handle separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for the current compilation unit this
// Sema is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission also skips the
// useful notes that show where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
getDiagnostics(), getLangOpts().SYCLUnnamedLambda, *this);
return *SyclIntHeader.get();
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevice();
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for the device yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred functions calls that may be not
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Tells whether given variable is a SYCL explicit SIMD extension's "private
/// global" variable - global variable in the private address space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && getLangOpts().SYCLExplicitSIMD &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::opencl_private);
}
};
template <typename AttrType>
void Sema::addIntelSYCLSingleArgFunctionAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
Optional<llvm::APSInt> ArgVal = E->getIntegerConstantExpr(getASTContext());
if (!ArgVal) {
Diag(E->getExprLoc(), diag::err_attribute_argument_type)
<< CI.getAttrName() << AANT_ArgumentIntegerConstant
<< E->getSourceRange();
return;
}
int32_t ArgInt = ArgVal->getSExtValue();
if (CI.getParsedKind() == ParsedAttr::AT_SYCLIntelNumSimdWorkItems ||
CI.getParsedKind() == ParsedAttr::AT_IntelReqdSubGroupSize) {
if (ArgInt <= 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< CI.getAttrName() << /*positive*/ 0;
return;
}
}
if (CI.getParsedKind() == ParsedAttr::AT_SYCLIntelMaxGlobalWorkDim) {
if (ArgInt < 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< CI.getAttrName() << /*non-negative*/ 1;
return;
}
if (ArgInt > 3) {
Diag(E->getBeginLoc(), diag::err_attribute_argument_out_of_range)
<< CI.getAttrName() << 0 << 3 << E->getSourceRange();
return;
}
}
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
template <typename AttrInfo>
static bool handleMaxWorkSizeAttrExpr(Sema &S, const AttrInfo &AI,
const Expr *E, unsigned &Val,
unsigned Idx) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
Optional<llvm::APSInt> ArgVal =
E->getIntegerConstantExpr(S.getASTContext());
if (!ArgVal) {
S.Diag(AI.getLocation(), diag::err_attribute_argument_type)
<< &AI << AANT_ArgumentIntegerConstant << E->getSourceRange();
return false;
}
if (ArgVal->isNegative()) {
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
return true;
}
Val = ArgVal->getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< &AI << E->getSourceRange();
return false;
}
}
return true;
}
template <typename AttrType>
static bool checkMaxWorkSizeAttrArguments(Sema &S, Expr *XDimExpr,
Expr *YDimExpr, Expr *ZDimExpr,
const AttrType &Attr) {
// Accept template arguments for now as they depend on something else.
// We'll get to check them when they eventually get instantiated.
if (XDimExpr->isValueDependent() ||
(YDimExpr && YDimExpr->isValueDependent()) ||
(ZDimExpr && ZDimExpr->isValueDependent()))
return false;
unsigned XDim = 0;
if (!handleMaxWorkSizeAttrExpr(S, Attr, XDimExpr, XDim, 0))
return true;
unsigned YDim = 0;
if (YDimExpr && !handleMaxWorkSizeAttrExpr(S, Attr, YDimExpr, YDim, 1))
return true;
unsigned ZDim = 0;
if (ZDimExpr && !handleMaxWorkSizeAttrExpr(S, Attr, ZDimExpr, ZDim, 2))
return true;
return false;
}
template <typename WorkGroupAttrType>
void Sema::addIntelSYCLTripleArgFunctionAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
WorkGroupAttrType TmpAttr(Context, CI, XDimExpr, YDimExpr, ZDimExpr);
if (checkMaxWorkSizeAttrArguments(*this, XDimExpr, YDimExpr, ZDimExpr,
TmpAttr))
return;
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
template <typename AttrType>
void Sema::AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E) {
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
E = ICE.get();
}
if (IntelFPGAPrivateCopiesAttr::classof(&TmpAttr)) {
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
template <typename AttrType>
void Sema::AddOneConstantPowerTwoValueAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E) {
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
Expr::EvalResult Result;
E->EvaluateAsInt(Result, Context);
llvm::APSInt Value = Result.Val.getInt();
if (!Value.isPowerOf2()) {
Diag(CI.getLoc(), diag::err_attribute_argument_not_power_of_two)
<< &TmpAttr;
return;
}
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *BBA = D->getAttr<IntelFPGABankBitsAttr>()) {
unsigned NumBankBits = BBA->args_size();
if (NumBankBits != Value.ceilLogBase2()) {
Diag(TmpAttr.getLocation(), diag::err_bankbits_numbanks_conflicting);
return;
}
}
}
E = ICE.get();
}
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
// We are adding a user NumBanks, drop any implicit default.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *NBA = D->getAttr<IntelFPGANumBanksAttr>())
if (NBA->isImplicit())
D->dropAttr<IntelFPGANumBanksAttr>();
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
template <typename FPGALoopAttrT>
FPGALoopAttrT *Sema::BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E) {
if (!E && !(A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce))
return nullptr;
if (E && !E->isInstantiationDependent()) {
Optional<llvm::APSInt> ArgVal = E->getIntegerConstantExpr(getASTContext());
if (!ArgVal) {
Diag(E->getExprLoc(), diag::err_attribute_argument_type)
<< A.getAttrName() << AANT_ArgumentIntegerConstant
<< E->getSourceRange();
return nullptr;
}
int Val = ArgVal->getSExtValue();
if (A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGAInitiationInterval ||
A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce) {
if (Val <= 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< A.getAttrName() << /* positive */ 0;
return nullptr;
}
} else if (A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGAMaxConcurrency ||
A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGAMaxInterleaving ||
A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGASpeculatedIterations) {
if (Val < 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< A.getAttrName() << /* non-negative */ 1;
return nullptr;
}
} else {
llvm_unreachable("unknown sycl fpga loop attr");
}
}
return new (Context) FPGALoopAttrT(Context, A, E);
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_unaryop__minv_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_uint32
// op(A') function: GB_tran__minv_uint8_uint32
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint8_uint32
(
uint8_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint8_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
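//------------------------------------------------------------------------------
// Editor's sketch (not part of GraphBLAS): what the GB_CAST_OP macro above
// expands to for this operator, written out as a plain loop.
// demo_iminv_unsigned is a hypothetical stand-in for GB_IMINV_UNSIGNED (x, 8);
// the exact handling of x == 0 is defined in GB.h and may differ from this stub.
//------------------------------------------------------------------------------
static inline uint8_t demo_iminv_unsigned (uint8_t x)
{
    // integer multiplicative inverse: 1/x in integer arithmetic (stub)
    return (x == 1) ? 1 : 0 ;
}

static inline void demo_unop_minv_uint8_uint32
(
    uint8_t *Cx,                // output array, size anz
    const uint32_t *Ax,         // input array, size anz
    int64_t anz,
    int nthreads
)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;             // GB_GETA
        uint8_t x = (uint8_t) aij ;         // GB_CASTING
        Cx [p] = demo_iminv_unsigned (x) ;  // GB_OP
    }
}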
|
spi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
int main(int argc, char **argv) {
//seed random number generator
// Q2b: get the number of threads to run with from argv and
// add OpenMP API code to set number of threads here
int Nthreads = (argc > 1) ? atoi(argv[1]) : 1;
omp_set_num_threads(Nthreads);
struct drand48_data *drandData;
drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
// Q2c: add an OpenMP parallel region here, wherein each thread initializes
// one entry in drandData using srand48_r and seed based on thread number
#pragma omp parallel
{
int tid = omp_get_thread_num();
long int seed = (long int) tid; // seed each generator based on thread number
srand48_r(seed, drandData + tid);
}
long long int Ntrials = 10000000;
//need running tallies
long long int Ntotal=0;
long long int Ncircle=0;
for (long long int n=0; n<Ntrials; n++) {
double rand1;
double rand2;
//generate two random numbers (use the thread id to offset drandData)
int tid = omp_get_thread_num();
drand48_r(drandData + tid, &rand1);
drand48_r(drandData + tid, &rand2);
double x = -1 + 2*rand1; //shift to [-1,1]
double y = -1 + 2*rand2;
//check if it's in the circle
if (sqrt(x*x+y*y)<=1) Ncircle++;
Ntotal++;
if (n%100 == 0) {
double pi = 4.0*Ncircle/ (double) Ntotal; //Ntotal avoids division by zero at n == 0
printf("Our estimate of pi is %g \n", pi);
}
}
double pi = 4.0*Ncircle/ (double) (Ntotal);
printf("Our final estimate of pi is %g \n", pi);
free(drandData);
return 0;
}
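/* Editor's sketch (not part of the assignment skeleton): once the Q2 pieces
 * above are in place, the tally loop itself can be parallelized with one
 * drand48_data state per thread and an OpenMP reduction. The function name
 * estimate_pi_parallel is illustrative, not from the original exercise. */
double estimate_pi_parallel(long long int Ntrials, int Nthreads) {
  struct drand48_data *drandData =
      (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
  long long int Ncircle = 0;
  omp_set_num_threads(Nthreads);

  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    srand48_r((long int) tid, drandData + tid); //per-thread seed

    #pragma omp for reduction(+:Ncircle)
    for (long long int n=0; n<Ntrials; n++) {
      double rand1, rand2;
      drand48_r(drandData + tid, &rand1);
      drand48_r(drandData + tid, &rand2);
      double x = -1 + 2*rand1; //shift to [-1,1]
      double y = -1 + 2*rand2;
      if (x*x + y*y <= 1) Ncircle++; //no sqrt needed: compare squared radius
    }
  }

  free(drandData);
  return 4.0*Ncircle/ (double) Ntrials;
}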
|
build_tree.c | /*******************************************************************************
* 2pt/build_tree.c: this file is part of the FCFC program.
* FCFC: Fast Correlation Function Calculator.
* Github repository:
https://github.com/cheng-zhao/FCFC
* Copyright (c) 2020 -- 2021 Cheng Zhao <zhaocheng03@gmail.com> [MIT license]
*******************************************************************************/
#include "define.h"
#include "build_tree.h"
#include "read_file.h"
#include "kdtree.h"
#include <stdio.h>
/*============================================================================*\
Functions for tree creation and deconstruction
\*============================================================================*/
/******************************************************************************
Function `tree_create`:
Construct the tree from an input catalogue for pair counting.
Arguments:
* `conf`: structure for storing configurations;
* `cf`: structure for correlation function evaluations;
* `idx`: index of the catalogue to be processed;
* `type`: type of the tree.
Return:
Address of the tree on success; NULL on error.
******************************************************************************/
void *tree_create(const CONF *conf, CF *cf, const int idx, const int type) {
if (!conf) {
P_ERR("configuration parameters are not loaded\n");
return NULL;
}
if (!cf) {
P_ERR("correlation function evaluation has not been initialised\n");
return NULL;
}
if (idx < 0 || idx >= conf->ninput) {
P_ERR("unexpected index of the catalog: %d\n", idx);
return NULL;
}
printf("Construct the tree for catalog '%c' ...", cf->label[idx]);
if (conf->verbose) printf("\n");
fflush(stdout);
/* Read catalogue from file. */
if (conf->ftype[idx] == 0) {
const size_t skip = (conf->skip) ? conf->skip[idx] : DEFAULT_ASCII_SKIP;
const char cmt = (conf->comment) ? conf->comment[idx] : DEFAULT_ASCII_COMMENT;
const char *wt = (conf->has_wt[idx]) ? conf->wt[idx] : NULL;
const char *sel = (conf->sel) ? conf->sel[idx] : NULL;
if (read_ascii_data(conf->input[idx], skip, cmt, conf->fmtr[idx],
conf->pos + idx * 3, wt, sel, cf->data + idx, cf->ndata + idx,
conf->verbose)) return NULL;
}
else if (conf->ftype[idx] == 2) {
const char *wt = (conf->has_wt[idx]) ? conf->wt[idx] : NULL;
const char *sel = (conf->sel) ? conf->sel[idx] : NULL;
if (read_hdf5_data(conf->input[idx], conf->group[idx],
conf->pos + idx * 3, wt, sel, cf->data + idx, cf->ndata + idx,
conf->verbose)) return NULL;
}
/* Apply coordinate conversion if necessary. */
if ((!conf->cnvt && DEFAULT_COORD_CNVT == true) ||
(conf->cnvt && conf->cnvt[idx])) {
if (cnvt_coord(conf, cf->data[idx], cf->ndata[idx], cf->coord)) return NULL;
}
/* Precompute the squared distance between tracers and the origin,
and compute the total weights if necessary. */
if (cf->wt[idx]) {
double sum = 0;
#ifdef OMP
#pragma omp parallel for reduction(+:sum)
#endif
for (size_t i = 0; i < cf->ndata[idx]; i++) {
cf->data[idx][i].s = cf->data[idx][i].x[0] * cf->data[idx][i].x[0] +
cf->data[idx][i].x[1] * cf->data[idx][i].x[1] +
cf->data[idx][i].x[2] * cf->data[idx][i].x[2];
sum += cf->data[idx][i].w;
}
cf->wdata[idx] = sum;
}
else {
#ifdef OMP
#pragma omp parallel for
#endif
for (size_t i = 0; i < cf->ndata[idx]; i++) {
cf->data[idx][i].s = cf->data[idx][i].x[0] * cf->data[idx][i].x[0] +
cf->data[idx][i].x[1] * cf->data[idx][i].x[1] +
cf->data[idx][i].x[2] * cf->data[idx][i].x[2];
}
cf->wdata[idx] = (double) cf->ndata[idx];
}
/* Construct the tree. */
DATA tmp;
void *tree = NULL;
int err = 0;
switch (type) {
case FCFC_TREE_TYPE_KDTREE:
tree = kdtree_build(cf->data[idx], cf->ndata[idx], &tmp, &err);
if (err) return NULL;
if (conf->verbose) printf(" k-D tree constructed for the catalog\n");
break;
default:
P_ERR("unsupported tree type\n");
return NULL;
}
printf(FMT_DONE);
return tree;
}
/******************************************************************************
Function `tree_destroy`:
Deconstruct a tree used for pair counting.
Arguments:
* `tree`: address of the tree;
* `type`: type of the tree.
******************************************************************************/
void tree_destroy(void *tree, const int type) {
if (!tree) return;
switch (type) {
case FCFC_TREE_TYPE_KDTREE:
kdtree_free((KDT *) tree);
break;
default:
P_WRN("unsupported tree type\n");
}
}
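/******************************************************************************
  Editor's sketch (not part of FCFC): the intended create/destroy lifecycle of
  the two functions above. It assumes `conf` and `cf` were already populated
  by the program's configuration and correlation-function setup routines,
  which live outside this file.
******************************************************************************/
static void example_tree_lifecycle(const CONF *conf, CF *cf) {
  void *tree = tree_create(conf, cf, 0, FCFC_TREE_TYPE_KDTREE);
  if (!tree) return;            /* errors were already reported by tree_create */
  /* ... run pair counting with the tree ... */
  tree_destroy(tree, FCFC_TREE_TYPE_KDTREE);
}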
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
inline cudaDeviceProp cuda_get_device_prop() {
int device;
CUDA_CALL(cudaGetDevice(&device));
cudaDeviceProp deviceProp;
CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
return deviceProp;
}
/*!
* \brief Get the number of blocks for cuda kernel given N
*/
inline int cuda_get_num_blocks(const int N) {
using namespace mshadow::cuda;
return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
template<>
inline int get_num_threads<gpu>(const int N) {
using namespace mshadow::cuda;
return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
template<>
inline int get_num_threads<cpu>(const int N) {
return omp_get_max_threads();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
if (NDim == 0) { \
} else if (NDim == 1) { \
const int ndim = 1; \
{__VA_ARGS__} \
} else if (NDim == 2) { \
const int ndim = 2; \
{__VA_ARGS__} \
} else if (NDim == 3) { \
const int ndim = 3; \
{__VA_ARGS__} \
} else if (NDim == 4) { \
const int ndim = 4; \
{__VA_ARGS__} \
} else if (NDim == 5) { \
const int ndim = 5; \
{__VA_ARGS__} \
} else { \
LOG(FATAL) << "ndim=" << NDim << "too large "; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
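/*!
 * Editor's sketch (hypothetical helper, not MXNet API): how KERNEL_ASSIGN
 * dispatches on the request type. kWriteTo/kWriteInplace store val,
 * kAddTo accumulates it, and kNullOp leaves the output untouched.
 */
inline void kernel_assign_demo(float *out, OpReqType req, float val) {
  KERNEL_ASSIGN(out[0], req, val);  // out[0] = val, out[0] += val, or no-op
}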
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
}
return ret;
}
/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
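/* Editor's illustration (hypothetical helper, not MXNet API): the same
 * row-major arithmetic on plain int arrays. For shape (2, 3, 4), coordinate
 * (1, 1, 1) ravels to (1*3 + 1)*4 + 1 = 17, and unravel(17) recovers it.
 * The ravel above additionally masks out coordinates along broadcast
 * dimensions via the (shape[i] > coord[i]) factor. */
inline int ravel3_demo(const int coord[3], const int shape[3]) {
  int ret = 0;
  for (int i = 0; i < 3; ++i)
    ret = ret * shape[i] + coord[i];  // accumulate row-major index
  return ret;
}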
/* Compute the dot product of two vectors */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i)
ret += coord[i] * stride[i];
return ret;
}
/* Combining unravel and dot */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
int ret = 0;
#pragma unroll
for (int i = ndim-1, j = idx; i >=0; --i) {
int tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
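/* Editor's illustration (hypothetical helper, not MXNet API): calc_stride
 * assigns stride 0 to size-1 dimensions, so stepping along a broadcast axis
 * never moves the flat index. For shape (3, 1, 4) the strides are (4, 0, 1),
 * and element (i, j, k) maps to i*4 + k for every j. */
inline index_t broadcast_index_demo(index_t i, index_t j, index_t k) {
  const index_t stride[3] = {4, 0, 1};  // calc_stride of shape (3, 1, 4)
  return i * stride[0] + j * stride[1] + k * stride[2];  // j is ignored
}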
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx, const Shape<ndim>& stride) {
++(*coord)[ndim-1];
*idx += stride[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx = *idx + stride[i-1] - shape[i] * stride[i];
}
}
/* Increment coordinates and modify index */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
index_t* idx1, const Shape<ndim>& stride1,
index_t* idx2, const Shape<ndim>& stride2) {
++(*coord)[ndim-1];
*idx1 += stride1[ndim-1];
*idx2 += stride2[ndim-1];
#pragma unroll
for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
(*coord)[i] -= shape[i];
++(*coord)[i-1];
*idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
*idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
}
}
/*!
* \brief Simple copy data from one blob to another
* \param to Destination blob
* \param from Source blob
*/
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
CHECK_EQ(from.Size(), to.Size());
CHECK_EQ(from.dev_mask(), to.dev_mask());
MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
if (to.type_flag_ == from.type_flag_) {
mshadow::Copy(to.FlatTo1D<xpu, DType>(), from.FlatTo1D<xpu, DType>(), s);
} else {
MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
})
}
})
}
/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
/* \brief Backward calc with grad
* \param a - output grad
* \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
* \return input grad
*/
template<typename DType, typename ...Args>
MSHADOW_XINLINE static DType Map(DType a, Args... args) {
return DType(a * GRAD_OP::Map(args...));
}
};
/*! \brief Select assignment operation based upon the req value
* Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
*/
template<typename OP, int req>
struct op_with_req {
typedef OP Operation;
/*! \brief input is one tensor */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
}
/*! \brief inputs are two tensors */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
}
/*! \brief input is tensor and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
}
/*! \brief No inputs (ie fill to constant value) */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out) {
KERNEL_ASSIGN(out[i], req, OP::Map());
}
/*! \brief input is single scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(value));
}
/*! \brief inputs are two tensors and a scalar value */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out,
const DType *input_1, const DType *input_2, const DType value) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
}
/*! \brief inputs are three tensors (ie backward grad with binary grad function) */
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out,
const DType *input_1,
const DType *input_2,
const DType *input_3) {
KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
}
};
template<typename OP, typename xpu>
struct Kernel;
template<typename OP>
struct Kernel<OP, cpu> {
/*! \brief Launch CPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads < 2) {
// Zero means not to use OMP, but don't interfere with external OMP behavior
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
}
#else
for (int i = 0; i < N; ++i) {
OP::Map(i, args...);
}
#endif
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (omp_threads <= 1) {
OP::Map(0, N, args...);
} else {
int length = (N + omp_threads - 1) / omp_threads;
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < N; i += length) {
OP::Map(i, i + length > N ? N - i : length, args...);
}
}
#else
OP::Map(0, N, args...);
#endif
}
};
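/* Editor's illustration (hypothetical helper, not MXNet API): LaunchEx above
 * splits [0, N) into contiguous chunks of length ceil(N / omp_threads) and
 * clips the last one. E.g. N = 10 with 4 threads yields chunks 3, 3, 3, 1. */
inline void chunk_bounds_demo(int N, int nthreads, int chunk_id,
                              int *begin, int *len) {
  const int length = (N + nthreads - 1) / nthreads;  // ceiling division
  *begin = chunk_id * length;
  *len = (*begin + length > N) ? (N - *begin) : length;
}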
#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, args...);
}
}
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
OP::Map(i, 1, args...);
}
}
template<typename OP>
struct Kernel<OP, gpu> {
/*! \brief Launch GPU kernel */
template<typename ...Args>
inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
template<typename ...Args>
inline static void LaunchEx(mshadow::Stream<gpu> *s, int N, Args... args) {
using namespace mshadow::cuda;
int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
mxnet_generic_kernel_ex<OP, Args...>
<<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
N, args...);
}
};
#endif // __CUDACC__
/*!
* \brief Set to immediate scalar value kernel
* \tparam val Scalar immediate
*/
template<int val>
struct set_to_int {
// mxnet_op version (when used directly with Kernel<>::Launch())
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out) {
out[i] = DType(val);
}
// mshadow_op version (when used with op_with_req<>)
MSHADOW_XINLINE static int Map() {
return val;
}
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
gol.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Custom includes
#include "gol.h"
/**
* Initialize all variables and structures required by GoL evolution.
*/
void initialize(life_t *life) {
// 1. Initialize the random seed
srand(life->seed);
// 2. Check if an input file was specified in the args
// and, in that case, update ncols and nrows.
//
// Use defaults, if no file is present.
FILE *input_ptr = set_grid_dimens_from_file(life);
// 3. Allocate memory for the grid
malloc_grid(life);
// 4. Initialize the grid with DEAD cells
init_empty_grid(life);
// 5. Initialize the grid with ALIVE cells...
if (input_ptr != NULL) { // ...from file, if present...
init_from_file(life, input_ptr);
} else { // ...or randomly, otherwise.
init_random(life);
}
#ifdef GoL_DEBUG
debug(*life);
usleep(1000000);
#endif
}
/**
* Perform GoL evolution for a given amount of generations.
*
* @return tot_gene_time The total time devoted to GoL evolution
*/
double game(life_t *life) {
int t;
struct timeval gstart, gend;
// Initialize the whole GoL grid
initialize(life);
int ncols = life->ncols;
int nrows = life->nrows;
double tot_gene_time = 0.;
double cur_gene_time = 0.;
display(*life, false);
for(t = 0; t < life->timesteps; t++) {
// 1. Track the start time
gettimeofday(&gstart, NULL);
// 2. Evolve the current generation
evolve(life);
// 3. Track the end time
gettimeofday(&gend, NULL);
cur_gene_time = elapsed_wtime(gstart, gend);
tot_gene_time += cur_gene_time;
if (is_big(*life)) {
printf("Generation #%d took %.5f ms\n", t, cur_gene_time);
// If the GoL grid is large, print it (to file)
// only at the end of the last generation
if (t == life->timesteps - 1) {
display(*life, true);
}
} else {
display(*life, true);
}
#ifdef GoL_DEBUG
get_grid_status(*life);
#endif
}
printf("\nEvolved GoL's grid for %d generations - ETA: %.5f ms\n",
life->timesteps, tot_gene_time);
return tot_gene_time;
}
/**
* Perform one evolutionary step of the board, following GoL rules, in this order:
* 1. A cell is born, if it has exactly 3 neighbours;
* 2. A cell dies of loneliness, if it has less than 2 neighbours;
* 3. A cell dies of overcrowding, if it has more than 3 neighbours;
* 4. A cell survives to the next generation, if it doesn't die of loneliness or overcrowding.
*/
void evolve(life_t *life) {
int x, y, i, j, r, c;
int alive_neighbs; // # of alive neighbours
int ncols = life->ncols;
int nrows = life->nrows;
// 1. Evolve every cell in the grid
#ifdef _OPENMP
#pragma omp parallel for private(alive_neighbs, y, i, j, r, c)
#endif
for (x = 0; x < nrows; x++)
for (y = 0; y < ncols; y++) {
alive_neighbs = 0;
// 1.a Check the 3x3 neighbourhood
for (i = x - 1; i <= x + 1; i++)
for (j = y - 1; j <= y + 1; j++) {
/* Compute the actual row/col coordinates in the GoL board. */
// Remember that the board represents a hypothetically infinite world. In order to do that,
// it has to be modelled as a circular matrix, with cells along outer borders considered adjacent to one another.
// By applying the modulo operator, %, we account for this possibility.
c = (i + nrows) % nrows;
r = (j + ncols) % ncols;
if (!(i == x && j == y) // Skip the current cell (x, y)
&& life->grid[c][r] == ALIVE)
alive_neighbs++;
}
// 1.b Apply GoL rules to determine the cell's next state
life->next_grid[x][y] = (alive_neighbs == 3
|| (alive_neighbs == 2
&& life->grid[x][y] == ALIVE)) \
? ALIVE : DEAD;
}
// 2. Replace the old grid with the updated one.
swap_grids(&life->grid, &life->next_grid);
}
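/**
 * Editor's sketch (helper name is illustrative, not from the original): why
 * the neighbourhood code above computes `(i + n) % n` rather than `i % n`.
 * In C, `%` with a negative operand yields a negative result (-1 % 5 == -1),
 * so adding n first guarantees a value in [0, n): wrap(-1, 5) == 4 and
 * wrap(5, 5) == 0, which is exactly the toroidal behaviour the board needs.
 */
static inline int wrap(int i, int n) {
    return (i + n) % n; // valid for i in [-n, 2n), which covers i-1 .. i+1
}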
void cleanup(life_t *life) {
int i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < life->nrows; i++) {
free(life->grid[i]);
free(life->next_grid[i]);
}
free(life->grid);
free(life->next_grid);
}
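/**
 * Editor's sketch (helper name is illustrative, not from the original): the
 * MPI row partitioning performed inline in main() below, factored out. Each
 * of `size` ranks receives nrows/size rows, and the last rank also absorbs
 * the nrows % size remainder.
 */
static inline void partition_rows(int nrows, int size, int rank,
                                  int *from, int *to) {
    int rows_per_process = nrows / size;
    *from = rank * rows_per_process;
    *to = (rank == size - 1) ? nrows - 1
                             : (rank + 1) * rows_per_process - 1;
}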
/************************************
* ================================ *
************************************/
int main(int argc, char **argv) {
struct timeval start, end;
double cum_gene_time, elapsed_prog_wtime;
int nprocs = 1; // # of running processes
life_t life; // GoL's main data structure
gettimeofday(&start, NULL);
// 1. Initialize vars from args
parse_args(&life, argc, argv);
#ifdef _OPENMP
omp_set_num_threads(life.nthreads);
#endif
FILE *input_ptr = set_grid_dimens_from_file(&life);
#ifdef GoL_MPI /* GoL parallel with MPI */
int rows_per_process;
int from; // Boundaries of the slices of data
int to; // each process will take care of
// 2. Initialize MPI environment
int status = MPI_Init(&argc, &argv);
if (status != MPI_SUCCESS) {
fprintf(stderr, "[*] Failed to initialize MPI environment - errcode %d", status);
MPI_Abort(MPI_COMM_WORLD, 1);
}
chunk_t chunk; // Per-process data structure
// 3. Get info from the MPI communicator
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &chunk.rank);
// Pass the size info to all processes
chunk.size = nprocs;
// 4. Launch GoL's evolution
if (chunk.size != 1) { // If there are at least 2 MPI processes
// launch GoL's parallel evolution...
MPI_Barrier(MPI_COMM_WORLD);
// 4.a Calculate the number of rows
// that each process will handle
rows_per_process = life.nrows / chunk.size;
chunk.displacement = life.nrows % chunk.size;
// 4.b Identify the starting row of each process
from = chunk.rank * rows_per_process;
// 4.c Identify the last row of each process.
if (chunk.rank == chunk.size - 1) { // Last process will keep all remaining rows
to = life.nrows - 1;
chunk.nrows = life.nrows - from;
} else {
to = (chunk.rank + 1) * rows_per_process - 1;
chunk.nrows = rows_per_process;
}
chunk.ncols = life.ncols; // Data is split on rows; hence all processes
// will have the same # of columns
initialize_chunk(&chunk, life,
input_ptr, from, to);
double tot_gtime = game_chunk(&chunk, life);
if (chunk.rank == 0) {
cum_gene_time = tot_gtime;
}
MPI_Barrier(MPI_COMM_WORLD);
cleanup_chunk(&chunk);
if(chunk.rank == 0) {
gettimeofday(&end, NULL);
elapsed_prog_wtime = elapsed_wtime(start, end);
}
} else { // ...else fall back to the sequential procedure
cum_gene_time = game(&life);
cleanup(&life);
gettimeofday(&end, NULL);
elapsed_prog_wtime = elapsed_wtime(start, end);
}
status = MPI_Finalize();
if (status != MPI_SUCCESS) {
fprintf(stderr, "[*] Failed to finalize MPI environment - errcode %d", status);
MPI_Abort(MPI_COMM_WORLD, 1);
}
#else /* GoL sequential */
cum_gene_time = game(&life);
cleanup(&life);
gettimeofday(&end, NULL);
elapsed_prog_wtime = elapsed_wtime(start, end);
#endif
// Log to file, if requested
#ifdef GoL_LOG
#ifdef GoL_MPI
if (chunk.rank == 0) { /* Enforce only the rank 0 process logs to file, if MPI was called.
* Indeed the call to MPI_Finalize() doesn't guarantee
* other processes won't execute the following code */
#endif
FILE *log_ptr = init_log_file(life, nprocs);
log_data(log_ptr, life.timesteps, cum_gene_time,
elapsed_prog_wtime);
fflush(log_ptr);
fclose(log_ptr);
#ifdef GoL_MPI
}
#endif
#endif
#ifdef GoL_MPI
if (chunk.rank == 0) {
#endif
printf("\nFinalized the program - ETA: %.5f ms\n\n", elapsed_prog_wtime);
#ifdef GoL_MPI
}
#endif
} |
convolution_winograd_transform_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 6;
const int h_tiles = (h - 2) / 6;
const int tiles = w_tiles * h_tiles;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _r06 = vld1q_f32(r0 + 24);
float32x4_t _r07 = vld1q_f32(r0 + 28);
float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);
float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);
float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);
float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
vst1q_f32(tmp[5][m], _tmp5m);
vst1q_f32(tmp[6][m], _tmp6m);
vst1q_f32(tmp[7][m], _tmp7m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
float* r0_tm_4 = r0_tm_0 + tiles * 16;
float* r0_tm_5 = r0_tm_0 + tiles * 20;
float* r0_tm_6 = r0_tm_0 + tiles * 24;
float* r0_tm_7 = r0_tm_0 + tiles * 28;
for (int m = 0; m < 8; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);
float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);
float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);
float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);
float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
vst1q_f32(r0_tm_4, _r0tm4);
vst1q_f32(r0_tm_5, _r0tm5);
vst1q_f32(r0_tm_6, _r0tm6);
vst1q_f32(r0_tm_7, _r0tm7);
r0_tm_0 += tiles * 32;
r0_tm_1 += tiles * 32;
r0_tm_2 += tiles * 32;
r0_tm_3 += tiles * 32;
r0_tm_4 += tiles * 32;
r0_tm_5 += tiles * 32;
r0_tm_6 += tiles * 32;
r0_tm_7 += tiles * 32;
}
}
}
}
}
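// Editor's sketch (illustrative, not ncnn API): the 8-point input transform
// implemented above with NEON intrinsics, written as a scalar reference on
// one row r[0..7]. The 2-D transform applies this 1-D kernel along both
// dimensions of each 8x8 tile, exactly as the two m-loops above do.
static void winograd63_transform_input_1d_ref(const float r[8], float t[8])
{
    float tmp12a = r[2] + r[6] - r[4] * 4.25f;
    float tmp12b = r[1] + r[5] - r[3] * 4.25f;
    float tmp34a = r[6] + r[2] * 0.25f - r[4] * 1.25f;
    float tmp34b = r[1] * 0.5f - r[3] * 2.5f + r[5] * 2.f;
    float tmp56a = r[6] + (r[2] - r[4] * 1.25f) * 4.f;
    float tmp56b = r[1] * 2.f - r[3] * 2.5f + r[5] * 0.5f;

    t[0] = r[0] - r[6] + (r[4] - r[2]) * 5.25f;
    t[1] = tmp12a + tmp12b;
    t[2] = tmp12a - tmp12b;
    t[3] = tmp34a + tmp34b;
    t[4] = tmp34a - tmp34b;
    t[5] = tmp56a + tmp56b;
    t[6] = tmp56a - tmp56b;
    t[7] = r[7] - r[1] + (r[3] - r[5]) * 5.25f;
}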
static void conv3x3s1_winograd63_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 6;
const int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16 + (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32 + (r5 - r6)
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);
float tmp[6][8][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
const float* output0_tm_6 = output0_tm_0 + tiles * 24;
const float* output0_tm_7 = output0_tm_0 + tiles * 28;
float* output0 = out0.row(i * 6) + (j * 6) * 4;
for (int m = 0; m < 8; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);
float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);
float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
vst1q_f32(tmp[5][m], _tmp5m);
output0_tm_0 += tiles * 32;
output0_tm_1 += tiles * 32;
output0_tm_2 += tiles * 32;
output0_tm_3 += tiles * 32;
output0_tm_4 += tiles * 32;
output0_tm_5 += tiles * 32;
output0_tm_6 += tiles * 32;
output0_tm_7 += tiles * 32;
}
for (int m = 0; m < 6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);
float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);
float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);
float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 4, _out01);
vst1q_f32(output0 + 8, _out02);
vst1q_f32(output0 + 12, _out03);
vst1q_f32(output0 + 16, _out04);
vst1q_f32(output0 + 20, _out05);
output0 += outw * 4;
}
}
}
}
}
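// A scalar reference sketch (illustrative, not part of ncnn; the name is
// assumed) of the 6-point Winograd F(6x6, 3x3) output transform rows computed
// above with NEON intrinsics; it matches the commented otm matrix.
static inline void winograd63_output_transform_1d_ref(const float r[8], float o[6])
{
float a024 = r[1] + r[2], b024 = r[3] + r[4], c024 = r[5] + r[6];
float a135 = r[1] - r[2], b135 = r[3] - r[4], c135 = r[5] - r[6];
o[0] = r[0] + a024 + b024 + c024 * 32.f;
o[1] = a135 + b135 * 2.f + c135 * 16.f;
o[2] = a024 + b024 * 4.f + c024 * 8.f;
o[3] = a135 + b135 * 8.f + c135 * 4.f;
o[4] = a024 + b024 * 16.f + c024 * 2.f;
o[5] = r[7] + a135 + b135 * 32.f + c135;
}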
static void conv3x3s1_winograd43_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 4;
const int h_tiles = (h - 2) / 4;
const int tiles = w_tiles * h_tiles;
// const float itm[6][6] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[6][6][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _r04 = vld1q_f32(r0 + 16);
float32x4_t _r05 = vld1q_f32(r0 + 20);
float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
vst1q_f32(tmp[4][m], _tmp4m);
vst1q_f32(tmp[5][m], _tmp5m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
float* r0_tm_4 = r0_tm_0 + tiles * 16;
float* r0_tm_5 = r0_tm_0 + tiles * 20;
for (int m = 0; m < 6; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
vst1q_f32(r0_tm_4, _r0tm4);
vst1q_f32(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 24;
r0_tm_1 += tiles * 24;
r0_tm_2 += tiles * 24;
r0_tm_3 += tiles * 24;
r0_tm_4 += tiles * 24;
r0_tm_5 += tiles * 24;
}
}
}
}
}
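// A scalar sketch (illustrative, not part of ncnn; the name is assumed) of the
// 6-point Winograd F(4x4, 3x3) input transform applied above, matching the
// commented itm matrix.
static inline void winograd43_input_transform_1d_ref(const float r[6], float t[6])
{
t[0] = r[0] * 4.f - r[2] * 5.f + r[4];
t[1] = (r[4] + r[3]) - (r[1] + r[2]) * 4.f;
t[2] = (r[4] - r[3]) + (r[1] - r[2]) * 4.f;
t[3] = (r[4] - r[2]) - (r[1] - r[3]) * 2.f;
t[4] = (r[4] - r[2]) + (r[1] - r[3]) * 2.f;
t[5] = r[1] * 4.f - r[3] * 5.f + r[5];
}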
static void conv3x3s1_winograd43_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 4;
const int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);
float tmp[4][6][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
const float* output0_tm_4 = output0_tm_0 + tiles * 16;
const float* output0_tm_5 = output0_tm_0 + tiles * 20;
float* output0 = out0.row(i * 4) + (j * 4) * 4;
for (int m = 0; m < 6; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);
float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
output0_tm_0 += tiles * 24;
output0_tm_1 += tiles * 24;
output0_tm_2 += tiles * 24;
output0_tm_3 += tiles * 24;
output0_tm_4 += tiles * 24;
output0_tm_5 += tiles * 24;
}
for (int m = 0; m < 4; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);
float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 4, _out01);
vst1q_f32(output0 + 8, _out02);
vst1q_f32(output0 + 12, _out03);
output0 += outw * 4;
}
}
}
}
}
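// A scalar sketch (illustrative, not part of ncnn; the name is assumed) of the
// 4-point Winograd F(4x4, 3x3) output transform rows used above, matching the
// commented otm matrix.
static inline void winograd43_output_transform_1d_ref(const float r[6], float o[4])
{
float a02 = r[1] + r[2], b02 = r[3] + r[4];
float a13 = r[1] - r[2], b13 = r[3] - r[4];
o[0] = r[0] + a02 + b02;
o[1] = a13 + b13 * 2.f;
o[2] = a02 + b02 * 4.f;
o[3] = r[5] + a13 + b13 * 8.f;
}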
static void conv3x3s1_winograd23_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
const int w = bottom_blob.w;
const int h = bottom_blob.h;
const int inch = bottom_blob.c;
const int w_tiles = (w - 2) / 2;
const int h_tiles = (h - 2) / 2;
const int tiles = w_tiles * h_tiles;
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
// 0 = r00 - r02
// 1 = r01 + r02
// 2 = r02 - r01
// 3 = r03 - r01
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[4][4][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* r0 = img0.row(i * 2) + (j * 2) * 4;
for (int m = 0; m < 4; m++)
{
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r01 = vld1q_f32(r0 + 4);
float32x4_t _r02 = vld1q_f32(r0 + 8);
float32x4_t _r03 = vld1q_f32(r0 + 12);
float32x4_t _tmp0m = vsubq_f32(_r00, _r02);
float32x4_t _tmp1m = vaddq_f32(_r01, _r02);
float32x4_t _tmp2m = vsubq_f32(_r02, _r01);
float32x4_t _tmp3m = vsubq_f32(_r03, _r01);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
vst1q_f32(tmp[2][m], _tmp2m);
vst1q_f32(tmp[3][m], _tmp3m);
r0 += w * 4;
}
float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
float* r0_tm_1 = r0_tm_0 + tiles * 4;
float* r0_tm_2 = r0_tm_0 + tiles * 8;
float* r0_tm_3 = r0_tm_0 + tiles * 12;
for (int m = 0; m < 4; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _r0tm0 = vsubq_f32(_tmp00, _tmp02);
float32x4_t _r0tm1 = vaddq_f32(_tmp01, _tmp02);
float32x4_t _r0tm2 = vsubq_f32(_tmp02, _tmp01);
float32x4_t _r0tm3 = vsubq_f32(_tmp03, _tmp01);
vst1q_f32(r0_tm_0, _r0tm0);
vst1q_f32(r0_tm_1, _r0tm1);
vst1q_f32(r0_tm_2, _r0tm2);
vst1q_f32(r0_tm_3, _r0tm3);
r0_tm_0 += tiles * 16;
r0_tm_1 += tiles * 16;
r0_tm_2 += tiles * 16;
r0_tm_3 += tiles * 16;
}
}
}
}
}
static void conv3x3s1_winograd23_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
const int outw = top_blob.w;
const int outh = top_blob.h;
const int outch = top_blob.c;
const int w_tiles = outw / 2;
const int h_tiles = outh / 2;
const int tiles = w_tiles * h_tiles;
const float* biasptr = bias;
// const float otm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r00 + r01 + r02
// 1 = r01 - r02 + r03
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob.channel(p);
float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);
float tmp[2][4][4];
// tile
for (int i = 0; i < h_tiles; i++)
{
for (int j = 0; j < w_tiles; j++)
{
const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
const float* output0_tm_1 = output0_tm_0 + tiles * 4;
const float* output0_tm_2 = output0_tm_0 + tiles * 8;
const float* output0_tm_3 = output0_tm_0 + tiles * 12;
float* output0 = out0.row(i * 2) + (j * 2) * 4;
for (int m = 0; m < 4; m++)
{
float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _out0tm1), _out0tm2);
float32x4_t _tmp1m = vaddq_f32(vsubq_f32(_out0tm1, _out0tm2), _out0tm3);
vst1q_f32(tmp[0][m], _tmp0m);
vst1q_f32(tmp[1][m], _tmp1m);
output0_tm_0 += tiles * 16;
output0_tm_1 += tiles * 16;
output0_tm_2 += tiles * 16;
output0_tm_3 += tiles * 16;
}
for (int m = 0; m < 2; m++)
{
float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp01), _tmp02));
float32x4_t _out01 = vaddq_f32(_bias0, vaddq_f32(vsubq_f32(_tmp01, _tmp02), _tmp03));
vst1q_f32(output0, _out00);
vst1q_f32(output0 + 4, _out01);
output0 += outw * 4;
}
}
}
}
}
|
Simulation.c | #include "XSbench_header.h"
////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants,
// each of which deploys a different combination of optimization strategies. By
// default, XSBench will only run the baseline implementation. Optimized variants
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
////////////////////////////////////////////////////////////////////////////////////
unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype)
{
if( mype == 0)
printf("Beginning event based simulation...\n");
////////////////////////////////////////////////////////////////////////////////
// SUMMARY: Simulation Data Structure Manifest for "SD" Object
// Here we list all heap arrays (and lengths) in SD that would need to be
// offloaded manually if using an accelerator with a separate memory space
////////////////////////////////////////////////////////////////////////////////
// int * num_nucs; // Length = length_num_nucs;
// double * concs; // Length = length_concs
// int * mats; // Length = length_mats
// double * unionized_energy_array; // Length = length_unionized_energy_array
// int * index_grid; // Length = length_index_grid
// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
//
// Note: "unionized_energy_array" and "index_grid" can be of zero length
// depending on lookup method.
//
// Note: "Lengths" are given as the number of objects in the array, not the
// number of bytes.
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Begin Actual Simulation Loop
////////////////////////////////////////////////////////////////////////////////
unsigned long long verification = 0;
#pragma omp parallel for schedule(dynamic,100) reduction(+:verification)
for( int i = 0; i < in.lookups; i++ )
{
#ifdef AML
int * num_nucs = aml_replicaset_hwloc_local_replica(SD.num_nucs_replica);
double * concs = aml_replicaset_hwloc_local_replica(SD.concs_replica);
double * unionized_energy_array = aml_replicaset_hwloc_local_replica(SD.unionized_energy_array_replica);
int * index_grid = aml_replicaset_hwloc_local_replica(SD.index_grid_replica);
NuclideGridPoint * nuclide_grid = aml_replicaset_hwloc_local_replica(SD.nuclide_grid_replica);
#else
int * num_nucs = SD.num_nucs;
double * concs = SD.concs;
double * unionized_energy_array = SD.unionized_energy_array;
int * index_grid = SD.index_grid;
NuclideGridPoint * nuclide_grid = SD.nuclide_grid;
#endif
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type index neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
num_nucs, // 1-D array with number of nuclides per material
concs, // Flattened 2-D array with concentration of each nuclide in each material
unionized_energy_array, // 1-D Unionized energy array
index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookup type)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on the verification value.
// For accelerators, a different approach might be required
// (e.g., atomics, reduction of thread-specific values in large
// array via CUDA thrust, etc).
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
}
return verification;
}
unsigned long long run_history_based_simulation(Inputs in, SimulationData SD, int mype)
{
if( mype == 0)
printf("Beginning history based simulation...\n");
////////////////////////////////////////////////////////////////////////////////
// SUMMARY: Simulation Data Structure Manifest for "SD" Object
// Here we list all heap arrays (and lengths) in SD that would need to be
// offloaded manually if using an accelerator with a separate memory space
////////////////////////////////////////////////////////////////////////////////
// int * num_nucs; // Length = length_num_nucs;
// double * concs; // Length = length_concs
// int * mats; // Length = length_mats
// double * unionized_energy_array; // Length = length_unionized_energy_array
// int * index_grid; // Length = length_index_grid
// NuclideGridPoint * nuclide_grid; // Length = length_nuclide_grid
//
// Note: "unionized_energy_array" and "index_grid" can be of zero length
// depending on lookup method.
//
// Note: "Lengths" are given as the number of objects in the array, not the
// number of bytes.
////////////////////////////////////////////////////////////////////////////////
unsigned long long verification = 0;
// Begin outer lookup loop over particles. This loop is independent.
#pragma omp parallel for schedule(dynamic, 100) reduction(+:verification)
for( int p = 0; p < in.particles; p++ )
{
#ifdef AML
int * num_nucs = aml_replicaset_hwloc_local_replica(SD.num_nucs_replica);
double * concs = aml_replicaset_hwloc_local_replica(SD.concs_replica);
double * unionized_energy_array = aml_replicaset_hwloc_local_replica(SD.unionized_energy_array_replica);
int * index_grid = aml_replicaset_hwloc_local_replica(SD.index_grid_replica);
NuclideGridPoint * nuclide_grid = aml_replicaset_hwloc_local_replica(SD.nuclide_grid_replica);
#else
int * num_nucs = SD.num_nucs;
double * concs = SD.concs;
double * unionized_energy_array = SD.unionized_energy_array;
int * index_grid = SD.index_grid;
NuclideGridPoint * nuclide_grid = SD.nuclide_grid;
#endif
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup, and
// we may fast forward up to 5 times after each lookup)
seed = fast_forward_LCG(seed, p*in.lookups*2*5);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
// Inner XS Lookup Loop
// This loop is dependent!
// i.e., the next iteration uses data computed in the previous iteration.
for( int i = 0; i < in.lookups; i++ )
{
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
num_nucs, // 1-D array with number of nuclides per material
concs, // Flattened 2-D array with concentration of each nuclide in each material
unionized_energy_array, // 1-D Unionized energy array
index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices for each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookups)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on it. For other accelerators,
// a different approach might be required (e.g., atomics, reduction
// of thread-specific values in large array via CUDA thrust, etc)
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
// Randomly pick next energy and material for the particle
// Also incorporates results from macro_xs lookup to
// enforce loop dependency.
// In a real MC app, this dependency is expressed in terms
// of branching physics sampling, whereas here we are just
// artificially enforcing this dependence based on fast
// forwarding the LCG state
uint64_t n_forward = 0;
for( int j = 0; j < 5; j++ )
if( macro_xs_vector[j] > 1.0 )
n_forward++;
if( n_forward > 0 )
seed = fast_forward_LCG(seed, n_forward);
p_energy = LCG_random_double(&seed);
mat = pick_mat(&seed);
}
}
return verification;
}
// Calculates the microscopic cross section for a given nuclide & energy
void calculate_micro_xs( double p_energy, int nuc, long n_isotopes,
long n_gridpoints,
double * restrict egrid, int * restrict index_data,
NuclideGridPoint * restrict nuclide_grids,
long idx, double * restrict xs_vector, int grid_type, int hash_bins ){
// Variables
double f;
NuclideGridPoint * low, * high;
// If using only the nuclide grid, we must perform a binary search
// to find the energy location in this particular nuclide's grid.
if( grid_type == NUCLIDE )
{
// Perform binary search on the Nuclide Grid to find the index
idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1);
// pull ptr from nuclide grid and check to ensure that
// we're not reading off the end of the nuclide's grid
if( idx == n_gridpoints - 1 )
low = &nuclide_grids[nuc*n_gridpoints + idx - 1];
else
low = &nuclide_grids[nuc*n_gridpoints + idx];
}
else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed.
{
// pull ptr from energy grid and check to ensure that
// we're not reading off the end of the nuclide's grid
if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1];
else
low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]];
}
else // Hash grid
{
// load lower bounding index
int u_low = index_data[idx * n_isotopes + nuc];
// Determine higher bounding index
int u_high;
if( idx == hash_bins - 1 )
u_high = n_gridpoints - 1;
else
u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;
// Check edge cases to make sure the energy actually falls between these
// bounding gridpoints. Then, if things look good, search for the gridpoint
// in the nuclide grid within the lower and upper limits we've calculated.
double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy;
double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
int lower;
if( p_energy <= e_low )
lower = 0;
else if( p_energy >= e_high )
lower = n_gridpoints - 1;
else
lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high);
if( lower == n_gridpoints - 1 )
low = &nuclide_grids[nuc*n_gridpoints + lower - 1];
else
low = &nuclide_grids[nuc*n_gridpoints + lower];
}
high = low + 1;
// calculate the reusable interpolation factor
f = (high->energy - p_energy) / (high->energy - low->energy);
// Total XS
xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs);
// Elastic XS
xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs);
// Absorption XS
xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs);
// Fission XS
xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs);
// Nu Fission XS
xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs);
}
// Calculates macroscopic cross section based on a given material & energy
void calculate_macro_xs( double p_energy, int mat, long n_isotopes,
long n_gridpoints, int * restrict num_nucs,
double * restrict concs,
double * restrict egrid, int * restrict index_data,
NuclideGridPoint * restrict nuclide_grids,
int * restrict mats,
double * restrict macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
int p_nuc; // the nuclide we are looking up
long idx = -1;
double conc; // the concentration of the nuclide in the material
// cleans out macro_xs_vector
for( int k = 0; k < 5; k++ )
macro_xs_vector[k] = 0;
// If we are using the unionized energy grid (UEG), we only
// need to perform 1 binary search per macroscopic lookup.
// If we are using the nuclide grid search, it will have to be
// done inside of the "calculate_micro_xs" function for each different
// nuclide in the material.
if( grid_type == UNIONIZED )
idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
else if( grid_type == HASH )
{
double du = 1.0 / hash_bins;
idx = p_energy / du;
}
// Once we find the pointer array on the UEG, we can pull the data
// from the respective nuclide grids, as well as the nuclide
// concentration data for the material
// Each nuclide from the material needs to have its micro-XS array
// looked up & interpolated (via calculate_micro_xs). Then, the
// micro XS is multiplied by the concentration of that nuclide
// in the material, and added to the total macro XS array.
// (Independent -- though if parallelizing, must use atomic operations
// or otherwise control access to the xs_vector and macro_xs_vector to
// avoid simultaneous writing to the same data structure)
for( int j = 0; j < num_nucs[mat]; j++ )
{
double xs_vector[5];
p_nuc = mats[mat*max_num_nucs + j];
conc = concs[mat*max_num_nucs + j];
calculate_micro_xs( p_energy, p_nuc, n_isotopes,
n_gridpoints, egrid, index_data,
nuclide_grids, idx, xs_vector, grid_type, hash_bins );
for( int k = 0; k < 5; k++ )
macro_xs_vector[k] += xs_vector[k] * conc;
}
}
// binary search for energy on unionized energy grid
// returns lower index
long grid_search( long n, double quarry, double * restrict A)
{
long lowerLimit = 0;
long upperLimit = n-1;
long examinationPoint;
long length = upperLimit - lowerLimit;
while( length > 1 )
{
examinationPoint = lowerLimit + ( length / 2 );
if( A[examinationPoint] > quarry )
upperLimit = examinationPoint;
else
lowerLimit = examinationPoint;
length = upperLimit - lowerLimit;
}
return lowerLimit;
}
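// Usage sketch (illustrative): for a sorted egrid of length n,
//   long idx = grid_search(n, p_energy, egrid);
// yields idx with egrid[idx] <= p_energy < egrid[idx+1] for interior energies;
// by construction the loop clamps idx to the range [0, n-2] at the endpoints.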
// binary search for energy on nuclide energy grid
long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high)
{
long lowerLimit = low;
long upperLimit = high;
long examinationPoint;
long length = upperLimit - lowerLimit;
while( length > 1 )
{
examinationPoint = lowerLimit + ( length / 2 );
if( A[examinationPoint].energy > quarry )
upperLimit = examinationPoint;
else
lowerLimit = examinationPoint;
length = upperLimit - lowerLimit;
}
return lowerLimit;
}
// picks a material based on a probabilistic distribution
int pick_mat( uint64_t * seed )
{
// I have a nice spreadsheet supporting these numbers. They are
// the fractions (by volume) of material in the core. Not a
// *perfect* approximation of where XS lookups are going to occur,
// but this will do a good job of biasing the system nonetheless.
double dist[12];
dist[0] = 0.140; // fuel
dist[1] = 0.052; // cladding
dist[2] = 0.275; // cold, borated water
dist[3] = 0.134; // hot, borated water
dist[4] = 0.154; // RPV
dist[5] = 0.064; // Lower, radial reflector
dist[6] = 0.066; // Upper reflector / top plate
dist[7] = 0.055; // bottom plate
dist[8] = 0.008; // bottom nozzle
dist[9] = 0.015; // top nozzle
dist[10] = 0.025; // top of fuel assemblies
dist[11] = 0.013; // bottom of fuel assemblies
double roll = LCG_random_double(seed);
// makes a pick based on the distro
for( int i = 0; i < 12; i++ )
{
double running = 0;
for( int j = i; j > 0; j-- )
running += dist[j];
if( roll < running )
return i;
}
return 0;
}
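// Note on the sampler above (an illustrative observation): the inner loop
// builds the running sum dist[1] + ... + dist[i], so material i >= 1 is chosen
// with probability dist[i], and the final "return 0" covers the remaining
// probability mass (approximately dist[0], the fuel fraction). A single-pass
// sketch with the same distribution (hypothetical, not used by XSBench):
//   double cdf = 0.0;
//   for( int i = 1; i < 12; i++ )
//   {
//     cdf += dist[i];
//     if( roll < cdf )
//       return i;
//   }
//   return 0; // fuel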
double LCG_random_double(uint64_t * seed)
{
// LCG parameters
const uint64_t m = 9223372036854775808ULL; // 2^63
const uint64_t a = 2806196910506780709ULL;
const uint64_t c = 1ULL;
*seed = (a * (*seed) + c) % m;
return (double) (*seed) / (double) m;
}
uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
// LCG parameters
const uint64_t m = 9223372036854775808ULL; // 2^63
uint64_t a = 2806196910506780709ULL;
uint64_t c = 1ULL;
n = n % m;
uint64_t a_new = 1;
uint64_t c_new = 0;
while(n > 0)
{
if(n & 1)
{
a_new *= a;
c_new = c_new * a + c;
}
c *= (a + 1);
a *= a;
n >>= 1;
}
return (a_new * seed + c_new) % m;
}
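// The loop above is binary exponentiation: in O(log n) steps it in effect
// computes a_new = a^n mod 2^63 and c_new = c * (a^(n-1) + ... + a + 1) mod 2^63,
// so applying the LCG n times collapses into one affine step
// seed_n = a_new * seed + c_new (mod 2^63). Illustrative check (a sketch):
//   uint64_t s1 = STARTING_SEED;
//   uint64_t s2 = fast_forward_LCG(STARTING_SEED, 1000);
//   for( int k = 0; k < 1000; k++ ) LCG_random_double(&s1); // now s1 == s2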
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// OPTIMIZED VARIANT FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// This section contains a number of optimized variants of some of the above
// functions, each of which deploys a different combination of optimization strategies.
// By default, XSBench will not run any of these variants. They
// must be specifically selected using the "-k <optimized variant ID>" command
// line argument.
//
// As fast parallel sorting will be required for these optimizations, we will
// first define a set of key-value parallel quicksort routines.
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////
// Parallel Quicksort Key-Value Sorting Algorithms
////////////////////////////////////////////////////////////////////////////////////
//
// These algorithms are based on the parallel quicksort implementation by
// Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel
//
// Eduard's original version was for an integer type quicksort, but I have modified
// it to form two different versions that can sort key-value pairs together without
// having to bundle them into a separate object. Additionally, I have modified the
// optimal chunk sizes and restricted the number of threads for the array sizing
// that XSBench will be using by default.
//
// Eduard's original implementation carries the following license, which applies to
// the following functions only:
//
// void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
// void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads)
// void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
// void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads)
//
// The MIT License (MIT)
//
// Copyright (c) 2016 Eduard López
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//
////////////////////////////////////////////////////////////////////////////////////
void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff)
{
int i = left, j = right;
int tmp;
int pivot = key[(left + right) / 2];
{
while (i <= j) {
while (key[i] < pivot)
i++;
while (key[j] > pivot)
j--;
if (i <= j) {
tmp = key[i];
key[i] = key[j];
key[j] = tmp;
double tmp_v = value[i];
value[i] = value[j];
value[j] = tmp_v;
i++;
j--;
}
}
}
if ( ((right-left)<cutoff) ){
if (left < j){ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); }
if (i < right){ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); }
}else{
#pragma omp task
{ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); }
#pragma omp task
{ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); }
}
}
void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){
// Set the minimum problem size for which new tasks are still spawned
int cutoff = 10000;
// For this problem size, using more than 16 CPU threads is not helpful
if( numThreads > 16 )
numThreads = 16;
#pragma omp parallel num_threads(numThreads)
{
#pragma omp single nowait
{
quickSort_parallel_internal_i_d(key,value, 0, lenArray-1, cutoff);
}
}
}
void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff)
{
int i = left, j = right;
double tmp;
double pivot = key[(left + right) / 2];
{
while (i <= j) {
while (key[i] < pivot)
i++;
while (key[j] > pivot)
j--;
if (i <= j) {
tmp = key[i];
key[i] = key[j];
key[j] = tmp;
int tmp_v = value[i];
value[i] = value[j];
value[j] = tmp_v;
i++;
j--;
}
}
}
if ( ((right-left)<cutoff) ){
if (left < j){ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
if (i < right){ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); }
}else{
#pragma omp task
{ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); }
#pragma omp task
{ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); }
}
}
void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){
// Set the minimum problem size for which new tasks are still spawned
int cutoff = 10000;
// For this problem size, using more than 16 CPU threads is not helpful
if( numThreads > 16 )
numThreads = 16;
#pragma omp parallel num_threads(numThreads)
{
#pragma omp single nowait
{
quickSort_parallel_internal_d_i(key,value, 0, lenArray-1, cutoff);
}
}
}
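// Usage sketch (illustrative values): co-sort energies with their material tags.
//   double e[3] = {0.7, 0.1, 0.4};
//   int m[3] = {2, 0, 1};
//   quickSort_parallel_d_i(e, m, 3, 4); // e becomes ascending; m is permuted alongside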
////////////////////////////////////////////////////////////////////////////////////
// Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting
// lookups by material and energy
////////////////////////////////////////////////////////////////////////////////////
// This kernel separates out the sampling and lookup regions of the event-based
// model, and then sorts the lookups by material type and energy. The goal of this
// optimization is to greatly improve cache locality, so that XS indices
// loaded from memory may be re-used for multiple lookups.
//
// As efficient sorting is key for performance, we must also implement an
// efficient key-value parallel sorting algorithm. We also experimented with using
// the C++ version of thrust for these purposes, but found that our own implementation
// was slightly faster than the thrust library version, so for speed and
// simplicity we do not add the thrust dependency.
////////////////////////////////////////////////////////////////////////////////////
unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData SD, int mype)
{
char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort";
if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name);
////////////////////////////////////////////////////////////////////////////////
// Allocate Additional Data Structures Needed by Optimized Kernel
////////////////////////////////////////////////////////////////////////////////
if( mype == 0) printf("Allocating additional data required by optimized kernel...\n");
size_t sz;
size_t total_sz = 0;
double start, stop;
sz = in.lookups * sizeof(double);
SD.p_energy_samples = (double *) malloc(sz);
total_sz += sz;
SD.length_p_energy_samples = in.lookups;
sz = in.lookups * sizeof(int);
SD.mat_samples = (int *) malloc(sz);
total_sz += sz;
SD.length_mat_samples = in.lookups;
if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0);
////////////////////////////////////////////////////////////////////////////////
// Begin Actual Simulation
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Sample Materials and Energies
////////////////////////////////////////////////////////////////////////////////
#pragma omp parallel for schedule(dynamic, 100)
for( int i = 0; i < in.lookups; i++ )
{
// Set the initial seed value
uint64_t seed = STARTING_SEED;
// Forward seed to lookup index (we need 2 samples per lookup)
seed = fast_forward_LCG(seed, 2*i);
// Randomly pick an energy and material for the particle
double p_energy = LCG_random_double(&seed);
int mat = pick_mat(&seed);
SD.p_energy_samples[i] = p_energy;
SD.mat_samples[i] = mat;
}
if(mype == 0) printf("finished sampling...\n");
////////////////////////////////////////////////////////////////////////////////
// Sort by Material
////////////////////////////////////////////////////////////////////////////////
start = get_time();
quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads);
stop = get_time();
if(mype == 0) printf("Material sort took %.3lf seconds\n", stop-start);
////////////////////////////////////////////////////////////////////////////////
// Sort by Energy
////////////////////////////////////////////////////////////////////////////////
start = get_time();
// Count up number of each type of sample.
int num_samples_per_mat[12] = {0};
for( int l = 0; l < in.lookups; l++ )
num_samples_per_mat[ SD.mat_samples[l] ]++;
// Determine offsets
int offsets[12] = {0};
for( int m = 1; m < 12; m++ )
offsets[m] = offsets[m-1] + num_samples_per_mat[m-1];
stop = get_time();
if(mype == 0) printf("Counting samples and offsets took %.3lf seconds\n", stop-start);
start = stop;
// Sort each material type by energy level
int offset = 0;
for( int m = 0; m < 12; m++ )
quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads);
stop = get_time();
if(mype == 0) printf("Energy Sorts took %.3lf seconds\n", stop-start);
////////////////////////////////////////////////////////////////////////////////
// Perform lookups for each material separately
////////////////////////////////////////////////////////////////////////////////
start = get_time();
unsigned long long verification = 0;
// Individual Materials
offset = 0;
for( int m = 0; m < 12; m++ )
{
#pragma omp parallel for schedule(dynamic,100) reduction(+:verification)
for( int i = offset; i < offset + num_samples_per_mat[m]; i++)
{
#ifdef AML
int * num_nucs = aml_replicaset_hwloc_local_replica(SD.num_nucs_replica);
double * concs = aml_replicaset_hwloc_local_replica(SD.concs_replica);
double * unionized_energy_array = aml_replicaset_hwloc_local_replica(SD.unionized_energy_array_replica);
int * index_grid = aml_replicaset_hwloc_local_replica(SD.index_grid_replica);
NuclideGridPoint * nuclide_grid = aml_replicaset_hwloc_local_replica(SD.nuclide_grid_replica);
#else
int * num_nucs = SD.num_nucs;
double * concs = SD.concs;
double * unionized_energy_array = SD.unionized_energy_array;
int * index_grid = SD.index_grid;
NuclideGridPoint * nuclide_grid = SD.nuclide_grid;
#endif
// load pre-sampled energy and material for the particle
double p_energy = SD.p_energy_samples[i];
int mat = SD.mat_samples[i];
double macro_xs_vector[5] = {0};
// Perform macroscopic Cross Section Lookup
calculate_macro_xs(
p_energy, // Sampled neutron energy (in lethargy)
mat, // Sampled material type index neutron is in
in.n_isotopes, // Total number of isotopes in simulation
in.n_gridpoints, // Number of gridpoints per isotope in simulation
num_nucs, // 1-D array with number of nuclides per material
concs, // Flattened 2-D array with concentration of each nuclide in each material
unionized_energy_array, // 1-D Unionized energy array
index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material
macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels)
in.grid_type, // Lookup type (nuclide, hash, or unionized)
in.hash_bins, // Number of hash bins used (if using hash lookup type)
SD.max_num_nucs // Maximum number of nuclides present in any material
);
// For verification, and to prevent the compiler from optimizing
// all work out, we interrogate the returned macro_xs_vector array
// to find its maximum value index, then increment the verification
// value by that index. In this implementation, we prevent thread
// contention by using an OMP reduction on the verification value.
// For accelerators, a different approach might be required
// (e.g., atomics, reduction of thread-specific values in large
// array via CUDA thrust, etc).
double max = -1.0;
int max_idx = 0;
for(int j = 0; j < 5; j++ )
{
if( macro_xs_vector[j] > max )
{
max = macro_xs_vector[j];
max_idx = j;
}
}
verification += max_idx+1;
}
offset += num_samples_per_mat[m];
}
stop = get_time();
if(mype == 0) printf("XS Lookups took %.3lf seconds\n", stop-start);
return verification;
}
|
GB_unaryop__ainv_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int8_uint64
// op(A') function: GB_tran__ainv_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
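// For reference, with the macros above, GB_CAST_OP (p, p) expands to:
//   uint64_t aij = Ax [p] ;
//   int8_t x = (int8_t) aij ;
//   Cx [p] = -x ;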
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_int8_uint64
(
int8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_int8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB023-sections1-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two parallel sections write the same variable without synchronization, causing a data race.
Data race pair: i@58:5 vs. i@60:5
*/
#include <stdio.h>
int main()
{
int i=0;
#pragma omp parallel sections
{
#pragma omp section
i = 1;
#pragma omp section
i = 2;
}
printf("i=%d\n",i);
return 0;
}
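/* A race-free variant (a sketch only; deliberately not applied, since this
   benchmark exists to exhibit the race) would wrap each write in
   "#pragma omp critical", or have each section write its own variable.
   The final value of i would still be nondeterministic (1 or 2), but the
   conflicting accesses would then be synchronized, removing the race. */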
|
serial_tree_learner.h | /*!
* Original work Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Modified work Copyright (c) 2020 Fabio Sigrist. All rights reserved.
* Licensed under the Apache License Version 2.0 See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#ifndef AVOID_NOT_CRAN_COMPLIANT_CALLS
#include <cstdio>
#endif
#include <memory>
#include <random>
#include <vector>
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use a 4KB-aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*! \brief forward declaration */
class CostEfficientGradientBoosting;
/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
friend CostEfficientGradientBoosting;
explicit SerialTreeLearner(const Config* config);
~SerialTreeLearner();
void Init(const Dataset* train_data, bool is_constant_hessian) override;
void ResetTrainingData(const Dataset* train_data) override;
void ResetConfig(const Config* config) override;
Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
const Json& forced_split_json) override;
Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
const score_t* gradients, const score_t* hessians) override;
void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
data_partition_->SetUsedDataIndices(used_indices, num_data);
}
void AddPredictionToScore(const Tree* tree, double* out_score) const override {
if (tree->num_leaves() <= 1) { return; }
CHECK(tree->num_leaves() <= data_partition_->num_leaves());
#pragma omp parallel for schedule(static)
for (int i = 0; i < tree->num_leaves(); ++i) {
double output = static_cast<double>(tree->LeafOutput(i));
data_size_t cnt_leaf_data = 0;
auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
out_score[tmp_idx[j]] += output;
}
}
}
void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
void GetDataLeafIndices(Tree* tree, int* data_leaf_index) const override;
protected:
virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
/*!
* \brief Some initial works before training
*/
virtual void BeforeTrain();
/*!
* \brief Some initial works before FindBestSplit
*/
virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
virtual void FindBestSplits();
virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
/*!
* \brief Partition tree and data according best split.
* \param tree Current tree, which will be split by this function.
* \param best_leaf The index of the leaf that will be split.
* \param left_leaf The index of the left leaf after the split.
* \param right_leaf The index of the right leaf after the split.
*/
virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
/* Force splits with forced_split_json dict and then return num splits forced.*/
virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
int* right_leaf, int* cur_depth,
bool *aborted_last_force_split);
/*!
* \brief Get the number of data in a leaf
* \param leaf_idx The index of leaf
* \return The number of data in the leaf_idx leaf
*/
inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
/*! \brief number of data */
data_size_t num_data_;
/*! \brief number of features */
int num_features_;
/*! \brief training data */
const Dataset* train_data_;
/*! \brief gradients of current iteration */
const score_t* gradients_;
/*! \brief hessians of current iteration */
const score_t* hessians_;
/*! \brief training data partition on leaves */
std::unique_ptr<DataPartition> data_partition_;
/*! \brief used for generate used features */
Random random_;
/*! \brief used for sub-feature training; is_feature_used_[i] = false means feature i is not used */
std::vector<int8_t> is_feature_used_;
/*! \brief used feature indices in current tree */
std::vector<int> used_feature_indices_;
/*! \brief pointer to histograms array of parent of current leaves */
FeatureHistogram* parent_leaf_histogram_array_;
/*! \brief pointer to histograms array of smaller leaf */
FeatureHistogram* smaller_leaf_histogram_array_;
/*! \brief pointer to histograms array of larger leaf */
FeatureHistogram* larger_leaf_histogram_array_;
/*! \brief store best split points for all leaves */
std::vector<SplitInfo> best_split_per_leaf_;
/*! \brief store best split per feature for all leaves */
std::vector<SplitInfo> splits_per_leaf_;
/*! \brief stores best thresholds for all features for the smaller leaf */
std::unique_ptr<LeafSplits> smaller_leaf_splits_;
/*! \brief stores best thresholds for all features for the larger leaf */
std::unique_ptr<LeafSplits> larger_leaf_splits_;
std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
/*! \brief gradients of current iteration, reordered for cache efficiency, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
/*! \brief hessians of current iteration, reordered for cache efficiency, aligned to a 4K page */
std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
/*! \brief gradients of current iteration, reordered for cache efficiency */
std::vector<score_t> ordered_gradients_;
/*! \brief hessians of current iteration, reordered for cache efficiency */
std::vector<score_t> ordered_hessians_;
#endif
/*! \brief Store ordered bin */
std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
/*! \brief true if ordered bins are in use */
bool has_ordered_bin_ = false;
/*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
std::vector<char> is_data_in_leaf_;
/*! \brief caches historical histograms to speed up training */
HistogramPool histogram_pool_;
/*! \brief config of tree learner*/
const Config* config_;
int num_threads_;
std::vector<int> ordered_bin_indices_;
bool is_constant_hessian_;
std::unique_ptr<CostEfficientGradientBoosting> cegb_;
};
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
if (leaf_idx >= 0) {
return data_partition_->leaf_count(leaf_idx);
} else {
return 0;
}
}
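// A minimal standalone sketch (not LightGBM code) of the pattern used by
// AddPredictionToScore above: every leaf holds a constant output, and that
// output is added to the score of each data point assigned to the leaf. The
// CSR-style arrays leaf_begin/leaf_index are hypothetical stand-ins for
// DataPartition::GetIndexOnLeaf.
static inline void AddLeafOutputsSketch(const double* leaf_output,
                                        const int* leaf_begin,  // size num_leaves + 1
                                        const int* leaf_index,  // data indices grouped by leaf
                                        int num_leaves,
                                        double* out_score) {
  #pragma omp parallel for schedule(static)
  for (int leaf = 0; leaf < num_leaves; ++leaf) {
    const double output = leaf_output[leaf];
    for (int j = leaf_begin[leaf]; j < leaf_begin[leaf + 1]; ++j) {
      // Leaves partition the data, so writes never collide across threads.
      out_score[leaf_index[j]] += output;
    }
  }
}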
} // namespace LightGBM
#endif // LightGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
|
Trainer.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _TRAINER_
#define _TRAINER_
#include <limits.h>
#include <float.h>
DEFINE_bool(random_batch_processing, false, "Process batches in random order. Note this may disrupt catch-up.");
DEFINE_bool(random_per_batch_datapoint_processing, false, "Process datapoints in random order per batch. Note this may disrupt catch-up.");
DEFINE_int32(interval_print, 1, "Interval in which to print the loss.");
// Contains times / losses / etc
struct TrainStatistics {
std::vector<double> times;
std::vector<double> losses;
};
typedef struct TrainStatistics TrainStatistics;
class Trainer {
protected:
void TrackTimeLoss(double cur_time, double cur_loss, TrainStatistics *stats) {
stats->times.push_back(cur_time);
stats->losses.push_back(cur_loss);
}
void PrintPartitionTime(Timer &timer) {
printf("Partition Time(s): %f\n", timer.Elapsed());
}
void PrintTimeLoss(double cur_time, double cur_loss, int epoch) {
printf("Epoch: %d\tTime(s): %f\tLoss: %lf\t\n", epoch, cur_time, cur_loss);
}
void EpochBegin(int epoch, Timer &gradient_timer, Model *model, const std::vector<Datapoint *> &datapoints, TrainStatistics *stats) {
double cur_time = gradient_timer.Elapsed();
double cur_loss = model->ComputeLoss(datapoints);
this->TrackTimeLoss(cur_time, cur_loss, stats);
if (FLAGS_print_loss_per_epoch && epoch % FLAGS_interval_print == 0) {
this->PrintTimeLoss(cur_time, cur_loss, epoch);
}
}
public:
Trainer() {
// Some error checking.
if (FLAGS_n_threads > std::thread::hardware_concurrency()) {
std::cerr << "Trainer: Number of threads is greater than the number of physical cores." << std::endl;
//exit(0);
}
// Basic set up, like pinning to core, setting number of threads.
omp_set_num_threads(FLAGS_n_threads);
#pragma omp parallel
{
pin_to_core(omp_get_thread_num());
}
}
virtual ~Trainer() {}
// Main training method.
virtual TrainStatistics Train(Model *model, const std::vector<Datapoint *> & datapoints, Updater *updater) = 0;
};
#endif
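/* The constructor above calls pin_to_core(), which is defined elsewhere in
   this project. A minimal sketch of one possible implementation, assuming
   Linux/glibc (pthread_setaffinity_np, which needs _GNU_SOURCE defined
   before the first include); pin_to_core_sketch is a hypothetical name,
   not the project's actual definition. */
#include <pthread.h>
#include <sched.h>
static inline void pin_to_core_sketch(int core_id) {
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(core_id, &cpuset);
    // Restrict the calling thread to the given core.
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
}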
|
irbuilder_unroll_partial_factor.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_partial_factor(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]]
// CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]]
// CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP13]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
void unroll_partial_factor(float *a, float *b, float *c, float *d) {
#pragma omp unroll partial(3)
for (int i = 0; i < 2; i++) {
a[i] = b[i] * c[i] * d[i];
}
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP8]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP9:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP9]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 3}
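// For reference, a hand-written equivalent of what "#pragma omp unroll
// partial(3)" asks the compiler to do, shown on a generic trip count n
// (hypothetical helper, not exercised by the autogenerated checks above):
void unroll_partial_factor_by_hand(float *a, float *b, float *c, float *d, int n) {
  int i = 0;
  for (; i + 3 <= n; i += 3) { // main loop: three copies of the body
    a[i] = b[i] * c[i] * d[i];
    a[i + 1] = b[i + 1] * c[i + 1] * d[i + 1];
    a[i + 2] = b[i + 2] * c[i + 2] * d[i + 2];
  }
  for (; i < n; ++i) { // remainder loop for the leftover iterations
    a[i] = b[i] * c[i] * d[i];
  }
}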
|
GB_unaryop__ainv_uint8_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint32
// op(A') function: GB_tran__ainv_uint8_uint32
// C type: uint8_t
// A type: uint32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_uint32
(
uint8_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
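// Worked example of the cast+op pair above: the uint32 value is first cast
// to uint8, then negated; unsigned negation wraps modulo 2^8, so AINV(1)
// stores 255. A tiny self-contained demo, not part of GraphBLAS
// (ainv_uint8_uint32_demo is a hypothetical name):
static inline uint8_t ainv_uint8_uint32_demo (uint32_t aij)
{
    uint8_t x = (uint8_t) aij ;     // GB_CASTING: uint32 -> uint8
    return ((uint8_t) (-x)) ;       // GB_OP: additive inverse, mod 256
}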
|
GB_binop__bxnor_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bxnor_int64
// A.*B function (eWiseMult): GB_AemultB__bxnor_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bxnor_int64
// C+=b function (dense accum): GB_Cdense_accumb__bxnor_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bxnor_int64
// C=scalar+B GB_bind1st__bxnor_int64
// C=scalar+B' GB_bind1st_tran__bxnor_int64
// C=A+scalar GB_bind2nd__bxnor_int64
// C=A'+scalar GB_bind2nd_tran__bxnor_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ~((aij) ^ (bij))
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = ~((x) ^ (y)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_INT64 || GxB_NO_BXNOR_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bxnor_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bxnor_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t bij = Bx [p] ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bxnor_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int64_t aij = Ax [p] ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB_bind1st_tran__bxnor_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB_bind2nd_tran__bxnor_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
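// Worked example of the operator above: BXNOR is bitwise "equality";
// ~(x ^ y) sets exactly the bit positions where x and y agree. A tiny
// self-contained demo, not part of GraphBLAS (hypothetical name):
static inline int64_t bxnor_int64_demo (int64_t x, int64_t y)
{
    // x = 12 (0b1100), y = 10 (0b1010): x ^ y = 0b0110, so the low four
    // bits of the result are 0b1001 and all higher bits are 1.
    return (~(x ^ y)) ;
}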
|
gvflib.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "idl_export.h"
double double_max(double, double);
int gvf_natural(
long *iterations_ptr,
short *x_max_ptr, short *y_max_ptr,
double *dt_ptr,
double *u_idl, double *v_idl,
double *fx, double *fy,
double *g, double *h,
double *err_tol_ptr
){
unsigned short i, k, iterations;
short x, y;
long ci, x_max, y_max;
double dt;
double *u_next, *v_next, *aux;
double *u, *v;
double uxx, uyy;
double vxx, vyy;
double err, err_tol, norm, norm_ant;
double *errs, *norms;
char buf[1024];
x_max = (long)*x_max_ptr;
y_max = (long)*y_max_ptr;
iterations = *iterations_ptr;
dt = *dt_ptr;
err_tol = *err_tol_ptr;
u= (double *)malloc(x_max * y_max * sizeof(double));
v= (double *)malloc(x_max * y_max * sizeof(double));
u_next = (double *)malloc(x_max * y_max * sizeof(double));
v_next = (double *)malloc(x_max * y_max * sizeof(double));
errs = (double *)malloc(omp_get_max_threads() * sizeof(double));
norms = (double *)malloc(omp_get_max_threads() * sizeof(double));
ci = 0;
for(y = 0; y < y_max; y++) {
for(x = 0; x < x_max; x++) {
u[ci] = u_idl[ci];
v[ci] = v_idl[ci];
ci++;
}
}
for(i = 0; i < iterations; i++) {
err = 0;
norm = 0;
norm_ant = 0;
for(k = 0; k < omp_get_max_threads(); k++) {
errs[k] = 0;
norms[k] = 0;
}
#pragma omp parallel for \
private(i,ci,\
x,y, \
uxx,uyy,\
vxx,vyy\
) \
shared(u,v,dt,\
x_max,y_max,\
err, norm_ant, norm,\
errs, norms \
)
for(ci = 0; ci < x_max * y_max; ci++) {
x = ci % x_max;
y = ci / x_max;
if(x == 0) {
uxx = (-2 * u[ci] + u[ci+1])/2;
vxx = (-2 * v[ci] + v[ci+1])/2;
if(y == 0) {
uyy = (-2 * u[ci] + u[ci+x_max])/2;
vyy = (-2 * v[ci] + v[ci+x_max])/2;
}
else if(y == y_max-1) {
uyy = (u[ci-x_max] - 2*u[ci])/2;
vyy = (v[ci-x_max] - 2*v[ci])/2;
}
else {
uyy = (u[ci+x_max] - 2*u[ci] + u[ci-x_max]);
vyy = (v[ci+x_max] - 2*v[ci] + v[ci-x_max]);
}
}
else if(x == x_max-1) {
uxx = (u[ci-1] - 2*u[ci])/2;
vxx = (v[ci-1] - 2*v[ci])/2;
if(y == 0) {
uyy = (-2 * u[ci] + u[ci+x_max])/2;
vyy = (-2 * v[ci] + v[ci+x_max])/2;
}
else if(y == y_max-1) {
uyy = (u[ci-x_max] - 2*u[ci])/2;
vyy = (v[ci-x_max] - 2*v[ci])/2;
}
else {
uyy = (u[ci+x_max] - 2*u[ci] + u[ci-x_max]);
vyy = (v[ci+x_max] - 2*v[ci] + v[ci-x_max]);
}
}
else {
uxx = (u[ci+1] - 2*u[ci] + u[ci-1])/2;
vxx = (v[ci+1] - 2*v[ci] + v[ci-1])/2;
if(y == 0) {
uyy = (-2 * u[ci] + u[ci+x_max])/2;
vyy = (-2 * v[ci] + v[ci+x_max])/2;
}
else if(y == y_max-1) {
uyy = (u[ci-x_max] - 2*u[ci])/2;
vyy = (v[ci-x_max] - 2*v[ci])/2;
}
else {
uyy = (u[ci+x_max] - 2*u[ci] + u[ci-x_max]);
vyy = (v[ci+x_max] - 2*v[ci] + v[ci-x_max]);
}
}
u_next[ci] = u[ci] + dt*(g[ci]*(uxx + uyy)- h[ci]*(u[ci] - fx[ci]));
v_next[ci] = v[ci] + dt*(g[ci]*(vxx + vyy) -h[ci]*(v[ci] - fy[ci]));
errs[omp_get_thread_num()] +=
pow(g[ci]*(uxx + uyy)- h[ci]*(u[ci] - fx[ci]), 2) +
pow(g[ci]*(vxx + vyy) -h[ci]*(v[ci] - fy[ci]), 2);
norms[omp_get_thread_num()] += pow(u[ci], 2) + pow(v[ci], 2);
/*
err +=
pow(g[ci]*(uxx + uyy)- h[ci]*(u[ci] - fx[ci]), 2) +
pow(g[ci]*(vxx + vyy) -h[ci]*(v[ci] - fy[ci]), 2);
norm += pow(u[ci], 2) + pow(v[ci], 2);
*/
}
aux = u;
u = u_next;
u_next = aux;
aux = v;
v = v_next;
v_next = aux;
for(k = 0; k < omp_get_max_threads(); k++) {
err += errs[k];
norm += norms[k];
}
err = sqrt(err);
norm = sqrt(norm);
err /= double_max(norm, norm_ant);
if(err < err_tol || i == iterations-1) {
sprintf(buf, "stopped at err = %f, it= %d", err, i);
IDL_Message(IDL_M_GENERIC, IDL_MSG_INFO, buf);
break;
}
norm_ant = norm;
}
ci = 0;
for(y = 0; y < y_max; y++) {
for(x = 0; x< x_max; x++) {
u_idl[ci] = u[ci];
v_idl[ci] = v[ci];
ci++;
}
}
free(u);
free(v);
free(u_next);
free(v_next);
free(errs);
free(norms);
return 1;
}
double double_max(double a, double b) {
if(a>b) return a ;
else return b;
}
int gvf(int argc, void *argv[]) {
if(argc != 11)
return 0;
return gvf_natural(
(long *) argv[0],
(short *)argv[1], (short *)argv[2],
(double *)argv[3],
(double *)argv[4], (double *)argv[5],
(double *)argv[6], (double *)argv[7],
(double *)argv[8], (double *)argv[9],
(double *)argv[10]
);
}
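/* A minimal sketch (not part of this library) of how gvf() might be driven
   from plain C with the same argv marshalling that IDL's CALL_EXTERNAL
   performs; the sizes and field data here are made up for illustration. */
static int gvf_call_sketch(void) {
    enum { NX = 4, NY = 4, N = NX * NY };
    long iterations = 10;
    short x_max = NX, y_max = NY;
    double dt = 0.1, err_tol = 1e-4;
    static double u[N], v[N], fx[N], fy[N], g[N], h[N];
    for (int i = 0; i < N; i++) { g[i] = 1.0; h[i] = 0.0; }
    void *argv[11] = { &iterations, &x_max, &y_max, &dt,
                       u, v, fx, fy, g, h, &err_tol };
    /* gvf() reports progress through IDL_Message, so this only runs when
       linked against the IDL runtime. */
    return gvf(11, argv);
}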
|
luo_rudy_1991.c | #include "luo_rudy_1991.h"
GET_CELL_MODEL_DATA(init_cell_model_data) {
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
sv[0] = -84.380111f; //V millivolt
sv[1] = 0.001713f; //m dimensionless
sv[2] = 0.982661f; //h dimensionless
sv[3] = 0.989108f; //j dimensionless
sv[4] = 0.003021f; //d dimensionless
sv[5] = 0.999968f; //f dimensionless
sv[6] = 0.041760f; //X dimensionless
sv[7] = 0.000179f; //Cai millimolar
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++) {
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = i;
for (int j = 0; j < num_steps; ++j) {
solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu(rY, rDY, stim_current);
for(int i = 0; i < NEQ; i++)
sv[i] = dt*rDY[i] + rY[i];
}
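// The loop above is one explicit (forward) Euler step per state variable,
// y_{n+1} = y_n + dt * f(y_n); dt must stay small relative to the fastest
// gating time constants for the integration to remain stable.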
#define IFNUMBER_1(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = (1.350000000000000e-01f*expf(((8.000000000000000e+01f+V_old_)/(-6.800000000000000e+00f)))); } else{ (name) = 0.000000000000000e+00f; }
#define IFNUMBER_2(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = ((3.560000000000000e+00f*expf((7.900000000000000e-02f*V_old_)))+(3.100000000000000e+05f*expf((3.500000000000000e-01f*V_old_)))); } else{ (name) = (1.000000000000000e+00f/(1.300000000000000e-01f*(1.000000000000000e+00f+expf(((V_old_+1.066000000000000e+01f)/(-1.110000000000000e+01f)))))); }
#define IFNUMBER_3(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = (((((-1.271400000000000e+05f)*expf((2.444000000000000e-01f*V_old_)))-(3.474000000000000e-05f*expf(((-4.391000000000000e-02f)*V_old_))))*(V_old_+3.778000000000000e+01f))/(1.000000000000000e+00f+expf((3.110000000000000e-01f*(V_old_+7.923000000000000e+01f))))); } else{ (name) = 0.000000000000000e+00f; }
#define IFNUMBER_4(name)if((V_old_<(-4.000000000000000e+01f))) { (name) = ((1.212000000000000e-01f*expf(((-1.052000000000000e-02f)*V_old_)))/(1.000000000000000e+00f+expf(((-1.378000000000000e-01f)*(V_old_+4.014000000000000e+01f))))); } else{ (name) = ((3.000000000000000e-01f*expf(((-2.535000000000000e-07f)*V_old_)))/(1.000000000000000e+00f+expf(((-1.000000000000000e-01f)*(V_old_+3.200000000000000e+01f))))); }
#define IFNUMBER_5(name)if((V_old_>(-1.000000000000000e+02f))) { (name) = ((2.837000000000000e+00f*(expf((4.000000000000000e-02f*(V_old_+7.700000000000000e+01f)))-1.000000000000000e+00f))/((V_old_+7.700000000000000e+01f)*expf((4.000000000000000e-02f*(V_old_+3.500000000000000e+01f))))); } else{ (name) = 1.000000000000000e+00f; }
void RHS_cpu(const real *sv, real *rDY_, real stim_current) {
//State variables
const real V_old_ = sv[0];
const real m_old_ = sv[1];
const real h_old_ = sv[2];
const real j_old_ = sv[3];
const real d_old_ = sv[4];
const real f_old_ = sv[5];
const real X_old_ = sv[6];
const real Cai_old_ = sv[7];
//Parameters
const real C = 1.000000000000000e+00f;
const real R = 8.314000000000000e+03f;
const real T = 3.100000000000000e+02f;
const real F = 9.648460000000001e+04f;
const real Nao = 1.400000000000000e+02f;
const real Nai = 1.800000000000000e+01f;
const real g_Na = 2.300000000000000e+01f;
const real Ko = 5.400000000000000e+00f;
const real PR_NaK = 1.833000000000000e-02f;
const real Ki = 1.450000000000000e+02f;
const real g_Kp = 1.830000000000000e-02f;
const real g_b = 3.921000000000000e-02f;
const real E_b = -5.987000000000000e+01f;
real calc_I_stim = stim_current;
real calc_E_Na = (((R*T)/F)*logf((Nao/Nai))); //2
real calc_alpha_m = ((3.200000000000000e-01f*(V_old_+4.713000000000000e+01f))/(1.000000000000000e+00f-expf(((-1.000000000000000e-01f)*(V_old_+4.713000000000000e+01f))))); //4
real calc_beta_m = (8.000000000000000e-02f*expf(((-V_old_)/1.100000000000000e+01f))); //5
real calc_alpha_h = 0.0f;
IFNUMBER_1(calc_alpha_h); //7
real calc_beta_h = 0.0f;
IFNUMBER_2(calc_beta_h); //8
real calc_alpha_j = 0.0f;
IFNUMBER_3(calc_alpha_j); //10
real calc_beta_j = 0.0f;
IFNUMBER_4(calc_beta_j); //11
real calc_E_si = (7.700000000000000e+00f-(1.302870000000000e+01f*logf((Cai_old_/1.000000000000000e+00f)))); //13
real calc_alpha_d = ((9.500000000000000e-02f*expf(((-1.000000000000000e-02f)*(V_old_-5.000000000000000e+00f))))/(1.000000000000000e+00f+expf(((-7.199999999999999e-02f)*(V_old_-5.000000000000000e+00f))))); //15
real calc_beta_d = ((7.000000000000001e-02f*expf(((-1.700000000000000e-02f)*(V_old_+4.400000000000000e+01f))))/(1.000000000000000e+00f+expf((5.000000000000000e-02f*(V_old_+4.400000000000000e+01f))))); //16
real calc_alpha_f = ((1.200000000000000e-02f*expf(((-8.000000000000000e-03f)*(V_old_+2.800000000000000e+01f))))/(1.000000000000000e+00f+expf((1.500000000000000e-01f*(V_old_+2.800000000000000e+01f))))); //18
real calc_beta_f = ((6.500000000000000e-03f*expf(((-2.000000000000000e-02f)*(V_old_+3.000000000000000e+01f))))/(1.000000000000000e+00f+expf(((-2.000000000000000e-01f)*(V_old_+3.000000000000000e+01f))))); //19
real calc_g_K = (2.820000000000000e-01f*powf((Ko/5.400000000000000e+00f),0.5f)); //21
real calc_E_K = (((R*T)/F)*logf(((Ko+(PR_NaK*Nao))/(Ki+(PR_NaK*Nai))))); //22
real calc_alpha_X = ((5.000000000000000e-04f*expf((8.300000000000000e-02f*(V_old_+5.000000000000000e+01f))))/(1.000000000000000e+00f+expf((5.700000000000000e-02f*(V_old_+5.000000000000000e+01f))))); //24
real calc_beta_X = ((1.300000000000000e-03f*expf(((-6.000000000000000e-02f)*(V_old_+2.000000000000000e+01f))))/(1.000000000000000e+00f+expf(((-4.000000000000000e-02f)*(V_old_+2.000000000000000e+01f))))); //25
real calc_Xi = 0.0f;
IFNUMBER_5(calc_Xi); //27
real calc_g_K1 = (6.047000000000000e-01f*powf((Ko/5.400000000000000e+00f),0.5f)); //28
real calc_E_K1 = (((R*T)/F)*logf((Ko/Ki))); //29
real calc_Kp = (1.000000000000000e+00f/(1.000000000000000e+00f+expf(((7.488000000000000e+00f-V_old_)/5.980000000000000e+00f)))); //35
real calc_i_b = (g_b*(V_old_-E_b)); //37
real calc_i_Na = (g_Na*powf(m_old_,3.000000000000000e+00f)*h_old_*j_old_*(V_old_-calc_E_Na)); //3
real calc_i_si = (9.000000000000000e-02f*d_old_*f_old_*(V_old_-calc_E_si)); //14
real calc_alpha_K1 = (1.020000000000000e+00f/(1.000000000000000e+00f+expf((2.385000000000000e-01f*((V_old_-calc_E_K1)-5.921500000000000e+01f))))); //31
real calc_beta_K1 = (((4.912400000000000e-01f*expf((8.032000000000000e-02f*((V_old_+5.476000000000000e+00f)-calc_E_K1))))+(1.000000000000000e+00f*expf((6.175000000000000e-02f*(V_old_-(calc_E_K1+5.943099999999999e+02f))))))/(1.000000000000000e+00f+expf(((-5.143000000000000e-01f)*((V_old_-calc_E_K1)+4.753000000000000e+00f))))); //32
real calc_E_Kp = calc_E_K1; //34
real calc_i_K = (calc_g_K*X_old_*calc_Xi*(V_old_-calc_E_K)); //23
real calc_K1_infinity = (calc_alpha_K1/(calc_alpha_K1+calc_beta_K1)); //33
real calc_i_Kp = (g_Kp*calc_Kp*(V_old_-calc_E_Kp)); //36
real calc_i_K1 = (calc_g_K1*calc_K1_infinity*(V_old_-calc_E_K1)); //30
rDY_[0] = (((-1.000000000000000e+00f)/C)*(calc_I_stim+calc_i_Na+calc_i_si+calc_i_K+calc_i_K1+calc_i_Kp+calc_i_b));
rDY_[1] = ((calc_alpha_m*(1.000000000000000e+00f-m_old_))-(calc_beta_m*m_old_));
rDY_[2] = ((calc_alpha_h*(1.000000000000000e+00f-h_old_))-(calc_beta_h*h_old_));
rDY_[3] = ((calc_alpha_j*(1.000000000000000e+00f-j_old_))-(calc_beta_j*j_old_));
rDY_[4] = ((calc_alpha_d*(1.000000000000000e+00f-d_old_))-(calc_beta_d*d_old_));
rDY_[5] = ((calc_alpha_f*(1.000000000000000e+00f-f_old_))-(calc_beta_f*f_old_));
rDY_[6] = ((calc_alpha_X*(1.000000000000000e+00f-X_old_))-(calc_beta_X*X_old_));
rDY_[7] = ((((-1.000000000000000e-04f)/1.000000000000000e+00f)*calc_i_si)+(7.000000000000001e-02f*(1.000000000000000e-04f-Cai_old_)));
}
|
GB_unop__sqrt_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__sqrt_fp32_fp32
// op(A') function: GB_unop_tran__sqrt_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = sqrtf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = sqrtf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = sqrtf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SQRT || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__sqrt_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = sqrtf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__sqrt_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
eliminate.c | #include "heads.h"
void eliminate(int base, int target, int col) {
double base_num = matrix[base][col];
double multi = (double)matrix[target][col] / base_num;
// start from col to reduce work: entries before col are already zero
if(multi != 0){
for (int i = col; i < SIZE; i++) {
// function discussed in readme.md
matrix[target][i] -= matrix[base][i] * multi;
}
vec[target][0] -= vec[base][0] * multi;
}
}
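// Once eliminate_all() below has reduced `matrix` to upper-triangular form,
// back substitution recovers the solution. A minimal sketch, assuming the
// same globals from heads.h (matrix, vec, SIZE); the caller supplies x of
// length SIZE, and pivots are assumed nonzero:
void back_substitute(double *x) {
    for (int i = SIZE - 1; i >= 0; i--) {
        double sum = vec[i][0];
        for (int j = i + 1; j < SIZE; j++) {
            // subtract the contribution of the already-solved unknowns
            sum -= matrix[i][j] * x[j];
        }
        x[i] = sum / matrix[i][i];
    }
}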
void eliminate_all(int nthreads) {
if (nthreads == 1) {
for (int i = 0; i < SIZE - 1; i++) {
for (int j = i + 1; j < SIZE; j++) {
eliminate(i, j, i);
}
}
}
else {
omp_set_num_threads(nthreads);
int i, j;
#pragma omp parallel for private(j)
for (i = 0; i < SIZE - 1; i++) {
for (j = i + 1; j < SIZE; j++) {
eliminate(i, j, i);
}
}
}
} |
kpoint.c | /* kpoint.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <stdio.h>
#include <stdlib.h>
#include "mathfunc.h"
#include "kpoint.h"
const int kpt_bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
{ 0, 0, 0},
{ 0, 0, 1},
{ 0, 0, 2},
{ 0, 0, -2},
{ 0, 0, -1},
{ 0, 1, 0},
{ 0, 1, 1},
{ 0, 1, 2},
{ 0, 1, -2},
{ 0, 1, -1},
{ 0, 2, 0},
{ 0, 2, 1},
{ 0, 2, 2},
{ 0, 2, -2},
{ 0, 2, -1},
{ 0, -2, 0},
{ 0, -2, 1},
{ 0, -2, 2},
{ 0, -2, -2},
{ 0, -2, -1},
{ 0, -1, 0},
{ 0, -1, 1},
{ 0, -1, 2},
{ 0, -1, -2},
{ 0, -1, -1},
{ 1, 0, 0},
{ 1, 0, 1},
{ 1, 0, 2},
{ 1, 0, -2},
{ 1, 0, -1},
{ 1, 1, 0},
{ 1, 1, 1},
{ 1, 1, 2},
{ 1, 1, -2},
{ 1, 1, -1},
{ 1, 2, 0},
{ 1, 2, 1},
{ 1, 2, 2},
{ 1, 2, -2},
{ 1, 2, -1},
{ 1, -2, 0},
{ 1, -2, 1},
{ 1, -2, 2},
{ 1, -2, -2},
{ 1, -2, -1},
{ 1, -1, 0},
{ 1, -1, 1},
{ 1, -1, 2},
{ 1, -1, -2},
{ 1, -1, -1},
{ 2, 0, 0},
{ 2, 0, 1},
{ 2, 0, 2},
{ 2, 0, -2},
{ 2, 0, -1},
{ 2, 1, 0},
{ 2, 1, 1},
{ 2, 1, 2},
{ 2, 1, -2},
{ 2, 1, -1},
{ 2, 2, 0},
{ 2, 2, 1},
{ 2, 2, 2},
{ 2, 2, -2},
{ 2, 2, -1},
{ 2, -2, 0},
{ 2, -2, 1},
{ 2, -2, 2},
{ 2, -2, -2},
{ 2, -2, -1},
{ 2, -1, 0},
{ 2, -1, 1},
{ 2, -1, 2},
{ 2, -1, -2},
{ 2, -1, -1},
{-2, 0, 0},
{-2, 0, 1},
{-2, 0, 2},
{-2, 0, -2},
{-2, 0, -1},
{-2, 1, 0},
{-2, 1, 1},
{-2, 1, 2},
{-2, 1, -2},
{-2, 1, -1},
{-2, 2, 0},
{-2, 2, 1},
{-2, 2, 2},
{-2, 2, -2},
{-2, 2, -1},
{-2, -2, 0},
{-2, -2, 1},
{-2, -2, 2},
{-2, -2, -2},
{-2, -2, -1},
{-2, -1, 0},
{-2, -1, 1},
{-2, -1, 2},
{-2, -1, -2},
{-2, -1, -1},
{-1, 0, 0},
{-1, 0, 1},
{-1, 0, 2},
{-1, 0, -2},
{-1, 0, -1},
{-1, 1, 0},
{-1, 1, 1},
{-1, 1, 2},
{-1, 1, -2},
{-1, 1, -1},
{-1, 2, 0},
{-1, 2, 1},
{-1, 2, 2},
{-1, 2, -2},
{-1, 2, -1},
{-1, -2, 0},
{-1, -2, 1},
{-1, -2, 2},
{-1, -2, -2},
{-1, -2, -1},
{-1, -1, 0},
{-1, -1, 1},
{-1, -1, 2},
{-1, -1, -2},
{-1, -1, -1}
};
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3]);
static int get_ir_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT * rot_reciprocal);
static int
get_ir_reciprocal_mesh_openmp(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT* rot_reciprocal);
static int relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
const int mesh[3]);
static int get_grid_point_double_mesh(const int address_double[3],
const int mesh[3]);
static int get_grid_point_single_mesh(const int address[3],
const int mesh[3]);
static void reduce_grid_address(int address[3],
const int address_double[3],
const int mesh[3]);
int kpt_get_grid_point_double_mesh(const int address_double[3],
const int mesh[3])
{
return get_grid_point_double_mesh(address_double, mesh);
}
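/* Example: with mesh = [4,4,4] and is_shift = [1,0,0] (a half-step shift */
/* along the first axis), the grid point [1,2,3] has the doubled address */
/* [3,4,6]; halving each component (rounding down) and reducing modulo */
/* the mesh recovers [1,2,3] and hence its single-mesh index. */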
/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' corresponds to the index of grid_point. */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
int num_ir;
#ifdef _OPENMP
num_ir = get_ir_reciprocal_mesh_openmp(grid_address,
map,
mesh,
is_shift,
rot_reciprocal);
#else
num_ir = get_ir_reciprocal_mesh(grid_address,
map,
mesh,
is_shift,
rot_reciprocal);
#endif
return num_ir;
}
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const int is_time_reversal,
const MatINT * rotations,
const int num_q,
SPGCONST double qpoints[][3])
{
int num_ir;
MatINT *rot_reciprocal, *rot_reciprocal_q;
double tolerance;
rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
tolerance,
num_q,
qpoints);
#ifdef _OPENMP
num_ir = get_ir_reciprocal_mesh_openmp(grid_address,
map,
mesh,
is_shift,
rot_reciprocal_q);
#else
num_ir = get_ir_reciprocal_mesh(grid_address,
map,
mesh,
is_shift,
rot_reciprocal_q);
#endif
mat_free_MatINT(rot_reciprocal_q);
mat_free_MatINT(rot_reciprocal);
return num_ir;
}
void kpt_get_grid_points_by_rotations(int rot_grid_points[],
const int address_orig[3],
const MatINT * rot_reciprocal,
const int mesh[3],
const int is_shift[3])
{
int i;
int address_double_orig[3], address_double[3];
for (i = 0; i < 3; i++) {
address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
}
for (i = 0; i < rot_reciprocal->size; i++) {
mat_multiply_matrix_vector_i3(address_double,
rot_reciprocal->mat[i],
address_double_orig);
rot_grid_points[i] = get_grid_point_double_mesh(address_double, mesh);
}
}
void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[],
const int address_orig[3],
const MatINT * rot_reciprocal,
const int mesh[3],
const int is_shift[3],
const int bz_map[])
{
int i;
int address_double_orig[3], address_double[3], bzmesh[3];
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
}
for (i = 0; i < rot_reciprocal->size; i++) {
mat_multiply_matrix_vector_i3(address_double,
rot_reciprocal->mat[i],
address_double_orig);
rot_grid_points[i] =
bz_map[get_grid_point_double_mesh(address_double, bzmesh)];
}
}
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
return relocate_BZ_grid_address(bz_grid_address,
bz_map,
grid_address,
mesh,
rec_lattice,
is_shift);
}
MatINT *kpt_get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal)
{
return get_point_group_reciprocal(rotations, is_time_reversal);
}
MatINT *kpt_get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3])
{
return get_point_group_reciprocal_with_q(rot_reciprocal,
symprec,
num_q,
qpoints);
}
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal)
{
int i, j, num_rot;
MatINT *rot_reciprocal, *rot_return;
int *unique_rot;
SPGCONST int inversion[3][3] = {
{-1, 0, 0 },
{ 0,-1, 0 },
{ 0, 0,-1 }
};
if (is_time_reversal) {
rot_reciprocal = mat_alloc_MatINT(rotations->size * 2);
} else {
rot_reciprocal = mat_alloc_MatINT(rotations->size);
}
unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size);
for (i = 0; i < rot_reciprocal->size; i++) {
unique_rot[i] = -1;
}
for (i = 0; i < rotations->size; i++) {
mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);
if (is_time_reversal) {
mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i],
inversion,
rot_reciprocal->mat[i]);
}
}
num_rot = 0;
for (i = 0; i < rot_reciprocal->size; i++) {
for (j = 0; j < num_rot; j++) {
if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]],
rot_reciprocal->mat[i])) {
goto escape;
}
}
unique_rot[num_rot] = i;
num_rot++;
escape:
;
}
rot_return = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]); }
free(unique_rot);
mat_free_MatINT(rot_reciprocal);
return rot_return;
}
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3])
{
int i, j, k, l, is_all_ok, num_rot;
int *ir_rot;
double q_rot[3], diff[3];
MatINT * rot_reciprocal_q;
is_all_ok = 0;
num_rot = 0;
ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size);
for (i = 0; i < rot_reciprocal->size; i++) {
ir_rot[i] = -1;
}
for (i = 0; i < rot_reciprocal->size; i++) {
for (j = 0; j < num_q; j++) {
is_all_ok = 0;
mat_multiply_matrix_vector_id3(q_rot,
rot_reciprocal->mat[i],
qpoints[j]);
for (k = 0; k < num_q; k++) {
for (l = 0; l < 3; l++) {
diff[l] = q_rot[l] - qpoints[k][l];
diff[l] -= mat_Nint(diff[l]);
}
if (mat_Dabs(diff[0]) < symprec &&
mat_Dabs(diff[1]) < symprec &&
mat_Dabs(diff[2]) < symprec) {
is_all_ok = 1;
break;
}
}
if (! is_all_ok) {
break;
}
}
if (is_all_ok) {
ir_rot[num_rot] = i;
num_rot++;
}
}
rot_reciprocal_q = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
rot_reciprocal->mat[ir_rot[i]]);
}
free(ir_rot);
return rot_reciprocal_q;
}
static int get_ir_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
/* In the following loop, the mesh is doubled; even and odd doubled */
/* addresses correspond to is_shift[i] = 0 and 1, respectively. */
/* is_shift = [0,0,0] gives a Gamma-centered mesh. */
/* grid: reducible grid points */
/* map: the mapping from each point to ir-point. */
int i, j, k, l, grid_point, grid_point_rot, num_ir = 0;
int address[3], address_double[3], address_double_rot[3];
/* "-1" means the element is not touched yet. */
for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
map[i] = -1;
}
#ifndef GRID_ORDER_XYZ
for (i = 0; i < mesh[2]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[0]; k++) {
address[0] = k;
address[1] = j;
address[2] = i;
#else
for (i = 0; i < mesh[0]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[2]; k++) {
address[0] = i;
address[1] = j;
address[2] = k;
#endif
for (l = 0; l < 3; l++) {
address_double[l] = address[l] * 2 + is_shift[l];
}
grid_point = get_grid_point_double_mesh(address_double, mesh);
reduce_grid_address(grid_address[grid_point], address, mesh);
for (l = 0; l < rot_reciprocal->size; l++) {
mat_multiply_matrix_vector_i3(address_double_rot,
rot_reciprocal->mat[l],
address_double);
grid_point_rot = get_grid_point_double_mesh(address_double_rot, mesh);
if (grid_point_rot > -1) { /* Invalid if even --> odd or odd --> even */
if (map[grid_point_rot] > -1) {
map[grid_point] = map[grid_point_rot];
break;
}
}
}
if (map[grid_point] == -1) {
map[grid_point] = grid_point;
num_ir++;
}
}
}
}
return num_ir;
}
static int
get_ir_reciprocal_mesh_openmp(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT * rot_reciprocal)
{
int i, j, k, l, grid_point, grid_point_rot, num_ir;
int address[3], address_double[3], address_double_rot[3];
#ifndef GRID_ORDER_XYZ
#pragma omp parallel for private(j, k, l, grid_point, grid_point_rot, address, address_double, address_double_rot)
for (i = 0; i < mesh[2]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[0]; k++) {
address[0] = k;
address[1] = j;
address[2] = i;
#else
#pragma omp parallel for private(j, k, l, grid_point, grid_point_rot, address, address_double, address_double_rot)
for (i = 0; i < mesh[0]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[2]; k++) {
address[0] = i;
address[1] = j;
address[2] = k;
#endif
for (l = 0; l < 3; l++) {
address_double[l] = address[l] * 2 + is_shift[l];
}
grid_point = get_grid_point_double_mesh(address_double, mesh);
map[grid_point] = grid_point;
reduce_grid_address(grid_address[grid_point], address, mesh);
for (l = 0; l < rot_reciprocal->size; l++) {
mat_multiply_matrix_vector_i3(address_double_rot,
rot_reciprocal->mat[l],
address_double);
grid_point_rot = get_grid_point_double_mesh(address_double_rot, mesh);
if (grid_point_rot > -1) { /* Invalid if even --> odd or odd --> even */
if (grid_point_rot < map[grid_point]) {
map[grid_point] = grid_point_rot;
}
}
}
}
}
}
num_ir = 0;
#pragma omp parallel for reduction(+:num_ir)
for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
if (map[i] == i) {
num_ir++;
}
}
return num_ir;
}
/* Relocate grid addresses to first Brillouin zone */
/* bz_grid_address[prod(mesh + 1)][3] */
/* bz_map[prod(mesh * 2)] */
static int relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
double tolerance, min_distance;
double q_vector[3], distance[KPT_NUM_BZ_SEARCH_SPACE];
int bzmesh[3], bz_address_double[3];
int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp;
tolerance = get_tolerance_for_BZ_reduction(rec_lattice, mesh);
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
}
for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) {
bz_map[i] = -1;
}
boundary_num_gp = 0;
total_num_gp = mesh[0] * mesh[1] * mesh[2];
for (i = 0; i < total_num_gp; i++) {
for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
for (k = 0; k < 3; k++) {
q_vector[k] =
((grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k]) * 2 +
is_shift[k]) / ((double)mesh[k]) / 2;
}
mat_multiply_matrix_vector_d3(q_vector, rec_lattice, q_vector);
distance[j] = mat_norm_squared_d3(q_vector);
}
min_distance = distance[0];
min_index = 0;
for (j = 1; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
if (distance[j] < min_distance) {
min_distance = distance[j];
min_index = j;
}
}
for (j = 0; j < KPT_NUM_BZ_SEARCH_SPACE; j++) {
if (distance[j] < min_distance + tolerance) {
if (j == min_index) {
gp = i;
} else {
gp = boundary_num_gp + total_num_gp;
}
for (k = 0; k < 3; k++) {
bz_grid_address[gp][k] =
grid_address[i][k] + kpt_bz_search_space[j][k] * mesh[k];
bz_address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k];
}
bzgp = get_grid_point_double_mesh(bz_address_double, bzmesh);
bz_map[bzgp] = gp;
if (j != min_index) {
boundary_num_gp++;
}
}
}
}
return boundary_num_gp + total_num_gp;
}
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3],
const int mesh[3])
{
int i, j;
double tolerance;
double length[3];
for (i = 0; i < 3; i++) {
length[i] = 0;
for (j = 0; j < 3; j++) {
length[i] += rec_lattice[j][i] * rec_lattice[j][i];
}
length[i] /= mesh[i] * mesh[i];
}
tolerance = length[0];
for (i = 1; i < 3; i++) {
if (tolerance < length[i]) {
tolerance = length[i];
}
}
tolerance *= 0.01;
return tolerance;
}
static int get_grid_point_double_mesh(const int address_double[3],
const int mesh[3])
{
int i, address[3];
for (i = 0; i < 3; i++) {
if (address_double[i] % 2 == 0) {
address[i] = address_double[i] / 2;
} else {
address[i] = (address_double[i] - 1) / 2;
}
}
mat_modulo_i3(address, mesh);
return get_grid_point_single_mesh(address, mesh);
}
static int get_grid_point_single_mesh(const int address[3],
const int mesh[3])
{
#ifndef GRID_ORDER_XYZ
return address[2] * mesh[0] * mesh[1] + address[1] * mesh[0] + address[0];
#else
return address[0] * mesh[1] * mesh[2] + address[1] * mesh[2] + address[2];
#endif
}
static void reduce_grid_address(int reduced_address[3],
const int address[3],
const int mesh[3])
{
int i;
for (i = 0; i < 3; i++) {
#ifndef GRID_BOUNDARY_AS_NEGATIVE
reduced_address[i] = address[i] - mesh[i] * (address[i] > mesh[i] / 2);
#else
reduced_address[i] = address[i] - mesh[i] * (address[i] >= mesh[i] / 2);
#endif
}
}
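/* A minimal usage sketch (not part of this library): the irreducible */
/* points of a Gamma-centered 4x4x4 mesh under the identity rotation */
/* alone, in which case no reduction happens and num_ir == 64. */
static int kpt_usage_sketch(void)
{
  int grid_address[64][3], map[64], identity[3][3] = {{1,0,0},{0,1,0},{0,0,1}};
  const int mesh[3] = {4, 4, 4};
  const int is_shift[3] = {0, 0, 0};
  int num_ir;
  MatINT *rot = mat_alloc_MatINT(1);
  mat_copy_matrix_i3(rot->mat[0], identity);
  num_ir = kpt_get_irreducible_reciprocal_mesh(grid_address, map,
                                               mesh, is_shift, rot);
  mat_free_MatINT(rot);
  return num_ir;
}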
|
declare6.c | /* Example of the ref and uval modifiers in the linear clause
The ref modifier declares that the address of x is linear.
The uval modifier declares that the address of c is uniform,
and its value is linear.
*/
#pragma omp declare simd linear(ref(x)) linear(uval(c))
void increment(int& x, int& c)
{ x += c; }
void Fref(int *a, int n)
{
#pragma omp simd
for (int i=0; i<n; i++) {
increment(a[i], i);
} // End simd region
}
|
max_reduction.c | #include <float.h>
double array_max(double* restrict var, int ncells)
{
double xmax = -DBL_MAX; /* -DBL_MAX is the identity for max; DBL_MIN is the smallest positive double, not the most negative */
#pragma omp parallel for reduction(max:xmax)
for (int i = 0; i < ncells; i++){
if (var[i] > xmax) xmax = var[i];
}
return xmax;
}
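/* Minimal usage sketch (hypothetical driver, not part of the original file): */
#ifdef ARRAY_MAX_DEMO
#include <stdio.h>
int main(void)
{
double data[5] = { -3.0, 7.5, 1.2, -9.9, 4.4 };
printf("max = %g\n", array_max(data, 5)); /* prints: max = 7.5 */
return 0;
}
#endif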
|
GB_binop__min_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__min_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_int64)
// A*D function (colscale): GB (_AxD__min_int64)
// D*A function (rowscale): GB (_DxB__min_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__min_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__min_int64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_int64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_int64)
// C=scalar+B GB (_bind1st__min_int64)
// C=scalar+B' GB (_bind1st_tran__min_int64)
// C=A+scalar GB (_bind2nd__min_int64)
// C=A'+scalar GB (_bind2nd_tran__min_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_INT64 || GxB_NO_MIN_INT64)
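// Note on GB_IMIN: it is defined in GB.h; as an assumption about its
// definition, for integer types it is the usual ternary minimum, roughly
// #define GB_IMIN(x,y) (((x) < (y)) ? (x) : (y))
// so GB_BINOP above reduces to z = (x < y) ? x : y for int64_t inputs.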
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_int64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int64_t alpha_scalar ;
int64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
beta_scalar = (*((int64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__min_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__min_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
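// A sketch of the helper macros used in the loop above (as commonly defined
// in GB.h; treat the exact definitions as an assumption):
// GBB (Bb, p) -- entry test: true if B is full (Bb == NULL) or Bb [p] is set
// GBX (Bx, p, iso) -- value fetch: Bx [0] if the matrix is iso, else Bx [p]
// so the loop skips absent bitmap entries and handles iso-valued matrices.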
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__min_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__min_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__min_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
round_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: qtang@openailab.com
*/
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
int ref_round_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
// dims size < 4 (tensors of 1, 2 or 3 dimensions)
if (input_tensor->dim_num < 4)
{
float* input_data = input_tensor->data;
float* out_data = output_tensor->data;
int total_size = input_tensor->elem_num;
for (int i = 0; i < total_size; i++)
{
out_data[i] = round(input_data[i]);
}
return 0;
}
// dims size == 4 (NCHW layout)
else if (input_tensor->dim_num == 4)
{
int w = input_tensor->dims[3];
int h = input_tensor->dims[2];
int channels = input_tensor->dims[1];
int size = h * w;
int c_step = h * w;
float* input_data = input_tensor->data;
float* out_data = output_tensor->data;
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = input_data + c_step * q;
float* dst = out_data + c_step * q;
for (int i = 0; i < size; i++)
{
dst[i] = round(src[i]);
}
}
return 0;
}
return -1;
}
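/* Note: C99 round() rounds halfway cases away from zero, e.g.
round(2.5) == 3.0 and round(-2.5) == -3.0; this differs from the
round-half-to-even (banker's rounding) used by some frameworks. */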
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
// exec_node->inplace_map[0] = 0;
// exec_node->inplace_map[1] = 0;
// exec_node->inplace_map_num = 1;
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
// exec_node->inplace_map_num = 0;
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* output_tensor;
int layout = ir_graph->graph_layout;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
// inplace inference
// if(input_tensor->data != output_tensor->data)
// {
// TLOG_ERR("input and output are not the same mem\n");
// set_tengine_errno(EFAULT);
// return -1;
// }
int ret = ref_round_fp32(input_tensor, output_tensor, exec_graph->num_thread);
if (ret != 0)
return -1;
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_round_ref_op(void* arg)
{
return register_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
int unregister_round_ref_op(void* arg)
{
return unregister_builtin_node_ops(OP_ROUND, &hcl_node_ops);
}
|
cafe_tree.c | /*! \page Tree Tree
* \code{.sh}
# tree NEWICK-formatted tree
* \endcode
*
* \code{.sh}
# tree -i treefile
* \endcode
*
A NEWICK-formatted tree containing branch lengths and taxon names as they are specified in the \ref Load "input file".
Branch lengths
should be in integer units, and the tree should be ultrametric (all paths from root to tip should have the same length).
If the tree is not ultrametric to a tolerance of 0.01%, a warning will be logged.
Please note that there should be no spaces in the tree string, nor a semicolon at the end of the line.
Here is an example of an ultrametric tree followed by its NEWICK notation:
\htmlonly <style>div.image img[src="simple_tree.png"]{width:600px;}</style> \endhtmlonly
\image html simple_tree.png "(((chimp:6,human:6):81,(mouse:17,rat:17):70):6,dog:93)" width=\\textwidth
*/
#include "cafe.h"
#include<stdlib.h>
#include<math.h>
#include<mathfunc.h>
#include <chooseln_cache.h>
#include "time.h"
extern void __phylogeny_free_node(pTree ptree, pTreeNode ptnode, va_list ap1);
extern pBirthDeathCacheArray probability_cache;
extern struct chooseln_cache cache;
pTreeNode cafe_tree_new_empty_node(pTree ptree)
{
pCafeTree pcafe = (pCafeTree)ptree;
pCafeNode pcnode = (pCafeNode)memory_new(1, sizeof(CafeNode) );
pcnode->likelihoods = (double*)memory_new(pcafe->size_of_factor,sizeof(double));
pcnode->viterbi = (int*)memory_new(pcafe->size_of_factor,sizeof(int));
pcnode->birth_death_probabilities.lambda = pcafe->lambda;
pcnode->birth_death_probabilities.mu = pcafe->mu;
pcnode->familysize = -1;
pcnode->errormodel = NULL;
phylogeny_clear_node((pPhylogenyNode)pcnode);
return (pTreeNode)pcnode;
}
void cafe_tree_set_parameters(pCafeTree pcafe, family_size_range* range, double lambda)
{
int i;
pArrayList nlist = pcafe->super.nlist;
copy_range_to_tree(pcafe, range);
pcafe->lambda = lambda;
int rsize = pcafe->rfsize;
int fsize = range->max - range->min + 1;
int max_size = rsize > fsize ? rsize : fsize;
if ( pcafe->size_of_factor < max_size )
{
pcafe->size_of_factor = max_size;
for ( i = 0; i < nlist->size ; i++ )
{
pCafeNode pcnode = (pCafeNode)nlist->array[i];
pcnode->likelihoods = (double*)memory_realloc(pcnode->likelihoods,pcafe->size_of_factor,sizeof(double));
pcnode->viterbi = (int*)memory_realloc(pcnode->viterbi,pcafe->size_of_factor,sizeof(int));
}
}
}
void __cafe_tree_free_node(pTree ptree, pTreeNode ptnode, va_list ap1)
{
va_list ap;
if (ap1) va_copy(ap, ap1);
pCafeNode pcnode = (pCafeNode)ptnode;
if (pcnode->likelihoods) memory_free(pcnode->likelihoods);
pcnode->likelihoods = NULL;
if (pcnode->viterbi) memory_free(pcnode->viterbi);
pcnode->viterbi = NULL;
__phylogeny_free_node(ptree, ptnode, ap);
if (ap1) va_end(ap1);
}
void cafe_tree_free(pCafeTree pcafe)
{
if ( pcafe->super.nlist )
{
int i;
for ( i = 0 ; i < pcafe->super.nlist->size ; i++ )
{
__cafe_tree_free_node((pTree)pcafe, (pTreeNode)pcafe->super.nlist->array[i], NULL );
}
}
else
{
tree_traveral_prefix((pTree)pcafe,__cafe_tree_free_node);
}
pTree ptree = (pTree)pcafe;
if ( *ptree->count == 0 && ptree->data ) vector_free( ((pVector)ptree->data), free );
tree_free(ptree);
}
/*******************************************************************************
* Tree Output
*******************************************************************************/
void cafe_tree_string_name(pString pstr, pPhylogenyNode ptnode)
{
char buf[STRING_BUF_SIZE];
int familysize = ((pCafeNode)ptnode)->familysize;
int idx = 0;
if ( ptnode->name || familysize >= 0 )
{
if ( ptnode->name ) idx = sprintf(buf,"%s", ptnode->name);
if ( familysize >= 0) sprintf(&buf[idx],"_%d", familysize );
string_add(pstr,buf);
}
}
void cafe_tree_string_familysize_lambda(pString pstr, pPhylogenyNode ptnode)
{
int familysize = ((pCafeNode)ptnode)->familysize;
if ( ptnode->branchlength <= 0 ) return;
if ( ptnode->name ) string_fadd(pstr,"%s", ptnode->name );
if ( familysize >= 0) string_fadd(pstr,"<%d>", familysize );
double lambda = ((pCafeNode)ptnode)->birth_death_probabilities.lambda;
string_fadd(pstr,"_%lf", lambda );
}
void cafe_tree_string_familysize(pString pstr, pPhylogenyNode ptnode)
{
int familysize = ((pCafeNode)ptnode)->familysize;
if ( ptnode->branchlength <= 0 ) return;
if ( ptnode->name ) string_fadd(pstr,"%s", ptnode->name );
if ( familysize >= 0) string_fadd(pstr,"< %d >", familysize );
}
void cafe_tree_string_lambda(pString pstr, pPhylogenyNode ptnode)
{
if ( ptnode->branchlength <= 0 ) return;
if ( ptnode->name ) string_fadd(pstr,"%s", ptnode->name );
string_fadd(pstr,"_%lf", ((pCafeNode)ptnode)->birth_death_probabilities.lambda );
}
void cafe_tree_string_id(pString pstr, pPhylogenyNode pnode)
{
if ( pnode->name ) string_fadd(pstr,"%s", pnode->name );
string_fadd(pstr,"<%d>", pnode->super.id );
}
pString cafe_tree_string_with_familysize_lambda(pCafeTree pcafe)
{
return phylogeny_string((pTree)pcafe,cafe_tree_string_familysize_lambda);
}
pString cafe_tree_string_with_lambda(pCafeTree pcafe)
{
return phylogeny_string((pTree)pcafe,cafe_tree_string_lambda);
}
pString cafe_tree_string_with_familysize(pCafeTree pcafe)
{
return phylogeny_string((pTree)pcafe,cafe_tree_string_familysize);
}
pString cafe_tree_string_with_id(pCafeTree pcafe)
{
return phylogeny_string((pTree)pcafe,cafe_tree_string_id);
}
pString cafe_tree_string(pCafeTree pcafe)
{
return phylogeny_string((pTree)pcafe,cafe_tree_string_name);
}
void cafe_tree_string_print(pCafeTree pcafe)
{
pString pstr = cafe_tree_string(pcafe);
printf("%s\n", pstr->buf );
string_free(pstr);
}
/**
* \brief Set the likelihood to 1 for the observed value and 0 otherwise, or copy values from an existing errormodel.
* Copies likelihood values from an errormodel if one exists;
* otherwise sets all likelihoods to 0 except the entry at familysize, which is set to 1.
*/
void initialize_leaf_likelihoods(pTree ptree, pTreeNode ptnode)
{
pCafeTree pcafe = (pCafeTree)ptree;
pCafeNode pcnode = (pCafeNode)ptnode;
if (pcnode->errormodel) {
memset((void*)pcnode->likelihoods, 0, pcafe->size_of_factor*sizeof(double));
for (int j = 0; j<pcafe->size_of_factor; j++) {
// conditional probability of measuring i=familysize when true count is j
pcnode->likelihoods[j] = pcnode->errormodel->errormatrix[pcnode->familysize][j];
}
}
else {
// number of likelihoods should be set from the tree's size_of_factor,
// therefore the familysize must be less than this
assert(pcnode->familysize >= 0 && pcnode->familysize < pcafe->size_of_factor);
memset((void*)pcnode->likelihoods, 0, pcafe->size_of_factor*sizeof(double));
pcnode->likelihoods[pcnode->familysize] = 1;
}
}
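/* Illustrative example: a leaf with familysize = 3, no errormodel and
size_of_factor = 6 ends up with likelihoods = {0, 0, 0, 1, 0, 0}, a point
mass on the observed count. With an errormodel, likelihoods[j] instead holds
the probability of observing count 3 given a true count of j. */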
void compute_child_factor(pCafeTree pcafe, pCafeNode child, family_size_range* range, double *factors)
{
// p(node=c,child|s) = p(node=c|s)p(child|node=c) integrated over all c
// remember child likelihood[c]'s never sum to 1 because they are likelihoods conditioned on c.
// incoming node likelihoods do not sum to 1; outgoing ones do.
if (!child->birthdeath_matrix)
node_set_birthdeath_matrix(child, probability_cache, pcafe->k);
assert(child->birthdeath_matrix != NULL);
square_matrix_multiply(child->birthdeath_matrix, child->likelihoods, range->root_min, range->root_max, range->min, range->max, factors);
}
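/* In scalar form, the call above computes, for each candidate parent size s,
factors[s] = sum_c P(child = c | parent = s, t) * child->likelihoods[c],
where P(. | ., t) is the birth-death transition matrix for the child's branch
length t: the usual pruning (Felsenstein) recursion for this model. */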
void compute_internal_node_likelihood(pTree ptree, pTreeNode ptnode)
{
pCafeTree pcafe = (pCafeTree)ptree;
pCafeNode pcnode = (pCafeNode)ptnode;
family_size_range range;
range.min = pcafe->range.min;
range.max = pcafe->range.max;
if (tree_is_root(ptree, ptnode))
{
range.root_min = pcafe->range.root_min;
range.root_max = pcafe->range.root_max;
}
else
{
range.root_min = pcafe->range.min;
range.root_max = pcafe->range.max;
}
double *left_factor = memory_new(pcafe->size_of_factor, sizeof(double));
double *right_factor = memory_new(pcafe->size_of_factor, sizeof(double));
pCafeNode child1 = (pCafeNode)((pTreeNode)pcnode)->children->head->data;
pCafeNode child2 = (pCafeNode)((pTreeNode)pcnode)->children->tail->data;
#if _OPENMP > 201307 // tasks aren't supported by older OpenMP versions
#pragma omp parallel
#endif
#pragma omp single nowait
{
#pragma omp task
compute_child_factor(pcafe, child1, &range, left_factor);
#pragma omp task
compute_child_factor(pcafe, child2, &range, right_factor);
#pragma omp taskwait
int size = range.root_max - range.root_min + 1;
assert(size <= pcafe->size_of_factor);
for (int i = 0; i < size; i++)
{
pcnode->likelihoods[i] = left_factor[i] * right_factor[i];
}
}
memory_free(left_factor);
memory_free(right_factor);
}
void free_probabilities(struct probabilities *probs)
{
if (probs->param_lambdas)
{
memory_free(probs->param_lambdas);
probs->param_lambdas = NULL;
}
if (probs->param_mus)
{
memory_free(probs->param_mus);
probs->param_mus = NULL;
}
}
void compute_node_likelihoods(pTree ptree, pTreeNode ptnode, va_list ap1)
{
if (tree_is_leaf(ptnode))
{
initialize_leaf_likelihoods(ptree, ptnode);
}
else
{
compute_internal_node_likelihood(ptree, ptnode);
}
}
void compute_node_likelihoods_recursive(pTree ptree, pTreeNode ptnode)
{
if (!tree_is_leaf(ptnode))
{
#pragma omp task
{
pTreeNode child1 = (pTreeNode)(ptnode)->children->head->data;
compute_node_likelihoods_recursive(ptree, child1);
}
#pragma omp task
{
pTreeNode child2 = (pTreeNode)(ptnode)->children->tail->data;
compute_node_likelihoods_recursive(ptree, child2);
}
}
#pragma omp taskwait
compute_node_likelihoods(ptree, ptnode, NULL);
}
void compute_tree_likelihoods(pCafeTree pcafe)
{
compute_node_likelihoods_recursive((pTree)pcafe, pcafe->super.root);
}
double* get_likelihoods(const pCafeTree pcafe)
{
return ((pCafeNode)pcafe->super.root)->likelihoods;
}
/**
* \brief Initialize a node with the probability matrices it may need.
* If multiple lambdas are set, k_bd is filled with num_lambdas matrices of
* probability values; otherwise the single birthdeath_matrix is used.
* Probability values are drawn from the cache argument, which should hold
* the full variety of values the node may require.
*/
void node_set_birthdeath_matrix(pCafeNode pcnode, pBirthDeathCacheArray cache, int num_lambdas)
{
if (pcnode->super.branchlength <= 0)
return;
if (pcnode->birth_death_probabilities.param_lambdas) {
if (pcnode->birth_death_probabilities.param_mus) {
if (num_lambdas > 0) {
for (int k = 0; k<num_lambdas; k++) {
struct square_matrix* bd = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.param_mus[k]);
arraylist_add(pcnode->k_bd, bd);
}
}
else {
pcnode->birthdeath_matrix = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.param_mus[0]);
}
}
else {
if (num_lambdas > 0) {
for (int k = 0; k<num_lambdas; k++) {
struct square_matrix* bd = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.mu);
arraylist_add(pcnode->k_bd, bd);
}
}
else {
pcnode->birthdeath_matrix = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.mu);
}
}
}
else {
pcnode->birthdeath_matrix = birthdeath_cache_get_matrix(cache, pcnode->super.branchlength, pcnode->birth_death_probabilities.lambda, pcnode->birth_death_probabilities.mu);
}
}
void add_key(pArrayList arr, double d_branchlength, double lambda, double mu)
{
int branchlength = (int)d_branchlength;
for (int i = 0; i < arr->size; ++i)
{
struct BirthDeathCacheKey *key = (struct BirthDeathCacheKey *)arraylist_get(arr, i);
if (key->branchlength == branchlength &&
key->lambda == lambda &&
key->mu == mu)
return;
}
struct BirthDeathCacheKey *key = malloc(sizeof(struct BirthDeathCacheKey));
memset(key, 0, sizeof(struct BirthDeathCacheKey));
key->branchlength = branchlength;
key->lambda = lambda;
key->mu = mu;
arraylist_add(arr, key);
}
void get_keys_from_node(pCafeNode pcnode, pArrayList arr, int num_lambdas)
{
if (pcnode->super.branchlength <= 0)
return;
if (pcnode->birth_death_probabilities.param_lambdas) {
if (pcnode->birth_death_probabilities.param_mus) {
if (num_lambdas > 0) {
for (int k = 0; k<num_lambdas; k++) {
add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.param_mus[k]);
}
}
else {
add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.param_mus[0]);
}
}
else {
if (num_lambdas > 0) {
for (int k = 0; k<num_lambdas; k++) {
add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[k], pcnode->birth_death_probabilities.mu);
}
}
else {
add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.param_lambdas[0], pcnode->birth_death_probabilities.mu);
}
}
}
else {
add_key(arr, pcnode->super.branchlength, pcnode->birth_death_probabilities.lambda, pcnode->birth_death_probabilities.mu);
}
}
void do_node_set_birthdeath(pTree ptree, pTreeNode ptnode, va_list ap1)
{
va_list ap;
va_copy(ap, ap1);
pBirthDeathCacheArray cache = va_arg(ap, pBirthDeathCacheArray);
va_end(ap);
pCafeTree pcafe = (pCafeTree)ptree;
node_set_birthdeath_matrix((pCafeNode)ptnode, cache, pcafe->k);
}
void gather_keys(pTree ptree, pTreeNode ptnode, va_list ap1)
{
va_list ap;
va_copy(ap, ap1);
pArrayList arr = va_arg(ap, pArrayList);
va_end(ap);
pCafeTree pcafe = (pCafeTree)ptree;
get_keys_from_node((pCafeNode)ptnode, arr, pcafe->k);
}
void free_cache_keep_matrices(pBirthDeathCacheArray bd_cache)
{
// free the cache without deleting the matrices
void** keys = NULL;
hash_table_get_keys(bd_cache->table, &keys);
free(keys);
hash_table_delete(bd_cache->table);
memory_free(bd_cache);
}
/**
* Set each node's birthdeath matrix based on its values of branchlength, lambdas, and mus
**/
pBirthDeathCacheArray cafe_tree_set_birthdeath(pCafeTree pcafe, int max_family_size)
{
pArrayList arr = arraylist_new(40);
tree_traveral_prefix((pTree)pcafe, gather_keys, arr);
pBirthDeathCacheArray bd_cache = birthdeath_cache_init(max_family_size, &cache);
#pragma omp parallel
#pragma omp for
for (int i = 0; i < arr->size; ++i)
{
struct BirthDeathCacheKey* key = (struct BirthDeathCacheKey*)arraylist_get(arr, i);
struct square_matrix *matrix = compute_birthdeath_rates(key->branchlength, key->lambda, key->mu, max_family_size);
#pragma omp critical
hash_table_add(bd_cache->table, key, sizeof(struct BirthDeathCacheKey), matrix, sizeof(struct square_matrix*));
}
tree_traveral_prefix((pTree)pcafe, do_node_set_birthdeath, bd_cache);
arraylist_free(arr, NULL);
return bd_cache;
}
void cafe_tree_node_copy(pTreeNode psrc, pTreeNode pdest)
{
pCafeNode pcsrc, pcdest;
pcsrc = (pCafeNode)psrc;
pcdest = (pCafeNode)pdest;
phylogeny_node_copy(psrc,pdest);
pcdest->birth_death_probabilities.lambda = pcsrc->birth_death_probabilities.lambda;
pcdest->familysize = pcsrc->familysize;
pcdest->birthdeath_matrix = pcsrc->birthdeath_matrix;
}
void __cafe_tree_copy_new_fill(pCafeTree psrc, pCafeTree pdest )
{
pdest->size_of_factor = psrc->size_of_factor;
pdest->range.min = psrc->range.min;
pdest->range.max = psrc->range.max;
pdest->range.root_min = psrc->range.root_min;
pdest->range.root_max = psrc->range.root_max;
pdest->lambda = psrc->lambda;
pdest->rfsize = psrc->rfsize;
}
pCafeTree cafe_tree_copy(pCafeTree psrc)
{
pCafeTree pcafe = (pCafeTree)tree_copy((pTree)psrc,
cafe_tree_new_empty_node,
cafe_tree_node_copy );
__cafe_tree_copy_new_fill(psrc,pcafe);
tree_build_node_list((pTree)pcafe);
return pcafe;
}
pCafeTree cafe_tree_split(pCafeTree pcafe, int idx )
{
pCafeTree psub = (pCafeTree)phylogeny_split_tree((pTree)pcafe,idx, __cafe_tree_free_node );
if ( psub )
{
__cafe_tree_copy_new_fill(pcafe,psub);
}
free_cache_keep_matrices(cafe_tree_set_birthdeath(pcafe, probability_cache->maxFamilysize));
free_cache_keep_matrices(cafe_tree_set_birthdeath(psub, probability_cache->maxFamilysize));
return psub;
}
/*******************************************************************************
* Random family size
*******************************************************************************/
void __cafe_tree_node_random_familysize(pTree ptree, pTreeNode pnode, va_list ap1)
{
va_list ap;
va_copy(ap, ap1);
int *max = va_arg(ap, int*);
int max_family_size = va_arg(ap, int);
va_end(ap);
if ( tree_is_root(ptree,pnode) ) return;
double rnd = unifrnd();
double cumul = 0;
pCafeNode pcnode = (pCafeNode)pnode;
int parent_family_size = ((pCafeNode)pnode->parent)->familysize;
int c = 0;
for (; c < max_family_size-1; c++ )
{
cumul += square_matrix_get(pcnode->birthdeath_matrix, parent_family_size, c);
if ( cumul >= rnd ) break;
}
pcnode->familysize = c;
if (*max < pcnode->familysize)
{
*max = pcnode->familysize;
}
}
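/* This is inverse-CDF sampling: rnd ~ U(0,1) is compared against the running
sum of the transition row P(child = c | parent size), and the first c whose
cumulative probability reaches rnd is taken. E.g. for row probabilities
{0.2, 0.5, 0.3} and rnd = 0.6, cumul passes 0.6 at c = 1 (0.2, then 0.7). */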
/**
* Sets the family size of each node to a random value between 0 and maxFamilySize,
* sampled from the node's birth-death transition probabilities given its parent's size.
**/
int cafe_tree_random_familysize(pCafeTree pcafe, int rootFamilysize, int maxFamilySize)
{
int max = 0;
((pCafeNode)pcafe->super.root)->familysize = rootFamilysize;
tree_traveral_prefix( (pTree)pcafe, __cafe_tree_node_random_familysize, &max, maxFamilySize);
return max;
}
void initialize_leaf_likelihood_clustered(pTree ptree, pTreeNode ptnode)
{
int root_min, root_max;
pCafeTree pcafe = (pCafeTree)ptree;
pCafeNode pcnode = (pCafeNode)ptnode;
int s, i, j, k;
if (tree_is_root(ptree, ptnode->parent))
{
root_min = pcafe->range.root_min;
root_max = pcafe->range.root_max;
}
else
{
root_min = pcafe->range.min;
root_max = pcafe->range.max;
}
for (s = root_min, i = 0; s <= root_max; s++, i++)
{
for (k = 0; k < pcafe->k; k++) {
if (pcnode->familysize < 0) {
//fprintf(stderr, "family size not set\n");
pcnode->k_likelihoods[k][i] = 1;
}
else {
if (pcnode->errormodel) {
memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double));
for (j = 0; j<pcafe->size_of_factor; j++) {
// conditional probability of measuring i=familysize when true count is j
pcnode->k_likelihoods[k][j] = pcnode->errormodel->errormatrix[pcnode->familysize][j];
}
}
else {
memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double));
pcnode->k_likelihoods[k][pcnode->familysize] = 1;
}
}
}
}
}
void compute_internal_node_likelihood_clustered(pTree ptree, pTreeNode ptnode)
{
family_size_range family_size;
pCafeTree pcafe = (pCafeTree)ptree;
pCafeNode pcnode = (pCafeNode)ptnode;
double *tree_factors[2];
tree_factors[0] = memory_new(pcafe->size_of_factor, sizeof(double));
tree_factors[1] = memory_new(pcafe->size_of_factor, sizeof(double));
int s, i, k;
double lambda = -1;
double mu = -1;
if (tree_is_root(ptree, ptnode))
{
family_size.root_min = pcafe->range.root_min;
family_size.root_max = pcafe->range.root_max;
family_size.min = pcafe->range.min;
family_size.max = pcafe->range.max;
}
else
{
family_size.root_min = pcafe->range.min;
family_size.root_max = pcafe->range.max;
family_size.min = pcafe->range.min;
family_size.max = pcafe->range.max;
}
int idx;
double *factors[2] = { NULL, NULL };
pCafeNode child[2] = { (pCafeNode)((pTreeNode)pcnode)->children->head->data,
(pCafeNode)((pTreeNode)pcnode)->children->tail->data };
for (k = 0; k < pcafe->k; k++)
{
lambda = pcnode->birth_death_probabilities.param_lambdas[k];
if (pcnode->birth_death_probabilities.param_mus) {
mu = pcnode->birth_death_probabilities.param_mus[k];
}
// for each child
for (idx = 0; idx < 2; idx++)
{
factors[idx] = tree_factors[idx];
memset(factors[idx], 0, pcafe->size_of_factor * sizeof(double));
for (s = family_size.root_min, i = 0; s <= family_size.root_max; s++, i++)
{
for (int c = family_size.min, j = 0; c <= family_size.max; c++, j++)
{
factors[idx][i] += birthdeath_likelihood_with_s_c(s, c, child[idx]->super.branchlength, lambda, mu, NULL) * child[idx]->k_likelihoods[k][j];
}
}
}
int size = family_size.root_max - family_size.root_min + 1;
for (i = 0; i < size; i++)
{
pcnode->k_likelihoods[k][i] = factors[0][i] * factors[1][i];
}
}
memory_free(tree_factors[0]);
memory_free(tree_factors[1]);
}
void compute_node_clustered_likelihood(pTree ptree, pTreeNode ptnode, va_list ap1)
{
va_list ap;
va_copy(ap, ap1);
struct chooseln_cache *ln_cache = va_arg(ap, struct chooseln_cache *);
va_end(ap);
pCafeTree pcafe = (pCafeTree)ptree;
int maxFamilySize = MAX(pcafe->range.root_max, pcafe->range.max);
if (!chooseln_is_init2(ln_cache))
{
chooseln_cache_init2(ln_cache, maxFamilySize);
}
else if (get_chooseln_cache_size2(ln_cache) < maxFamilySize)
{
chooseln_cache_resize2(ln_cache, maxFamilySize);
}
if (tree_is_leaf(ptnode))
{
initialize_leaf_likelihood_clustered(ptree, ptnode);
}
else
{
compute_internal_node_likelihood_clustered(ptree, ptnode);
}
}
void __cafe_tree_node_compute_clustered_likelihood_using_cache(pTree ptree, pTreeNode ptnode, va_list ap1)
{
va_list ap;
va_copy(ap, ap1);
struct chooseln_cache *ln_cache = va_arg(ap, struct chooseln_cache *);
va_end(ap);
pCafeTree pcafe = (pCafeTree)ptree;
pCafeNode pcnode = (pCafeNode)ptnode;
double *tree_factors[2];
tree_factors[0] = memory_new(pcafe->size_of_factor, sizeof(double));
tree_factors[1] = memory_new(pcafe->size_of_factor, sizeof(double));
int size;
int s, c, i, j, k;
family_size_range family_size;
double** bd = NULL;
int maxFamilySize = MAX(pcafe->range.root_max, pcafe->range.max);
if (!chooseln_is_init2(ln_cache))
{
chooseln_cache_init2(ln_cache, maxFamilySize);
}
else if (get_chooseln_cache_size2(ln_cache) < maxFamilySize)
{
chooseln_cache_resize2(ln_cache, maxFamilySize);
}
if (tree_is_leaf(ptnode))
{
if (tree_is_root(ptree, ptnode->parent))
{
family_size.root_min = pcafe->range.root_min;
family_size.root_max = pcafe->range.root_max;
}
else
{
family_size.root_min = pcafe->range.min;
family_size.root_max = pcafe->range.max;
}
for (s = family_size.root_min, i = 0; s <= family_size.root_max; s++, i++)
{
// pcnode->likelihoods[i] = pcnode->bd[s][pcnode->familysize];
for (k = 0; k < pcafe->k; k++) {
if (pcnode->familysize < 0) {
//fprintf(stderr, "family size not set\n");
pcnode->k_likelihoods[k][i] = 1;
}
else {
if (pcnode->errormodel) {
memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double));
for (j = 0; j<pcafe->size_of_factor; j++) {
// conditional probability of measuring i=familysize when true count is j
pcnode->k_likelihoods[k][j] = pcnode->errormodel->errormatrix[pcnode->familysize][j];
}
}
else {
//bd = pcnode->k_bd->array[k];
//pcnode->k_likelihoods[k][i] = bd[s][pcnode->familysize];
memset((void*)pcnode->k_likelihoods[k], 0, pcafe->size_of_factor * sizeof(double));
pcnode->k_likelihoods[k][pcnode->familysize] = 1;
}
}
}
}
}
else
{
if (tree_is_root(ptree, ptnode))
{
family_size.root_min = pcafe->range.root_min;
family_size.root_max = pcafe->range.root_max;
family_size.min = pcafe->range.min;
family_size.max = pcafe->range.max;
}
else
{
family_size.root_min = pcafe->range.min;
family_size.root_max = pcafe->range.max;
family_size.min = pcafe->range.min;
family_size.max = pcafe->range.max;
}
int idx;
double *factors[2] = { NULL, NULL };
pCafeNode child[2] = { (pCafeNode)((pTreeNode)pcnode)->children->head->data,
(pCafeNode)((pTreeNode)pcnode)->children->tail->data };
for (k = 0; k < pcafe->k; k++)
{
// for each child
for (idx = 0; idx < 2; idx++)
{
{
factors[idx] = tree_factors[idx];
memset(factors[idx], 0, pcafe->size_of_factor * sizeof(double));
bd = child[idx]->k_bd->array[k];
for (s = family_size.root_min, i = 0; s <= family_size.root_max; s++, i++)
{
for (c = family_size.min, j = 0; c <= family_size.max; c++, j++)
{
factors[idx][i] += bd[s][c] * child[idx]->k_likelihoods[k][j];
}
}
}
}
size = family_size.root_max - family_size.root_min + 1;
for (i = 0; i < size; i++)
{
pcnode->k_likelihoods[k][i] = factors[0][i] * factors[1][i];
}
}
}
memory_free(tree_factors[0]);
memory_free(tree_factors[1]);
}
void cafe_tree_node_free_clustered_likelihoods(pCafeParam param)
{
int i;
pArrayList nlist = param->pcafe->super.nlist;
pTree tlambda = param->lambda_tree;
if (tlambda == NULL)
{
for (i = 0; i < nlist->size; i++)
{
pCafeNode pcnode = (pCafeNode)nlist->array[i];
free_probabilities(&pcnode->birth_death_probabilities);
if (pcnode->k_likelihoods) { memory_free(pcnode->k_likelihoods); pcnode->k_likelihoods = NULL; }
if (pcnode->k_bd) { arraylist_free(pcnode->k_bd, NULL); pcnode->k_bd = NULL; }
}
}
}
double** cafe_tree_clustered_likelihood(pCafeTree pcafe, struct chooseln_cache *ln_cache)
{
if (probability_cache)
{
tree_traveral_postfix((pTree)pcafe, __cafe_tree_node_compute_clustered_likelihood_using_cache, ln_cache);
}
else
{
tree_traveral_postfix((pTree)pcafe, compute_node_clustered_likelihood, ln_cache);
}
return ((pCafeNode)pcafe->super.root)->k_likelihoods;
}
|
GB_unop__identity_int32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int32_fc32
// op(A') function: GB_unop_tran__identity_int32_fc32
// C type: int32_t
// A type: GxB_FC32_t
// cast: int32_t cij = GB_cast_to_int32_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC32)
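// Note on the cast above: GB_cast_to_int32_t is GraphBLAS's safe
// double-to-int32 conversion; as an assumption about its definition, it
// maps NaN to 0 and clamps out-of-range values to INT32_MIN/INT32_MAX,
// avoiding the undefined behavior of a raw C cast.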
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int32_fc32
(
int32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
int32_t z = GB_cast_to_int32_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pow_fc32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__pow_fc32
// A.*B function (eWiseMult): GB_AemultB__pow_fc32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__pow_fc32
// C+=b function (dense accum): GB_Cdense_accumb__pow_fc32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fc32
// C=scalar+B GB_bind1st__pow_fc32
// C=scalar+B' GB_bind1st_tran__pow_fc32
// C=A+scalar GB_bind2nd__pow_fc32
// C=A'+scalar GB_bind2nd_tran__pow_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_cpowf (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
GxB_FC32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_cpowf (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_FC32 || GxB_NO_POW_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__pow_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__pow_fc32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__pow_fc32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC32_t
GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *GB_RESTRICT Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__pow_fc32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__pow_fc32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__pow_fc32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC32_t bij = Bx [p] ;
Cx [p] = GB_cpowf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__pow_fc32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC32_t aij = Ax [p] ;
Cx [p] = GB_cpowf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_cpowf (x, aij) ; \
}
GrB_Info GB_bind1st_tran__pow_fc32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC32_t aij = Ax [pA] ; \
Cx [pC] = GB_cpowf (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__pow_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_bool_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_int16
// op(A') function: GB_tran__minv_bool_int16
// C type: bool
// A type: int16_t
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
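// For reference (annotation, not in the generated source): with the macros
// above, GB_CAST_OP(p,p) expands to
//      ;                   // GB_GETA: aij is not needed, the result is constant
//      ; ;                 // GB_CASTING: no typecast is required either
//      Cx [p] = true ;     // GB_OP: minv(x) in boolean arithmetic is always 1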
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_bool_int16
(
bool *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_bool_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_AxB_dot2.c | //------------------------------------------------------------------------------
// GB_AxB_dot2: compute C=A'*B or C<!M>=A'*B in parallel, in-place
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// This method always constructs C as bitmap; it then converts C to sparse or
// hyper if A or B are hypersparse. The C<M>=A'*B dot product when C is sparse
// is computed by GB_AxB_dot3. This method handles the case when C is bitmap.
// TODO: this is slower than it could be when A and B are both bitmap (and
// likely when both are either bitmap or full) and A->vlen is large. The inner
// loop is then a plain full/bitmap dot product across the entire input
// vectors; no tiling is used, so cache performance is not as good as it could
// be. For large problems, explicitly transposing A and computing C=(A')*B
// with the saxpy3 method is faster than this dot-product method for C=A'*B.
#include "GB_mxm.h"
#include "GB_subref.h"
#include "GB_binop.h"
#include "GB_ek_slice.h"
#include "GB_bitmap_assign_methods.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif
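// Schematic of the dot2 computation (a sketch only, assuming dense
// column-major A and B and the PLUS_TIMES semiring over double;
// dot2_dense_sketch is a hypothetical name). The generated workers specialize
// this triple loop for every built-in semiring and all sparsity formats, with
// the mask applied through the C bitmap:
static void dot2_dense_sketch
(
    double *Cx,             // C is cvlen-by-cvdim, held by column
    const double *Ax,       // A is vlen-by-cvlen, held by column (A' is used)
    const double *Bx,       // B is vlen-by-cvdim, held by column
    int64_t vlen, int64_t cvlen, int64_t cvdim
)
{
    for (int64_t j = 0 ; j < cvdim ; j++)
    {
        for (int64_t i = 0 ; i < cvlen ; i++)
        {
            double cij = 0 ;                // the additive identity
            for (int64_t k = 0 ; k < vlen ; k++)
            {
                // cij += A(k,i) * B(k,j), i.e. the dot product A(:,i)'*B(:,j)
                cij += Ax [k + i*vlen] * Bx [k + j*vlen] ;
            }
            Cx [i + j*cvlen] = cij ;
        }
    }
}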
#define GB_FREE_ALL \
{ \
GB_Matrix_free (&M2) ; \
GB_FREE (A_slice) ; \
GB_FREE (B_slice) ; \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
}
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_AxB_dot2 // C=A'*B or C<!M>=A'*B, dot product method
(
GrB_Matrix *Chandle, // output matrix
const GrB_Matrix M_in, // mask matrix for C<!M>=A'*B, may be NULL
const bool Mask_comp, // if true, use !M
const bool Mask_struct, // if true, use only the structure of M
const GrB_Matrix A_in, // input matrix
const GrB_Matrix B_in, // input matrix
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
GB_Context Context
)
{
// double ttt = omp_get_wtime ( ) ;
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
ASSERT (Chandle != NULL) ;
ASSERT (*Chandle == NULL) ;
ASSERT_MATRIX_OK_OR_NULL (M_in, "M for dot A'*B", GB0) ;
ASSERT_MATRIX_OK (A_in, "A for dot A'*B", GB0) ;
ASSERT_MATRIX_OK (B_in, "B for dot A'*B", GB0) ;
ASSERT (!GB_ZOMBIES (M_in)) ;
ASSERT (GB_JUMBLED_OK (M_in)) ;
ASSERT (!GB_PENDING (M_in)) ;
ASSERT (!GB_ZOMBIES (A_in)) ;
ASSERT (!GB_JUMBLED (A_in)) ;
ASSERT (!GB_PENDING (A_in)) ;
ASSERT (!GB_ZOMBIES (B_in)) ;
ASSERT (!GB_JUMBLED (B_in)) ;
ASSERT (!GB_PENDING (B_in)) ;
ASSERT_SEMIRING_OK (semiring, "semiring for numeric A'*B", GB0) ;
(*Chandle) = NULL ;
GrB_Matrix M, M2 = NULL ;
int64_t *GB_RESTRICT A_slice = NULL ;
int64_t *GB_RESTRICT B_slice = NULL ;
int64_t *GB_RESTRICT pstart_Mslice = NULL ;
int64_t *GB_RESTRICT kfirst_Mslice = NULL ;
int64_t *GB_RESTRICT klast_Mslice = NULL ;
ASSERT (A_in->vlen == B_in->vlen) ;
ASSERT (A_in->vlen > 0) ;
if (M_in == NULL)
{
GBURBLE ("(%s=%s'*%s) ",
GB_sparsity_char (GxB_BITMAP),
GB_sparsity_char_matrix (A_in),
GB_sparsity_char_matrix (B_in)) ;
}
else
{
GBURBLE ("(%s%s%s%s%s=%s'*%s) ",
GB_sparsity_char (GxB_BITMAP),
Mask_struct ? "{" : "<",
Mask_comp ? "!" : "",
GB_sparsity_char_matrix (M_in),
Mask_struct ? "}" : ">",
GB_sparsity_char_matrix (A_in),
GB_sparsity_char_matrix (B_in)) ;
}
//--------------------------------------------------------------------------
// construct shallow copies of A and B, if hypersparse
//--------------------------------------------------------------------------
// If A_in is hypersparse, a new sparse matrix A is constructed with
// A->vdim = A_in->nvec and the same vlen as A_in, and then the packed
// C->vlen will equal A->vdim < cvlen_final.
// If B_in is hypersparse, a new sparse matrix B is constructed with
// B->vdim = B_in->nvec and the same vlen as B_in, and then the packed
// C->vdim will equal B->vdim < cvdim_final.
int64_t cvlen_final = A_in->vdim ;
int64_t cvdim_final = B_in->vdim ;
bool A_is_hyper = GB_IS_HYPERSPARSE (A_in) ;
bool B_is_hyper = GB_IS_HYPERSPARSE (B_in) ;
bool A_or_B_hyper = A_is_hyper || B_is_hyper ;
GrB_Index *GB_RESTRICT Ah = A_in->h ;
GrB_Index *GB_RESTRICT Bh = B_in->h ;
struct GB_Matrix_opaque A_header, B_header ;
GrB_Matrix A = (A_is_hyper) ? GB_hyper_pack (&A_header, A_in) : A_in ;
GrB_Matrix B = (B_is_hyper) ? GB_hyper_pack (&B_header, B_in) : B_in ;
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
ASSERT (!GB_IS_HYPERSPARSE (B)) ;
//--------------------------------------------------------------------------
// determine the size of C
//--------------------------------------------------------------------------
int64_t cnvec = B->nvec ;
int64_t cvlen = A->vdim ;
int64_t cvdim = B->vdim ;
int64_t cnz ;
if (!GB_Index_multiply ((GrB_Index *) (&cnz), cvlen, cvdim))
{
// problem too large
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// extract the submask if A or B are hypersparse
//--------------------------------------------------------------------------
if (A_or_B_hyper && M_in != NULL)
{
// M2 = M_in (Ah, Bh)
GB_OK (GB_subref (&M2, M_in->is_csc, M_in,
(A_is_hyper) ? Ah : GrB_ALL, cvlen,
(B_is_hyper) ? Bh : GrB_ALL, cvdim, false, Context)) ;
// TODO: if Mask_struct is true, only extract the pattern of M_in
M = M2 ;
ASSERT_MATRIX_OK_OR_NULL (M, "M submask dot A'*B", GB0) ;
}
else
{
// use the mask as-is
M = M_in ;
}
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
int64_t naslice = 0 ;
int64_t nbslice = 0 ;
int64_t anvec = A->nvec ;
int64_t anz = GB_NNZ_HELD (A) ;
int64_t bnvec = B->nvec ;
int64_t bnz = GB_NNZ_HELD (B) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + bnz, chunk, nthreads_max) ;
#define GB_NTASKS_PER_THREAD 32
if (nthreads == 1)
{
// do the entire computation with a single thread
naslice = 1 ;
nbslice = 1 ;
}
else
{
// determine number of slices for A' and B
if (bnvec == 1)
{
// C and B are single vectors
naslice = GB_NTASKS_PER_THREAD * nthreads ;
nbslice = 1 ;
}
else if (anvec == 1 || bnvec == 0
|| bnvec > GB_NTASKS_PER_THREAD * nthreads)
{
// A is a single vector, or B is empty, or B is large: just slice B
naslice = 1 ;
nbslice = GB_NTASKS_PER_THREAD * nthreads ;
}
else
{
// slice B into individual vectors
nbslice = bnvec ;
// slice A' to get a total of about GB_NTASKS_PER_THREAD*nthreads (= 32*nthreads) tasks
naslice = (GB_NTASKS_PER_THREAD * nthreads) / nbslice ;
// but do not slice A too finely
naslice = GB_IMIN (naslice, anvec/4) ;
naslice = GB_IMAX (naslice, nthreads) ;
}
}
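// Worked example of the heuristic above (illustrative): with nthreads = 4 and
// bnvec = 8, nbslice = 8 and naslice = (32*4)/8 = 16 (subject to the clamps),
// for a total of naslice*nbslice = 128 = GB_NTASKS_PER_THREAD*nthreads tasks.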
//--------------------------------------------------------------------------
// get the semiring operators
//--------------------------------------------------------------------------
GrB_BinaryOp mult = semiring->multiply ;
GrB_Monoid add = semiring->add ;
ASSERT (mult->ztype == add->op->ztype) ;
bool A_is_pattern, B_is_pattern ;
GB_AxB_pattern (&A_is_pattern, &B_is_pattern, flipxy, mult->opcode) ;
//--------------------------------------------------------------------------
// allocate workspace and slice A and B
//--------------------------------------------------------------------------
// A and B can have any sparsity: full, bitmap, sparse, or hypersparse.
// C is always created as bitmap
if (!GB_pslice (&A_slice, A->p, A->nvec, naslice, false) ||
!GB_pslice (&B_slice, B->p, B->nvec, nbslice, false))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// ttt = omp_get_wtime ( ) - ttt ;
// GB_Global_timing_add (17, ttt) ;
// ttt = omp_get_wtime ( ) ;
//--------------------------------------------------------------------------
// allocate C
//--------------------------------------------------------------------------
// if M is sparse/hyper, then calloc C->b; otherwise use malloc
bool M_is_sparse_or_hyper = (M != NULL) &&
(GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
GrB_Type ctype = add->op->ztype ;
GB_OK (GB_new_bix (Chandle, // bitmap, new header
ctype, cvlen, cvdim, GB_Ap_malloc, true,
GxB_BITMAP, M_is_sparse_or_hyper, B->hyper_switch, cnvec, cnz, true,
Context)) ;
GrB_Matrix C = (*Chandle) ;
// ttt = omp_get_wtime ( ) - ttt ;
// GB_Global_timing_add (18, ttt) ;
// ttt = omp_get_wtime ( ) ;
//--------------------------------------------------------------------------
// if M is sparse/hyper, scatter it into the C bitmap
//--------------------------------------------------------------------------
if (M_is_sparse_or_hyper)
{
// FUTURE:: could just set Cb [pC] = 2 since Cb has just been calloc'd.
// However, in the future, this method might be able to modify C on
// input, in which case C->b will not be all zero.
int mthreads = GB_nthreads (GB_NNZ (M) + M->nvec, chunk, nthreads_max) ;
int mtasks = (mthreads == 1) ? 1 : (8 * mthreads) ;
if (!GB_ek_slice (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice,
M, &mtasks))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// Cb [pC] += 2 for each entry M(i,j) in the mask
GB_bitmap_M_scatter (C,
NULL, 0, GB_ALL, NULL, NULL, 0, GB_ALL, NULL,
M, Mask_struct, GB_ASSIGN, GB_BITMAP_M_SCATTER_PLUS_2,
pstart_Mslice, kfirst_Mslice, klast_Mslice,
mthreads, mtasks, Context) ;
// the bitmap of C now contains:
// Cb (i,j) = 0: cij not present, mij zero
// Cb (i,j) = 1: cij present, mij zero (not used yet)
// Cb (i,j) = 2: cij not present, mij 1
// Cb (i,j) = 3: cij present, mij 1 (not used yet)
}
//--------------------------------------------------------------------------
// C<#>=A'*B, computing each entry with a dot product, via builtin semiring
//--------------------------------------------------------------------------
bool done = false ;
#ifndef GBCOMPACT
//----------------------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------------------
#define GB_Adot2B(add,mult,xname) GB_Adot2B_ ## add ## mult ## xname
#define GB_AxB_WORKER(add,mult,xname) \
{ \
info = GB_Adot2B (add,mult,xname) (C, M, Mask_comp, Mask_struct, \
A, A_is_pattern, A_slice, B, B_is_pattern, B_slice, \
nthreads, naslice, nbslice) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------------------
GB_Opcode mult_opcode, add_opcode ;
GB_Type_code xcode, ycode, zcode ;
if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring,
flipxy, &mult_opcode, &add_opcode, &xcode, &ycode, &zcode))
{
#include "GB_AxB_factory.c"
}
ASSERT (info == GrB_SUCCESS || info == GrB_NO_VALUE) ;
#endif
//--------------------------------------------------------------------------
// C = A'*B, computing each entry with a dot product, with typecasting
//--------------------------------------------------------------------------
if (!done)
{
#define GB_DOT2_GENERIC
GB_BURBLE_MATRIX (C, "(generic C%s=A'*B) ", (M == NULL) ? "" :
(Mask_comp ? "<!M>" : "<M>")) ;
#include "GB_AxB_dot_generic.c"
}
//--------------------------------------------------------------------------
// free workspace
//--------------------------------------------------------------------------
GB_FREE_ALL ;
C->magic = GB_MAGIC ;
ASSERT_MATRIX_OK (C, "dot2: C = A'*B output", GB0) ;
ASSERT (!GB_ZOMBIES (C)) ;
//--------------------------------------------------------------------------
// unpack C if A or B are hypersparse
//--------------------------------------------------------------------------
if (A_or_B_hyper)
{
//----------------------------------------------------------------------
// unpack C from bitmap to sparse/hyper
//----------------------------------------------------------------------
// C is currently A_in->nvec by B_in->nvec, in bitmap form. It must be
// unpacked into sparse/hypersparse form, with zombies.
//----------------------------------------------------------------------
// allocate the sparse/hypersparse structure of the final C
//----------------------------------------------------------------------
int64_t *GB_RESTRICT Cp = GB_MALLOC (cvdim+1, int64_t) ;
int64_t *GB_RESTRICT Ch =
B_is_hyper ? GB_MALLOC (cvdim, int64_t) : NULL ;
int64_t *GB_RESTRICT Ci = GB_MALLOC (cnz, int64_t) ;
if (Cp == NULL || (B_is_hyper && Ch == NULL) || Ci == NULL)
{
// out of memory
GB_Matrix_free (Chandle) ;
GB_FREE (Cp) ;
GB_FREE (Ch) ;
GB_FREE (Ci) ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// construct the hyperlist of C, if B is hypersparse
//----------------------------------------------------------------------
nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ;
if (B_is_hyper)
{
// C becomes hypersparse
ASSERT (cvdim == B_in->nvec) ;
GB_memcpy (Ch, B_in->h, cvdim * sizeof (int64_t), nthreads) ;
}
//----------------------------------------------------------------------
// construct the vector pointers of C
//----------------------------------------------------------------------
int64_t pC ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cvdim+1 ; pC++)
{
Cp [pC] = pC * cvlen ;
}
//----------------------------------------------------------------------
// construct the pattern of C from its bitmap
//----------------------------------------------------------------------
// C(i,j) becomes a zombie if not present in the bitmap
nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
int8_t *GB_RESTRICT Cb = C->b ;
if (A_is_hyper)
{
ASSERT (cvlen == A_in->nvec) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = Ah [pC % cvlen] ;
Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
}
}
else
{
ASSERT (cvlen == cvlen_final && cvlen == A->vdim) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pC = 0 ; pC < cnz ; pC++)
{
int64_t i = pC % cvlen ;
Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
}
}
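// (Annotation: GB_FLIP marks index i as a zombie by encoding it as -i-2;
// these placeholder entries are pruned later when pending work on C is
// finished.)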
//----------------------------------------------------------------------
// transplant the new content and finalize C
//----------------------------------------------------------------------
C->p = Cp ; Cp = NULL ;
C->h = Ch ; Ch = NULL ;
C->i = Ci ; Ci = NULL ;
C->nzombies = cnz - C->nvals ;      // entries absent from the bitmap become zombies
C->vdim = cvdim_final ;
C->vlen = cvlen_final ;
C->nvals = -1 ;                     // nvals is maintained only while C is bitmap
C->nvec = cvdim ;
C->plen = cvdim ;
C->nvec_nonempty = (cvlen == 0) ? 0 : cvdim ;
// free the bitmap
GB_FREE (C->b) ;
// C is now sparse or hypersparse
ASSERT_MATRIX_OK (C, "dot2: unpacked C", GB0) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
ASSERT (*Chandle == C) ;
ASSERT (GB_ZOMBIES_OK (C)) ;
ASSERT (!GB_JUMBLED (C)) ;
ASSERT (!GB_PENDING (C)) ;
// ttt = omp_get_wtime ( ) - ttt ;
// GB_Global_timing_add (19, ttt) ;
// ttt = omp_get_wtime ( ) ;
return (GrB_SUCCESS) ;
}
|
lenet.c | #include "lenet.h"
#include <memory.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#define GETLENGTH(array) (sizeof(array)/sizeof(*(array)))
#define GETCOUNT(array) (sizeof(array)/sizeof(double))
#define FOREACH(i,count) for (int i = 0; i < count; ++i)
#define CONVOLUTE_VALID(input,output,weight) \
{ \
FOREACH(o0,GETLENGTH(output)) \
FOREACH(o1,GETLENGTH(*(output))) \
FOREACH(w0,GETLENGTH(weight)) \
FOREACH(w1,GETLENGTH(*(weight))) \
(output)[o0][o1] += (input)[o0 + w0][o1 + w1] * (weight)[w0][w1]; \
}
#define CONVOLUTE_FULL(input,output,weight) \
{ \
FOREACH(i0,GETLENGTH(input)) \
FOREACH(i1,GETLENGTH(*(input))) \
FOREACH(w0,GETLENGTH(weight)) \
FOREACH(w1,GETLENGTH(*(weight))) \
(output)[i0 + w0][i1 + w1] += (input)[i0][i1] * (weight)[w0][w1]; \
}
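/* Minimal demonstration of CONVOLUTE_VALID (a hypothetical helper, not part of
   the original network): a 3x3 input convolved with a 2x2 kernel yields a 2x2
   "valid" output; CONVOLUTE_FULL would instead scatter into a 4x4 output. */
static void convolute_valid_demo(void)
{
	double in[3][3]  = { {1,2,3}, {4,5,6}, {7,8,9} };
	double w[2][2]   = { {1,0}, {0,1} };
	double out[2][2] = { {0} };
	CONVOLUTE_VALID(in, out, w);
	/* out[0][0] == in[0][0]*w[0][0] + in[1][1]*w[1][1] == 6 */
}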
#define CONVOLUTION_FORWARD(input,output,weight,bias,action) \
{ \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
CONVOLUTE_VALID(input[x], output[y], weight[x][y]); \
FOREACH(j, GETLENGTH(output)) \
FOREACH(i, GETCOUNT(output[j])) \
((double *)output[j])[i] = action(((double *)output[j])[i] + bias[j]); \
}
#define CONVOLUTION_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad)\
{ \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
CONVOLUTE_FULL(outerror[y], inerror[x], weight[x][y]); \
FOREACH(i, GETCOUNT(inerror)) \
((double *)inerror)[i] *= actiongrad(((double *)input)[i]); \
FOREACH(j, GETLENGTH(outerror)) \
FOREACH(i, GETCOUNT(outerror[j])) \
bd[j] += ((double *)outerror[j])[i]; \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
CONVOLUTE_VALID(input[x], wd[x][y], outerror[y]); \
}
#define SUBSAMP_MAX_FORWARD(input,output) \
{ \
const int len0 = GETLENGTH(*(input)) / GETLENGTH(*(output)); \
const int len1 = GETLENGTH(**(input)) / GETLENGTH(**(output)); \
FOREACH(i, GETLENGTH(output)) \
FOREACH(o0, GETLENGTH(*(output))) \
FOREACH(o1, GETLENGTH(**(output))) \
{ \
int x0 = 0, x1 = 0, ismax; \
FOREACH(l0, len0) \
FOREACH(l1, len1) \
{ \
ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\
x0 += ismax * (l0 - x0); \
x1 += ismax * (l1 - x1); \
} \
output[i][o0][o1] = input[i][o0*len0 + x0][o1*len1 + x1]; \
} \
}
#define SUBSAMP_MAX_BACKWARD(input,inerror,outerror) \
{ \
const int len0 = GETLENGTH(*(inerror)) / GETLENGTH(*(outerror)); \
const int len1 = GETLENGTH(**(inerror)) / GETLENGTH(**(outerror)); \
FOREACH(i, GETLENGTH(outerror)) \
FOREACH(o0, GETLENGTH(*(outerror))) \
FOREACH(o1, GETLENGTH(**(outerror))) \
{ \
int x0 = 0, x1 = 0, ismax; \
FOREACH(l0, len0) \
FOREACH(l1, len1) \
{ \
ismax = input[i][o0*len0 + l0][o1*len1 + l1] > input[i][o0*len0 + x0][o1*len1 + x1];\
x0 += ismax * (l0 - x0); \
x1 += ismax * (l1 - x1); \
} \
inerror[i][o0*len0 + x0][o1*len1 + x1] = outerror[i][o0][o1]; \
} \
}
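/* The pooling macros above track the argmax without branching: when a new
   maximum is found (ismax == 1) the coordinates x0/x1 jump to it, otherwise
   (ismax == 0) they stay put. A scalar sketch of the same idiom (hypothetical
   helper, for illustration only): */
static int branchless_argmax(const double *v, int n)
{
	int best = 0;
	for (int i = 1; i < n; ++i)
	{
		int ismax = v[i] > v[best];	/* 1 iff v[i] is a new maximum */
		best += ismax * (i - best);	/* best = ismax ? i : best */
	}
	return best;
}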
#define DOT_PRODUCT_FORWARD(input,output,weight,bias,action) \
{ \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
((double *)output)[y] += ((double *)input)[x] * weight[x][y]; \
FOREACH(j, GETLENGTH(bias)) \
((double *)output)[j] = action(((double *)output)[j] + bias[j]); \
}
#define DOT_PRODUCT_BACKWARD(input,inerror,outerror,weight,wd,bd,actiongrad) \
{ \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
((double *)inerror)[x] += ((double *)outerror)[y] * weight[x][y]; \
FOREACH(i, GETCOUNT(inerror)) \
((double *)inerror)[i] *= actiongrad(((double *)input)[i]); \
FOREACH(j, GETLENGTH(outerror)) \
bd[j] += ((double *)outerror)[j]; \
for (int x = 0; x < GETLENGTH(weight); ++x) \
for (int y = 0; y < GETLENGTH(*weight); ++y) \
wd[x][y] += ((double *)input)[x] * ((double *)outerror)[y]; \
}
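/* Annotation: the two macros above implement a fully connected layer. The
   forward pass computes output = action(W'x + b) over the flattened arrays;
   the backward pass propagates the error through W, scales it by the
   activation gradient, and accumulates the bias (bd) and weight (wd) deltas. */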
double relu(double x)
{
return x*(x > 0);
}
double relugrad(double y)
{
return y > 0;
}
static void forward(LeNet5 *lenet, Feature *features, double(*action)(double))
{
CONVOLUTION_FORWARD(features->input, features->layer1, lenet->weight0_1, lenet->bias0_1, action);
SUBSAMP_MAX_FORWARD(features->layer1, features->layer2);
CONVOLUTION_FORWARD(features->layer2, features->layer3, lenet->weight2_3, lenet->bias2_3, action);
SUBSAMP_MAX_FORWARD(features->layer3, features->layer4);
CONVOLUTION_FORWARD(features->layer4, features->layer5, lenet->weight4_5, lenet->bias4_5, action);
DOT_PRODUCT_FORWARD(features->layer5, features->output, lenet->weight5_6, lenet->bias5_6, action);
}
static void backward(LeNet5 *lenet, LeNet5 *deltas, Feature *errors, Feature *features, double(*actiongrad)(double))
{
DOT_PRODUCT_BACKWARD(features->layer5, errors->layer5, errors->output, lenet->weight5_6, deltas->weight5_6, deltas->bias5_6, actiongrad);
CONVOLUTION_BACKWARD(features->layer4, errors->layer4, errors->layer5, lenet->weight4_5, deltas->weight4_5, deltas->bias4_5, actiongrad);
SUBSAMP_MAX_BACKWARD(features->layer3, errors->layer3, errors->layer4);
CONVOLUTION_BACKWARD(features->layer2, errors->layer2, errors->layer3, lenet->weight2_3, deltas->weight2_3, deltas->bias2_3, actiongrad);
SUBSAMP_MAX_BACKWARD(features->layer1, errors->layer1, errors->layer2);
CONVOLUTION_BACKWARD(features->input, errors->input, errors->layer1, lenet->weight0_1, deltas->weight0_1, deltas->bias0_1, actiongrad);
}
static inline void load_input(Feature *features, image input)
{
double (*layer0)[LENGTH_FEATURE0][LENGTH_FEATURE0] = features->input;
const long sz = sizeof(image) / sizeof(**input);
double mean = 0, std = 0;
FOREACH(j, sizeof(image) / sizeof(*input))
FOREACH(k, sizeof(*input) / sizeof(**input))
{
mean += input[j][k];
std += input[j][k] * input[j][k];
}
mean /= sz;
std = sqrt(std / sz - mean*mean);
FOREACH(j, sizeof(image) / sizeof(*input))
FOREACH(k, sizeof(*input) / sizeof(**input))
{
layer0[0][j + PADDING][k + PADDING] = (input[j][k] - mean) / std;
}
}
static inline void softmax(double input[OUTPUT], double loss[OUTPUT], int label, int count)
{
double inner = 0;
for (int i = 0; i < count; ++i)
{
double res = 0;
for (int j = 0; j < count; ++j)
{
res += exp(input[j] - input[i]);
}
loss[i] = 1. / res;
inner -= loss[i] * loss[i];
}
inner += loss[label];
for (int i = 0; i < count; ++i)
{
loss[i] *= (i == label) - loss[i] - inner;
}
}
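/* Derivation note (not in the original source): with y = softmax(input),
   computed stably above as loss[i] = 1 / sum_j exp(input[j] - input[i]), and a
   one-hot target t (t[label] = 1), the squared-error loss
       L = 0.5 * sum_i (y_i - t_i)^2
   has gradient dL/dinput_k = y_k * ((y_k - t_k) + inner), where
       inner = sum_i (t_i - y_i) * y_i = loss[label] - sum_i loss[i]^2.
   The final loop therefore stores loss[k] = -dL/dinput_k, the negative
   gradient, which Train()/TrainBatch() add to the weights scaled by ALPHA. */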
static void load_target(Feature *features, Feature *errors, int label)
{
double *output = (double *)features->output;
double *error = (double *)errors->output;
softmax(output, error, label, GETCOUNT(features->output));
}
static uint8 get_result(Feature *features, uint8 count)
{
double *output = (double *)features->output;
const int outlen = GETCOUNT(features->output);
uint8 result = 0;
printf("0: %7.6f ", output[0]);
double maxvalue = *output;
for (uint8 i = 1; i < count; ++i)
{
if (output[i] > maxvalue)
{
maxvalue = output[i];
result = i;
}
printf("%u: %7.6f ", i, output[i]);
}
printf(" \nprediction: %u\n", result);
return result;
}
static double f64rand()
{
static int randbit = 0;
if (!randbit)
{
srand((unsigned)time(0));
for (int i = RAND_MAX; i; i >>= 1, ++randbit);
}
unsigned long long lvalue = 0x4000000000000000L;
int i = 52 - randbit;
for (; i > 0; i -= randbit)
lvalue |= (unsigned long long)rand() << i;
lvalue |= (unsigned long long)rand() >> -i;
return *(double *)&lvalue - 3;
}
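/* Annotation on f64rand() (assuming IEEE-754 doubles): 0x4000000000000000 sets
   the exponent so the assembled value lies in [2,4); successive rand() calls
   fill the 52 mantissa bits 'randbit' bits at a time; subtracting 3 maps the
   result into [-1,1). The pointer cast is a type pun that is technically
   undefined behavior in ISO C; memcpy would be the portable alternative. */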
void TrainBatch(LeNet5 *lenet, image *inputs, uint8 *labels, int batchSize)
{
double buffer[GETCOUNT(LeNet5)] = { 0 };
int i = 0;
#pragma omp parallel for
for (i = 0; i < batchSize; ++i)
{
Feature features = { 0 };
Feature errors = { 0 };
LeNet5 deltas = { 0 };
load_input(&features, inputs[i]);
forward(lenet, &features, relu);
load_target(&features, &errors, labels[i]);
backward(lenet, &deltas, &errors, &features, relugrad);
#pragma omp critical
{
FOREACH(j, GETCOUNT(LeNet5))
buffer[j] += ((double *)&deltas)[j];
}
}
double k = ALPHA / batchSize;
FOREACH(i, GETCOUNT(LeNet5))
((double *)lenet)[i] += k * buffer[i];
}
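/* Annotation: TrainBatch is mini-batch SGD. Per-sample gradients are summed
   into 'buffer' under an OpenMP critical section and applied once per batch,
   scaled by ALPHA/batchSize. On compilers supporting OpenMP 4.5 array-section
   reductions, reduction(+:buffer[:GETCOUNT(LeNet5)]) could replace the
   critical section (an alternative, not what the original code does). */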
void Train(LeNet5 *lenet, image input, uint8 label)
{
Feature features = { 0 };
Feature errors = { 0 };
LeNet5 deltas = { 0 };
load_input(&features, input);
forward(lenet, &features, relu);
load_target(&features, &errors, label);
backward(lenet, &deltas, &errors, &features, relugrad);
FOREACH(i, GETCOUNT(LeNet5))
((double *)lenet)[i] += ALPHA * ((double *)&deltas)[i];
}
uint8 Predict(LeNet5 *lenet, image input,uint8 count)
{
Feature features = { 0 };
load_input(&features, input);
forward(lenet, &features, relu);
return get_result(&features, count);
}
void Initial(LeNet5 *lenet)
{
for (double *pos = (double *)lenet->weight0_1; pos < (double *)lenet->bias0_1; *pos++ = f64rand());
for (double *pos = (double *)lenet->weight0_1; pos < (double *)lenet->weight2_3; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (INPUT + LAYER1))));
for (double *pos = (double *)lenet->weight2_3; pos < (double *)lenet->weight4_5; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER2 + LAYER3))));
for (double *pos = (double *)lenet->weight4_5; pos < (double *)lenet->weight5_6; *pos++ *= sqrt(6.0 / (LENGTH_KERNEL * LENGTH_KERNEL * (LAYER4 + LAYER5))));
for (double *pos = (double *)lenet->weight5_6; pos < (double *)lenet->bias0_1; *pos++ *= sqrt(6.0 / (LAYER5 + OUTPUT)));
for (int *pos = (int *)lenet->bias0_1; pos < (int *)(lenet + 1); *pos++ = 0);
}
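/* Annotation: Initial() performs Xavier/Glorot-style initialization. Weights
   are drawn uniformly from [-1,1) by f64rand() and rescaled per layer by
   sqrt(6/(fan_in+fan_out)); all biases are zeroed (writing int zeros over the
   doubles relies on all-bits-zero representing 0.0, as in IEEE-754). */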
|
spheres_ini.c | /* For generation of the initial particle distribution consisting of two colliding spheres for the SPH codes miluph and miluphcuda.
*
* Both spheres can in general consist of a core, a mantle and a shell of different materials.
* Alternatively they can also be set up following some given radial profiles.
*
 * A relaxation technique, which calculates the physically correct hydrostatic structure (adiabatic compression) and sets
 * the particles' characteristics accordingly, is available (along with the Tillotson/ANEOS/ideal-gas EoS).
*
* The particles can be set up either in an equally-spaced lattice (simple-cubic or hexagonally close-packed),
* or in spherical shells (produced via an interface to SEAGen).
*
 * All materials can optionally be weibulled (i.e., flaws are distributed following
 * the Weibull distribution for use in the Grady-Kipp fragmentation model).
*
* Furthermore it is possible to include additional bodies/pointmasses (which then act gravitationally during the SPH simulation).
*
* All units are SI.
*
* Christoph Burger 07/Nov/2020
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <Python.h>
#include "spheres_ini.h"
#include "io.h"
#include "geometry.h"
#include "hydrostruct.h"
#ifdef MILUPHCUDA
#include <libconfig.h>
#endif
int main(int argc, char* argv[])
{
int i,j,k;
double tmp;
// set default filenames:
char infile[PATHLENGTH] = "spheres_ini.input";
char outfile[PATHLENGTH] = "impact.0000";
char source_dir[PATHLENGTH] = "../spheres_ini/";
#ifdef MILUPH
char matfile[PATHLENGTH] = "materialconstants.data";
char scenfile[PATHLENGTH] = "materialscenario.data";
FILE *sfl, *mfl;
#endif
#ifdef MILUPHCUDA
char matfile[PATHLENGTH] = "material.cfg";
#endif
char coordfile[PATHLENGTH];
char pointmassesfile[PATHLENGTH];
FILE *ofl, *cfl, *tfl, *pfl;
char t_structfile[PATHLENGTH] = "target.structure";
char p_structfile[PATHLENGTH] = "projectile.structure";
double sml_factor = 2.1; // default value for sml factor
int N_des, N; // desired (input parameter file) and actual/final (after creating the particles in the spheres) total particle numbers
int N_p_des, N_t_des; // based on volume ratio of proj/targ (all SPH particles have ~equal volumes)
int N_p, N_t, N_p_c, N_t_c, N_p_m, N_t_m; // actual/final particle numbers of proj/targ and their respective cores and mantles
double M_des, M_p_des, M_t_des, M_p_c_des, M_p_m_des, M_p_s_des, M_t_c_des, M_t_m_des, M_t_s_des; // initially desired masses, p/t for proj/targ, c/m/s for core/mantle/shell
double C_p_m_des, C_t_m_des, C_p_s_des, C_t_s_des; // desired mantle/shell mass fractions of proj/targ
double R_p_uncomp, R_t_uncomp, R_p_c_uncomp, R_t_c_uncomp, R_p_m_uncomp, R_t_m_uncomp; // uncompressed radii, p/t for proj/targ, c/m for core/mantle
double R_p_des, R_t_des, R_p_c_des, R_t_c_des, R_p_m_des, R_t_m_des; // desired radii (i.e. before actually building the spheres' particles), p/t for proj/targ, c/m for core/mantle
double M, M_p, M_t, M_p_c, M_t_c, M_p_m, M_t_m, R_p, R_t, R_p_c, R_t_c, R_p_m, R_t_m; // actual/final values (after building spheres / setting hydrostatic structure)
// hydrostatic internal structure (p/t for proj/targ, c/m/s for core/mantle/shell), where r increases with the array index:
int_struct_point int_struct_p_c[NSTEPS+1], int_struct_p_m[NSTEPS+1], int_struct_p_s[NSTEPS+1], int_struct_t_c[NSTEPS+1], int_struct_t_m[NSTEPS+1], int_struct_t_s[NSTEPS+1];
const double eps6 = 1.0e-6;
material mat[NMAT];
material_fp mat_fp[NMAT];
double M_particle_p[NMAT], M_particle_t[NMAT]; // SPH particle masses for the different materials (core/mantle/shell) and proj/targ in the case of homogeneous densities (no hydrostatic structures)
// either one of the following 2 pairs will be used for setting the impact geometry (but not both):
double ini_vel = -1.0, impact_par = -1.0; // initial velocity (in y-direction) and impact parameter
double vel_vesc = -1.0, impact_angle = -1.0; // v/v_esc and impact angle (in deg!), both at "touching ball" distance
int vel_vesc_angle = FALSE; // indicates which of the above pairs is used for setting the impact geometry
double ini_pos_p[DIM]; // initial position of projectile (where the target is at the origin)
double ini_dist_fact = -1.0, ini_dist, des_ini_dist;
double impact_vel_abs; // relative speed at touching ball distance
particle* p; // main particle array
int weibull_core, weibull_mantle, weibull_shell; // stores choice from input parameter file whether weibulling of core/mantle/shell material is desired (0/1)
double mpd; // mean particle distance: for SC and HCP = distance to direct neighbours in the lattice (constant throughout all materials and bodies); for SEAGen setup = max(mpd_p,mpd_t)
double mpd_p, mpd_t; // mpd of proj/targ for SEAGen setups, computed from the volume per particle (the sphere's volume divided by N)
                     // and the (hypothetical) particle distance in a HCP lattice, which should give a reasonably representative
                     // (rather higher than lower) value, computed as mpd_p/_t = cbrt( sqrt(2)*V_particle_p/_t )
double V_particle_p, V_particle_t; // volume per particle in proj/targ; they are equal for SC and HCP, but may differ to some degree for spherical shell setups
double r2; // squared distance to the origin
double baryc_x[DIM], baryc_v[DIM]; // pos and vel of the barycenter in the lab frame (where the bodies are initially placed, with the target at the origin and at rest)
double proj_pos_final[DIM], proj_vel_final[DIM], targ_pos_final[DIM], targ_vel_final[DIM];
double V_p_c_uncomp, V_p_m_uncomp, V_p_s_uncomp, V_t_c_uncomp, V_t_m_uncomp, V_t_s_uncomp; // uncompressed volumes, p/t for projectile/target, c/m/s for core/mantle/shell
int N_input = FALSE;
int M_output = FALSE;
int b_flag = FALSE;
N_input_coord* N_input_data;
int N_bodies = 2;
double ini_vel_vec[DIM];
double e_temp;
int ParticleGeometry = 1; // defaults to HCP
int OutputMode = 0; // defaults to HYDRO
int hydrostructFlag = FALSE;
double N_input_impact_angle, N_input_impact_vel_vesc, N_input_impact_vel_abs; // values computed from relative two-body orbit of proj+targ alone when in N_input mode ('N_input_impact_angle' in rad!)
double p_rot_period = -1.0, t_rot_period = -1.0; // rotation periods of proj/targ - negative values mean no rotation
double p_rot_axis[DIM], t_rot_axis[DIM]; // rotation axes of proj/targ
double p_omega[DIM], t_omega[DIM]; // angular velocity vectors of proj/targ
int aneos_i_rho, aneos_i_e;
double aneos_T, aneos_cs, aneos_entropy;
int aneos_phase_flag;
int allocated_ANEOS_mem_core_flag = FALSE, allocated_ANEOS_mem_mantle_flag = FALSE, allocated_ANEOS_mem_shell_flag = FALSE;
int useProfilesFlag = FALSE;
radial_profile_data *profile_target, *profile_projectile;
FILE *p_prof_file, *t_prof_file;
int p_profile_no_points = 0, t_profile_no_points = 0;
double struct_r_low = 0.1;
double struct_r_high = 1.1;
// seed random number generator (once for whole program)
srand( (unsigned)time(NULL) );
// process command line options
while ( (i = getopt(argc, argv, "?hG:S:O:HL:U:Rf:o:m:s:x:N:M:b")) != -1 ) // int-representations of command line options are successively saved in i
switch((char)i)
{
case '?':
help(*argv);
exit(0);
case 'h':
help(*argv);
exit(0);
case 'G':
ParticleGeometry = atoi(optarg);
if( ParticleGeometry != 0 && ParticleGeometry != 1 && ParticleGeometry != 2 )
ERRORTEXT("ERROR. Invalid choice of particle geometry ('-G' flag). Choose either 0, 1, or 2.\n")
break;
case 'S':
strncpy(source_dir, optarg, PATHLENGTH);
break;
case 'O':
OutputMode = atoi(optarg);
if( OutputMode != 0 && OutputMode != 1 && OutputMode != 2 && OutputMode != 3 )
ERRORTEXT("ERROR. Invalid choice of output mode ('-O' flag). Choose either 0, 1, 2, or 3.\n")
break;
case 'H':
hydrostructFlag = TRUE;
break;
case 'L':
struct_r_low = atof(optarg);
break;
case 'U':
struct_r_high = atof(optarg);
break;
case 'R':
useProfilesFlag = TRUE;
break;
case 'f':
strncpy(infile, optarg, PATHLENGTH);
break;
case 'o':
strncpy(outfile, optarg, PATHLENGTH);
break;
case 'm':
strncpy(matfile, optarg, PATHLENGTH);
break;
#ifdef MILUPH
case 's':
strncpy(scenfile, optarg, PATHLENGTH);
break;
#endif
case 'x':
sml_factor = atof(optarg);
break;
case 'N':
N_input = TRUE;
strncpy(coordfile, optarg, PATHLENGTH);
break;
case 'M':
M_output = TRUE;
strncpy(pointmassesfile, optarg, PATHLENGTH);
break;
case 'b':
b_flag = TRUE;
break;
default:
help(*argv);
exit(1);
}
// checks on cmd-line choices
if( hydrostructFlag && useProfilesFlag )
ERRORTEXT("ERROR. Cmd-line flags '-H' and '-R' were both set. You can't use them both at the same time ... try again!\n")
if( N_input == FALSE && M_output == TRUE )
ERRORTEXT("ERROR. Cmd-line flag '-M' was set but not '-N' ... try again!\n")
if( M_output && b_flag )
ERRORTEXT("ERROR. Cmd-line flag '-M' is incompatible with '-b' ... try again!\n")
// read values from input file
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Reading input parameter file '%s' ... ", infile);
read_inputfile(infile, &N_des, &M_des, &M_p_des, &C_p_m_des, &C_p_s_des, &C_t_m_des, &C_t_s_des, &ini_vel, &impact_par, &vel_vesc,
&impact_angle, &ini_dist_fact, &weibull_core, &weibull_mantle, &weibull_shell, &(mat[CORE].eos), &(mat[MANTLE].eos), &(mat[SHELL].eos),
mat[CORE].mat_name, mat[MANTLE].mat_name, mat[SHELL].mat_name, &p_rot_period, &t_rot_period, p_rot_axis, t_rot_axis);
fprintf(stdout, "Done.\n");
if( vel_vesc >= 0.0 && impact_angle >= 0.0 )
vel_vesc_angle = TRUE;
// run some consistency checks on read data
if( M_des < M_p_des && !useProfilesFlag )
ERRORVAR("ERROR. Found 'M_tot' < 'M_proj' in input parameter file '%s'. That's not possible.\n", infile)
if( ini_dist_fact < 1.0 )
ERRORVAR("ERROR. 'ini_dist_fact' was found to be < 1.0 ... that's not a good idea. Check '%s'!\n", infile)
if( hydrostructFlag )
if ( (mat[CORE].eos != 'T' && mat[CORE].eos != 'A' && mat[CORE].eos != 'I') || (mat[MANTLE].eos != 'T' && mat[MANTLE].eos != 'A' && mat[MANTLE].eos != 'I') || (mat[SHELL].eos != 'T' && mat[SHELL].eos != 'A' && mat[SHELL].eos != 'I') )
ERRORVAR("ERROR. Computation of the hydrostatic structure is implemented only along with Tillotson/ANEOS/ideal gas EoS. Check '%s' ...\n", infile)
if( p_rot_period == 0.0 || t_rot_period == 0.0 )
ERRORVAR("ERROR. Check rotation periods in '%s'. Zero means infinitely fast rotation. Not implemented yet.\n", infile)
// mark quantities that are not used any further
if( useProfilesFlag )
M_des = M_p_des = C_p_m_des = C_p_s_des = C_t_m_des = C_t_s_des = -1.0;
// read coordinates file if in N_input mode
if( N_input == TRUE )
{
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Reading coordinates file '%s' ... ", coordfile);
if ( (cfl = fopen(coordfile, "r")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for reading!\n", coordfile)
if( ( N_input_data = (N_input_coord*)malloc(sizeof(N_input_coord)) ) == NULL )
ERRORTEXT("ERROR during memory allocation!\n")
fscanf(cfl, "%*[^\n]\n"); // ignore first line
i = N_bodies = 0;
while( fscanf(cfl, "%le %le %le %le %le %le %le%*[^\n]\n", &(N_input_data[i].x[0]), &(N_input_data[i].x[1]), &(N_input_data[i].x[2]), &(N_input_data[i].v[0]), &(N_input_data[i].v[1]), &(N_input_data[i].v[2]), &(N_input_data[i].mass) ) == 7 )
{
N_bodies++;
i++;
if( ( N_input_data = (N_input_coord*)realloc(N_input_data,(N_bodies+1)*sizeof(N_input_coord)) ) == NULL )
ERRORTEXT("ERROR during memory allocation!\n")
}
fclose(cfl);
if( N_bodies < 2 )
ERRORVAR("ERROR! Too little bodies in coordinates file '%s'!\n", coordfile)
else
fprintf(stdout, "found %d bodies.\n", N_bodies);
// overwrite respective values from input parameter file
M_des = N_input_data[0].mass + N_input_data[1].mass;
M_p_des = N_input_data[0].mass;
ini_vel = impact_par = vel_vesc = impact_angle = -1.0;
ini_dist_fact = -1.0;
// overwrite total and projectile mass if profiles are read from file
if( useProfilesFlag )
M_des = M_p_des = -1.0;
}
// read radial profile(s) from files if desired
if( useProfilesFlag )
{
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Reading file(s) containing radial profiles ...\n");
if( PROFILE_FILE_PROJ != 0 ) // there is a projectile
{
if ( (p_prof_file = fopen(PROFILE_FILE_PROJ,"r")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for reading!\n", PROFILE_FILE_PROJ)
if( ( profile_projectile = (radial_profile_data*)malloc(sizeof(radial_profile_data)) ) == NULL )
ERRORTEXT("ERROR during memory allocation!\n")
fscanf(p_prof_file, "%*[^\n]\n"); // ignore first line
i = 0;
while( fscanf(p_prof_file, "%le %le %le%*[^\n]\n", &(profile_projectile[i].r), &(profile_projectile[i].rho), &(profile_projectile[i].e) ) == 3 )
{
i++;
if( ( profile_projectile = (radial_profile_data*)realloc(profile_projectile,(i+1)*sizeof(radial_profile_data)) ) == NULL )
ERRORTEXT("ERROR during memory allocation!\n")
}
p_profile_no_points = i;
fprintf(stdout, "Found %d datapoints for the projectile profile in '%s'.\n", p_profile_no_points, PROFILE_FILE_PROJ);
fclose(p_prof_file);
}
if( PROFILE_FILE_TARG != 0 ) // there is a target
{
if ( (t_prof_file = fopen(PROFILE_FILE_TARG,"r")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for reading!\n", PROFILE_FILE_TARG)
if( ( profile_target = (radial_profile_data*)malloc(sizeof(radial_profile_data)) ) == NULL )
ERRORTEXT("ERROR during memory allocation!\n")
fscanf(t_prof_file, "%*[^\n]\n"); // ignore first line
i = 0;
while( fscanf(t_prof_file, "%le %le %le%*[^\n]\n", &(profile_target[i].r), &(profile_target[i].rho), &(profile_target[i].e) ) == 3 )
{
i++;
if( ( profile_target = (radial_profile_data*)realloc(profile_target,(i+1)*sizeof(radial_profile_data)) ) == NULL )
ERRORTEXT("ERROR during memory allocation!\n")
}
t_profile_no_points = i;
fprintf(stdout, "Found %d datapoints for the target profile in '%s'.\n", t_profile_no_points, PROFILE_FILE_TARG);
fclose(t_prof_file);
}
}
// read (uncompressed) densities, weibull parameters (if required), bulk modulus (for calculating sound speed),
// Tillotson EoS parameters (if EoS is Tillotson), and ANEOS parameters (if EoS is ANEOS) for all relevant materials from material file,
// or assign them for ideal gas for miluph, or also read them for ideal gas from the material file for miluphcuda
mat[CORE].mat_type = MATTYPECORE;
mat[MANTLE].mat_type = MATTYPEMANTLE;
mat[SHELL].mat_type = MATTYPESHELL;
#ifdef MILUPH
if ( (mfl = fopen(matfile,"r")) == NULL ) // in case of miluphcuda file opening, etc. is handled by libconfig
ERRORVAR("FILE ERROR! Cannot open '%s' for reading!\n", matfile)
if( mat[CORE].eos == 'I' )
{
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Assigning values for material \"%s\", material type \"%d\", and compute sound speed from rho_0 and p_0 ...\n", mat[CORE].mat_name, mat[CORE].mat_type);
mat[CORE].rho_0 = mat[CORE].ideal_gas.rho_0 = I_RHO0;
mat[CORE].ideal_gas.p_0 = I_P0;
mat[CORE].ideal_gas.gamma = I_GAMMA;
mat[CORE].ideal_gas.polytropic_K = mat[CORE].ideal_gas.p_0 / pow(mat[CORE].ideal_gas.rho_0,mat[CORE].ideal_gas.gamma);
mat[CORE].cs = sqrt( mat[CORE].ideal_gas.gamma * mat[CORE].ideal_gas.p_0 / mat[CORE].ideal_gas.rho_0 ); // compute cs with rho_0 and p_0 for now ...
}
else
{
readMaterialConstants(mfl, &mat[CORE], weibull_core);
if( mat[CORE].eos == 'A' && hydrostructFlag )
{
allocate_ANEOS_table_memory(&mat[CORE]);
load_ANEOS_table(&mat[CORE]);
}
}
if( mat[MANTLE].eos == 'I' )
{
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Assigning values for material \"%s\", material type \"%d\", and compute sound speed from rho_0 and p_0 ...\n", mat[MANTLE].mat_name, mat[MANTLE].mat_type);
mat[MANTLE].rho_0 = mat[MANTLE].ideal_gas.rho_0 = I_RHO0;
mat[MANTLE].ideal_gas.p_0 = I_P0;
mat[MANTLE].ideal_gas.gamma = I_GAMMA;
mat[MANTLE].ideal_gas.polytropic_K = mat[MANTLE].ideal_gas.p_0 / pow(mat[MANTLE].ideal_gas.rho_0,mat[MANTLE].ideal_gas.gamma);
mat[MANTLE].cs = sqrt( mat[MANTLE].ideal_gas.gamma * mat[MANTLE].ideal_gas.p_0 / mat[MANTLE].ideal_gas.rho_0 ); // compute cs with rho_0 and p_0 for now ...
}
else
{
readMaterialConstants(mfl, &mat[MANTLE], weibull_mantle);
if( mat[MANTLE].eos == 'A' && hydrostructFlag )
{
allocate_ANEOS_table_memory(&mat[MANTLE]);
load_ANEOS_table(&mat[MANTLE]);
}
}
if( mat[SHELL].eos == 'I' )
{
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Assigning values for material \"%s\", material type \"%d\", and compute sound speed from rho_0 and p_0 ...\n", mat[SHELL].mat_name, mat[SHELL].mat_type);
mat[SHELL].rho_0 = mat[SHELL].ideal_gas.rho_0 = I_RHO0;
mat[SHELL].ideal_gas.p_0 = I_P0;
mat[SHELL].ideal_gas.gamma = I_GAMMA;
mat[SHELL].ideal_gas.polytropic_K = mat[SHELL].ideal_gas.p_0 / pow(mat[SHELL].ideal_gas.rho_0,mat[SHELL].ideal_gas.gamma);
mat[SHELL].cs = sqrt( mat[SHELL].ideal_gas.gamma * mat[SHELL].ideal_gas.p_0 / mat[SHELL].ideal_gas.rho_0 ); // compute cs with rho_0 and p_0 for now ...
}
else
{
readMaterialConstants(mfl, &mat[SHELL], weibull_shell);
if( mat[SHELL].eos == 'A' && hydrostructFlag )
{
allocate_ANEOS_table_memory(&mat[SHELL]);
load_ANEOS_table(&mat[SHELL]);
}
}
fclose(mfl);
#endif // MILUPH
#ifdef MILUPHCUDA
if( useProfilesFlag ) // some given radial profiles are read from files
{
if( (PROFILE_FILE_PROJ != 0 && PROFILE_R_P_C/PROFILE_R_P > eps6) || (PROFILE_FILE_TARG != 0 && PROFILE_R_T_C/PROFILE_R_T > eps6) ) // if there is any core in proj/targ
readMaterialConfiguration(matfile, &mat[CORE], weibull_core);
if( (PROFILE_FILE_PROJ != 0 && (PROFILE_R_P_M-PROFILE_R_P_C)/PROFILE_R_P > eps6) || (PROFILE_FILE_TARG != 0 && (PROFILE_R_T_M-PROFILE_R_T_C)/PROFILE_R_T > eps6) ) // if there is any mantle in proj/targ
{
readMaterialConfiguration(matfile, &mat[MANTLE], weibull_mantle);
}
else
{
memcpy(&mat[MANTLE], &mat[CORE], sizeof(material)); // if there is no actual mantle copy the core material to mat[MANTLE] as dummy
mat[MANTLE].mat_type = MATTYPEMANTLE;
}
if( (PROFILE_FILE_PROJ != 0 && (PROFILE_R_P-PROFILE_R_P_M)/PROFILE_R_P > eps6) || (PROFILE_FILE_TARG != 0 && (PROFILE_R_T-PROFILE_R_T_M)/PROFILE_R_T > eps6) ) // if there is any shell in proj/targ
{
readMaterialConfiguration(matfile, &mat[SHELL], weibull_shell);
}
else
{
memcpy(&mat[SHELL], &mat[CORE], sizeof(material)); // if there is no actual shell copy the core material to mat[SHELL] as dummy
mat[SHELL].mat_type = MATTYPESHELL;
}
}
else
{
if( ((1.0-C_p_m_des-C_p_s_des) > eps6*eps6) || ((1.0-C_t_m_des-C_t_s_des) > eps6*eps6) ) // if there is any core in proj/targ
{
readMaterialConfiguration(matfile, &mat[CORE], weibull_core);
if( mat[CORE].eos == 'A' && hydrostructFlag )
{
allocate_ANEOS_table_memory(&mat[CORE]);
allocated_ANEOS_mem_core_flag = TRUE;
load_ANEOS_table(&mat[CORE]);
}
}
if( (C_p_m_des > eps6*eps6) || (C_t_m_des > eps6*eps6) ) // if there is any mantle in proj/targ
{
readMaterialConfiguration(matfile, &mat[MANTLE], weibull_mantle);
if( mat[MANTLE].eos == 'A' && hydrostructFlag )
{
allocate_ANEOS_table_memory(&mat[MANTLE]);
allocated_ANEOS_mem_mantle_flag = TRUE;
load_ANEOS_table(&mat[MANTLE]);
}
}
else
{
memcpy(&mat[MANTLE], &mat[CORE], sizeof(material)); // if there is no actual mantle copy the core material to mat[MANTLE] as dummy
mat[MANTLE].mat_type = MATTYPEMANTLE;
}
if( (C_p_s_des > eps6*eps6) || (C_t_s_des > eps6*eps6) ) // if there is any shell in proj/targ
{
readMaterialConfiguration(matfile, &mat[SHELL], weibull_shell);
if( mat[SHELL].eos == 'A' && hydrostructFlag )
{
allocate_ANEOS_table_memory(&mat[SHELL]);
allocated_ANEOS_mem_shell_flag = TRUE;
load_ANEOS_table(&mat[SHELL]);
}
}
else
{
memcpy(&mat[SHELL], &mat[CORE], sizeof(material)); // if there is no actual shell copy the core material to mat[SHELL] as dummy
mat[SHELL].mat_type = MATTYPESHELL;
}
}
#endif // MILUPHCUDA
// set rho_limit (for Tillotson EoS) in case of miluph (for miluphcuda it is read from the materialconfiguration file)
#ifdef MILUPH
mat[CORE].till.rho_limit = mat[MANTLE].till.rho_limit = mat[SHELL].till.rho_limit = TILL_RHO_LIMIT;
#endif
// assign function pointer to eos-related functions for all materials (only relevant for materials suitable for relaxation via hydrostatic structure)
for(i=0; i<NMAT; i++)
{
if( mat[i].eos == 'T' )
{
mat_fp[i].eos_pressure = Tillotson_pressure;
mat_fp[i].eos_density = Tillotson_density;
mat_fp[i].eos_rho_self_consistent = rho_self_consistent_Tillotson;
mat_fp[i].eos_e_compression = e_compression_Tillotson;
}
else if( mat[i].eos == 'A' )
{
mat_fp[i].eos_pressure = ANEOS_pressure;
mat_fp[i].eos_density = ANEOS_density;
mat_fp[i].eos_rho_self_consistent = rho_self_consistent_ANEOS;
mat_fp[i].eos_e_compression = e_compression_ANEOS;
}
else if( mat[i].eos == 'I' )
{
mat_fp[i].eos_pressure = ideal_gas_pressure;
mat_fp[i].eos_density = ideal_gas_density;
mat_fp[i].eos_rho_self_consistent = rho_self_consistent_ideal_gas;
mat_fp[i].eos_e_compression = e_compression_ideal_gas;
}
}
// calculate desired masses from input values
if( useProfilesFlag )
{
M_t_des = M_p_c_des = M_p_m_des = M_p_s_des = M_t_c_des = M_t_m_des = M_t_s_des = -1.0;
}
else
{
M_t_des = M_des-M_p_des;
M_p_c_des = M_p_des*(1-C_p_m_des-C_p_s_des);
M_p_m_des = M_p_des*C_p_m_des;
M_p_s_des = M_p_des*C_p_s_des;
M_t_c_des = M_t_des*(1-C_t_m_des-C_t_s_des);
M_t_m_des = M_t_des*C_t_m_des;
M_t_s_des = M_t_des*C_t_s_des;
}
// calculate radii assuming uncompressed material (from desired masses)
if( useProfilesFlag )
{
R_p_c_uncomp = R_t_c_uncomp = R_p_m_uncomp = R_t_m_uncomp = R_p_uncomp = R_t_uncomp = -1.0;
}
else
{
R_p_c_uncomp = cbrt( THROVER4PI*M_p_c_des/mat[CORE].rho_0 );
R_t_c_uncomp = cbrt( THROVER4PI*M_t_c_des/mat[CORE].rho_0 );
R_p_m_uncomp = cbrt( THROVER4PI*M_p_m_des/mat[MANTLE].rho_0 + pow(R_p_c_uncomp,3) );
R_t_m_uncomp = cbrt( THROVER4PI*M_t_m_des/mat[MANTLE].rho_0 + pow(R_t_c_uncomp,3) );
R_p_uncomp = cbrt( THROVER4PI*M_p_s_des/mat[SHELL].rho_0 + pow(R_p_m_uncomp,3) );
R_t_uncomp = cbrt( THROVER4PI*M_t_s_des/mat[SHELL].rho_0 + pow(R_t_m_uncomp,3) );
}
// calculate hydrostatic internal structure and assign found radii of proj/targ
if( hydrostructFlag )
{
#pragma omp parallel
{
#pragma omp sections
{
#pragma omp section
if( M_p_des/M_des > eps6 ) // if there is a projectile
calc_internal_structure(M_p_des, M_p_c_des, M_p_m_des, struct_r_low*R_p_uncomp, struct_r_high*R_p_uncomp, mat, mat_fp, int_struct_p_c, int_struct_p_m, int_struct_p_s, NSTEPS);
#pragma omp section
if( M_t_des/M_des > eps6 ) // if there is a target
calc_internal_structure(M_t_des, M_t_c_des, M_t_m_des, struct_r_low*R_t_uncomp, struct_r_high*R_t_uncomp, mat, mat_fp, int_struct_t_c, int_struct_t_m, int_struct_t_s, NSTEPS);
} // end of omp sections region
} // end of omp parallel region
// assign hydrostatically calculated radii of the projectile (depending on what it actually consists of) and write structure file
if (M_p_des/M_des > eps6)
{
if( M_p_s_des/M_des > eps6 ) // if there is a shell
R_p_des = int_struct_p_s[NSTEPS].r;
else if( M_p_m_des/M_des > eps6 ) // if there is no shell, but a mantle
R_p_des = int_struct_p_m[NSTEPS].r;
else // if there is no shell or mantle, but a core
R_p_des = int_struct_p_c[NSTEPS].r;
#ifdef HYDROSTRUCT_DEBUG
fprintf(stdout, "FOUND HYDROSTATIC STRUCTURE of the PROJECTILE with R_p_des = %.15le\n", R_p_des);
#endif
if ( (pfl = fopen(p_structfile,"w")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for writing!\n", p_structfile)
fprintf(pfl, "# r (m)\t\tm (kg)\t\trho (kg/m^3)\te (J/kg)\tp (Pa)");
if( mat[CORE].eos == 'I' || mat[MANTLE].eos == 'I' || mat[SHELL].eos == 'I' || mat[CORE].eos == 'A' || mat[MANTLE].eos == 'A' || mat[SHELL].eos == 'A' )
fprintf(pfl, "\t\tT (K)");
if( mat[CORE].eos == 'A' || mat[MANTLE].eos == 'A' || mat[SHELL].eos == 'A' )
fprintf(pfl, "\t\tsound-speed (m/s)\t\tentropy (J/kg/K)\t\tphase-flag");
fprintf(pfl, "\n");
if( M_p_c_des/M_des > eps6 ) // if there is a core
{
R_p_c_des = int_struct_p_c[NSTEPS].r;
for(i=0; i<=NSTEPS; i++)
{
e_temp = (*(mat_fp[CORE].eos_e_compression))(int_struct_p_c[i].rho, &mat[CORE]);
fprintf(pfl, "%.16le\t%.16le\t%.16le\t%.16le\t%.16le", int_struct_p_c[i].r, int_struct_p_c[i].m, int_struct_p_c[i].rho, e_temp, int_struct_p_c[i].p);
if( mat[CORE].eos == 'I' )
{
#ifdef MILUPH
fprintf(pfl, "\t%.16le", e_temp*CONVERSION_E_TO_T);
#endif
#ifdef MILUPHCUDA
fprintf(pfl, "\t%.16le", e_temp*(mat[CORE].ideal_gas.conv_e_to_T));
#endif
}
if( mat[CORE].eos == 'A' )
{
// find array-indices just below the actual values of rho and e
aneos_i_rho = array_index(int_struct_p_c[i].rho, mat[CORE].aneos.rho, mat[CORE].aneos.n_rho);
aneos_i_e = array_index(e_temp, mat[CORE].aneos.e, mat[CORE].aneos.n_e);
// interpolate (bi)linearly
aneos_T = bilinear_interpolation(int_struct_p_c[i].rho, e_temp, mat[CORE].aneos.T, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
aneos_cs = bilinear_interpolation(int_struct_p_c[i].rho, e_temp, mat[CORE].aneos.cs, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
aneos_entropy = bilinear_interpolation(int_struct_p_c[i].rho, e_temp, mat[CORE].aneos.entropy, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
aneos_phase_flag = discrete_value_table_lookup(int_struct_p_c[i].rho, e_temp, mat[CORE].aneos.phase_flag, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
fprintf(pfl, "\t%.16le\t%.16le\t%.16le\t%d", aneos_T, aneos_cs, aneos_entropy, aneos_phase_flag);
}
fprintf(pfl, "\n");
}
}
else
R_p_c_des = 0.0;
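// Annotation on the interpolation above (a sketch of the expected behavior,
// assuming a regular rho-e grid): with i_rho, i_e the grid indices just below
// (rho,e) and weights
//     t_rho = (rho - rho[i_rho]) / (rho[i_rho+1] - rho[i_rho])
//     t_e   = (e   - e[i_e])     / (e[i_e+1]     - e[i_e])
// bilinear_interpolation() returns
//     (1-t_rho)*(1-t_e)*f[i_rho][i_e]   + t_rho*(1-t_e)*f[i_rho+1][i_e]
//   + (1-t_rho)*t_e*f[i_rho][i_e+1]     + t_rho*t_e*f[i_rho+1][i_e+1]
// while discrete_value_table_lookup() picks the value at the nearest grid
// point, since phase flags cannot be meaningfully averaged.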
if( M_p_m_des/M_des > eps6 ) // if there is a mantle
{
R_p_m_des = int_struct_p_m[NSTEPS].r;
for(i=0; i<=NSTEPS; i++)
{
e_temp = (*(mat_fp[MANTLE].eos_e_compression))(int_struct_p_m[i].rho, &mat[MANTLE]);
fprintf(pfl, "%.16le\t%.16le\t%.16le\t%.16le\t%.16le", int_struct_p_m[i].r, int_struct_p_m[i].m, int_struct_p_m[i].rho, e_temp, int_struct_p_m[i].p);
if( mat[MANTLE].eos == 'I' )
{
#ifdef MILUPH
fprintf(pfl, "\t%.16le", e_temp*CONVERSION_E_TO_T);
#endif
#ifdef MILUPHCUDA
fprintf(pfl, "\t%.16le", e_temp*(mat[MANTLE].ideal_gas.conv_e_to_T));
#endif
}
if( mat[MANTLE].eos == 'A' )
{
// find array-indices just below the actual values of rho and e
aneos_i_rho = array_index(int_struct_p_m[i].rho, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.n_rho);
aneos_i_e = array_index(e_temp, mat[MANTLE].aneos.e, mat[MANTLE].aneos.n_e);
// interpolate (bi)linearly
aneos_T = bilinear_interpolation(int_struct_p_m[i].rho, e_temp, mat[MANTLE].aneos.T, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
aneos_cs = bilinear_interpolation(int_struct_p_m[i].rho, e_temp, mat[MANTLE].aneos.cs, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
aneos_entropy = bilinear_interpolation(int_struct_p_m[i].rho, e_temp, mat[MANTLE].aneos.entropy, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
aneos_phase_flag = discrete_value_table_lookup(int_struct_p_m[i].rho, e_temp, mat[MANTLE].aneos.phase_flag, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
fprintf(pfl, "\t%.16le\t%.16le\t%.16le\t%d", aneos_T, aneos_cs, aneos_entropy, aneos_phase_flag);
}
fprintf(pfl, "\n");
}
}
else
R_p_m_des = R_p_c_des;
if( M_p_s_des/M_des > eps6 ) // if there is a shell
for(i=0; i<=NSTEPS; i++)
{
e_temp = (*(mat_fp[SHELL].eos_e_compression))(int_struct_p_s[i].rho, &mat[SHELL]);
fprintf(pfl, "%.16le\t%.16le\t%.16le\t%.16le\t%.16le", int_struct_p_s[i].r, int_struct_p_s[i].m, int_struct_p_s[i].rho, e_temp, int_struct_p_s[i].p);
if( mat[SHELL].eos == 'I' )
{
#ifdef MILUPH
fprintf(pfl, "\t%.16le", e_temp*CONVERSION_E_TO_T);
#endif
#ifdef MILUPHCUDA
fprintf(pfl, "\t%.16le", e_temp*(mat[SHELL].ideal_gas.conv_e_to_T));
#endif
}
if( mat[SHELL].eos == 'A' )
{
// find array-indices just below the actual values of rho and e
aneos_i_rho = array_index(int_struct_p_s[i].rho, mat[SHELL].aneos.rho, mat[SHELL].aneos.n_rho);
aneos_i_e = array_index(e_temp, mat[SHELL].aneos.e, mat[SHELL].aneos.n_e);
// interpolate (bi)linearly
aneos_T = bilinear_interpolation(int_struct_p_s[i].rho, e_temp, mat[SHELL].aneos.T, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
aneos_cs = bilinear_interpolation(int_struct_p_s[i].rho, e_temp, mat[SHELL].aneos.cs, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
aneos_entropy = bilinear_interpolation(int_struct_p_s[i].rho, e_temp, mat[SHELL].aneos.entropy, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
aneos_phase_flag = discrete_value_table_lookup(int_struct_p_s[i].rho, e_temp, mat[SHELL].aneos.phase_flag, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
fprintf(pfl, "\t%.16le\t%.16le\t%.16le\t%d", aneos_T, aneos_cs, aneos_entropy, aneos_phase_flag);
}
fprintf(pfl, "\n");
}
fclose(pfl);
}
else
R_p_des = R_p_c_des = R_p_m_des = 0.0;
// assign hydrostatically calculated radii of the target (depending on what it actually consists of) and write structure file
if (M_t_des/M_des > eps6)
{
if( M_t_s_des/M_des > eps6 ) // if there is a shell
R_t_des = int_struct_t_s[NSTEPS].r;
else if( M_t_m_des/M_des > eps6 ) // if there is no shell, but a mantle
R_t_des = int_struct_t_m[NSTEPS].r;
else // if there is no shell or mantle, but a core
R_t_des = int_struct_t_c[NSTEPS].r;
#ifdef HYDROSTRUCT_DEBUG
fprintf(stdout, "FOUND HYDROSTATIC STRUCTURE of the TARGET with R_t_des = %.16le\n", R_t_des);
#endif
if ( (tfl = fopen(t_structfile,"w")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for writing!\n", t_structfile)
fprintf(tfl, "# r (m)\t\tm (kg)\t\trho (kg/m^3)\te (J/kg)\tp (Pa)");
if( mat[CORE].eos == 'I' || mat[MANTLE].eos == 'I' || mat[SHELL].eos == 'I' || mat[CORE].eos == 'A' || mat[MANTLE].eos == 'A' || mat[SHELL].eos == 'A' )
fprintf(tfl, "\t\tT (K)");
if( mat[CORE].eos == 'A' || mat[MANTLE].eos == 'A' || mat[SHELL].eos == 'A' )
fprintf(tfl, "\t\tsound-speed (m/s)\t\tentropy (J/kg/K)\t\tphase-flag");
fprintf(tfl, "\n");
if( M_t_c_des/M_des > eps6 ) // if there is a core
{
R_t_c_des = int_struct_t_c[NSTEPS].r;
for(i=0; i<=NSTEPS; i++)
{
e_temp = (*(mat_fp[CORE].eos_e_compression))(int_struct_t_c[i].rho, &mat[CORE]);
fprintf(tfl, "%.16le\t%.16le\t%.16le\t%.16le\t%.16le", int_struct_t_c[i].r, int_struct_t_c[i].m, int_struct_t_c[i].rho, e_temp, int_struct_t_c[i].p);
if( mat[CORE].eos == 'I' )
{
#ifdef MILUPH
fprintf(tfl, "\t%.16le", e_temp*CONVERSION_E_TO_T);
#endif
#ifdef MILUPHCUDA
fprintf(tfl, "\t%.16le", e_temp*(mat[CORE].ideal_gas.conv_e_to_T));
#endif
}
if( mat[CORE].eos == 'A' )
{
// find array-indices just below the actual values of rho and e
aneos_i_rho = array_index(int_struct_t_c[i].rho, mat[CORE].aneos.rho, mat[CORE].aneos.n_rho);
aneos_i_e = array_index(e_temp, mat[CORE].aneos.e, mat[CORE].aneos.n_e);
// interpolate (bi)linearly
aneos_T = bilinear_interpolation(int_struct_t_c[i].rho, e_temp, mat[CORE].aneos.T, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
aneos_cs = bilinear_interpolation(int_struct_t_c[i].rho, e_temp, mat[CORE].aneos.cs, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
aneos_entropy = bilinear_interpolation(int_struct_t_c[i].rho, e_temp, mat[CORE].aneos.entropy, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
aneos_phase_flag = discrete_value_table_lookup(int_struct_t_c[i].rho, e_temp, mat[CORE].aneos.phase_flag, mat[CORE].aneos.rho, mat[CORE].aneos.e, aneos_i_rho, aneos_i_e, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
fprintf(tfl, "\t%.16le\t%.16le\t%.16le\t%d", aneos_T, aneos_cs, aneos_entropy, aneos_phase_flag);
}
fprintf(tfl, "\n");
}
}
else
R_t_c_des = 0.0;
if( M_t_m_des/M_des > eps6 ) // if there is a mantle
{
R_t_m_des = int_struct_t_m[NSTEPS].r;
for(i=0; i<=NSTEPS; i++)
{
e_temp = (*(mat_fp[MANTLE].eos_e_compression))(int_struct_t_m[i].rho, &mat[MANTLE]);
fprintf(tfl, "%.16le\t%.16le\t%.16le\t%.16le\t%.16le", int_struct_t_m[i].r, int_struct_t_m[i].m, int_struct_t_m[i].rho, e_temp, int_struct_t_m[i].p);
if( mat[MANTLE].eos == 'I' )
{
#ifdef MILUPH
fprintf(tfl, "\t%.16le", e_temp*CONVERSION_E_TO_T);
#endif
#ifdef MILUPHCUDA
fprintf(tfl, "\t%.16le", e_temp*(mat[MANTLE].ideal_gas.conv_e_to_T));
#endif
}
if( mat[MANTLE].eos == 'A' )
{
// find array-indices just below the actual values of rho and e
aneos_i_rho = array_index(int_struct_t_m[i].rho, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.n_rho);
aneos_i_e = array_index(e_temp, mat[MANTLE].aneos.e, mat[MANTLE].aneos.n_e);
// interpolate (bi)linearly
aneos_T = bilinear_interpolation(int_struct_t_m[i].rho, e_temp, mat[MANTLE].aneos.T, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
aneos_cs = bilinear_interpolation(int_struct_t_m[i].rho, e_temp, mat[MANTLE].aneos.cs, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
aneos_entropy = bilinear_interpolation(int_struct_t_m[i].rho, e_temp, mat[MANTLE].aneos.entropy, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
aneos_phase_flag = discrete_value_table_lookup(int_struct_t_m[i].rho, e_temp, mat[MANTLE].aneos.phase_flag, mat[MANTLE].aneos.rho, mat[MANTLE].aneos.e, aneos_i_rho, aneos_i_e, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
fprintf(tfl, "\t%.16le\t%.16le\t%.16le\t%d", aneos_T, aneos_cs, aneos_entropy, aneos_phase_flag);
}
fprintf(tfl, "\n");
}
}
else
R_t_m_des = R_t_c_des;
if( M_t_s_des/M_des > eps6 ) // if there is a shell
for(i=0; i<=NSTEPS; i++)
{
e_temp = (*(mat_fp[SHELL].eos_e_compression))(int_struct_t_s[i].rho, &mat[SHELL]);
fprintf(tfl, "%.16le\t%.16le\t%.16le\t%.16le\t%.16le", int_struct_t_s[i].r, int_struct_t_s[i].m, int_struct_t_s[i].rho, e_temp, int_struct_t_s[i].p);
if( mat[SHELL].eos == 'I' )
{
#ifdef MILUPH
fprintf(tfl, "\t%.16le", e_temp*CONVERSION_E_TO_T);
#endif
#ifdef MILUPHCUDA
fprintf(tfl, "\t%.16le", e_temp*(mat[SHELL].ideal_gas.conv_e_to_T));
#endif
}
if( mat[SHELL].eos == 'A' )
{
// find array-indices just below the actual values of rho and e
aneos_i_rho = array_index(int_struct_t_s[i].rho, mat[SHELL].aneos.rho, mat[SHELL].aneos.n_rho);
aneos_i_e = array_index(e_temp, mat[SHELL].aneos.e, mat[SHELL].aneos.n_e);
// interpolate (bi)linearly
aneos_T = bilinear_interpolation(int_struct_t_s[i].rho, e_temp, mat[SHELL].aneos.T, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
aneos_cs = bilinear_interpolation(int_struct_t_s[i].rho, e_temp, mat[SHELL].aneos.cs, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
aneos_entropy = bilinear_interpolation(int_struct_t_s[i].rho, e_temp, mat[SHELL].aneos.entropy, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
aneos_phase_flag = discrete_value_table_lookup(int_struct_t_s[i].rho, e_temp, mat[SHELL].aneos.phase_flag, mat[SHELL].aneos.rho, mat[SHELL].aneos.e, aneos_i_rho, aneos_i_e, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
fprintf(tfl, "\t%.16le\t%.16le\t%.16le\t%d", aneos_T, aneos_cs, aneos_entropy, aneos_phase_flag);
}
fprintf(tfl, "\n");
}
fclose(tfl);
}
else
R_t_des = R_t_c_des = R_t_m_des = 0.0;
}
else if( useProfilesFlag ) // some given profiles are used for setting the bodies' structure
{
R_p_des = PROFILE_R_P;
R_p_c_des = PROFILE_R_P_C;
R_p_m_des = PROFILE_R_P_M;
R_t_des = PROFILE_R_T;
R_t_c_des = PROFILE_R_T_C;
R_t_m_des = PROFILE_R_T_M;
}
else // a homogeneous density is assumed throughout each material
{
R_p_des = R_p_uncomp;
R_p_c_des = R_p_c_uncomp;
R_p_m_des = R_p_m_uncomp;
R_t_des = R_t_uncomp;
R_t_c_des = R_t_c_uncomp;
R_t_m_des = R_t_m_uncomp;
} // end 'if( hydrostructFlag )'
// compute desired particle numbers of proj/targ weighted by the volume of proj/targ
N_p_des = N_des * pow(R_p_des,3)/( pow(R_p_des,3) + pow(R_t_des,3) );
N_t_des = N_des * pow(R_t_des,3)/( pow(R_p_des,3) + pow(R_t_des,3) );
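// NOTE (added): since each particle occupies (roughly) the same volume, the desired
// counts are simply split in proportion to the bodies' volumes, i.e. to R^3.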
// allocate memory for all SPH particles in 'p'
if ( (p = (particle*)malloc(MEMALLOCFACT*N_des*sizeof(particle))) == NULL )
ERRORTEXT("ERROR during memory allocation for SPH particles!\n")
// add projectile and target spheres around the origin - positions, all velocity components = 0, and material types are added
N_p_c = N_t_c = N_p_m = N_t_m = 0;
fprintf(stdout, "--------------------------------\n");
if( ParticleGeometry == 0 ) // SC lattice
{
mpd = cbrt( (pow(R_p_des,3)+pow(R_t_des,3))/(THROVER4PI*(double)N_des) );
V_particle_p = V_particle_t = pow(mpd,3);
fprintf(stdout, "Building sphere(s) based on a simple-cubic lattice ... ");
N_p = add_sphere_particles_SC(p, mat, mpd, R_p_des, R_p_c_des, R_p_m_des, &N_p_c, &N_p_m);
N_t = add_sphere_particles_SC(p+N_p, mat, mpd, R_t_des, R_t_c_des, R_t_m_des, &N_t_c, &N_t_m);
fprintf(stdout, "Done.\n");
}
else if( ParticleGeometry == 1 ) // HCP lattice
{
mpd = cbrt( sqrt(2.0)*(pow(R_p_des,3)+pow(R_t_des,3))/(THROVER4PI*(double)N_des) );
V_particle_p = V_particle_t = pow(mpd,3)/sqrt(2.0);
fprintf(stdout, "Building sphere(s) based on a hexagonally close-packed lattice ... ");
N_p = add_sphere_particles_HCP(p, mat, mpd, R_p_des, R_p_c_des, R_p_m_des, &N_p_c, &N_p_m);
N_t = add_sphere_particles_HCP(p+N_p, mat, mpd, R_t_des, R_t_c_des, R_t_m_des, &N_t_c, &N_t_m);
fprintf(stdout, "Done.\n");
}
else if( ParticleGeometry == 2 ) // spherical shell setup with SEAGen
{
fprintf(stdout, "Building sphere(s) based on a spherical shell setup generated with SEAGen ...\n");
Py_Initialize(); // it is important to initialize (and finalize) Python only once (module stuff might get messed up otherwise)
if( !Py_IsInitialized() )
ERRORTEXT("ERROR! Unable to initialize Python interpreter.\n")
if( M_p_des/M_des > eps6 ) // if there is a projectile
N_p = add_sphere_particles_SS(p, N_p_des, R_p_des, R_p_c_des, R_p_m_des, source_dir, "projectile.SEAGen", &N_p_c, &N_p_m, &mpd_p);
if( M_t_des/M_des > eps6 ) // if there is a target
N_t = add_sphere_particles_SS(p+N_p, N_t_des, R_t_des, R_t_c_des, R_t_m_des, source_dir, "target.SEAGen", &N_t_c, &N_t_m, &mpd_t);
Py_Finalize();
V_particle_p = pow(mpd_p,3) / sqrt(2.0);
V_particle_t = pow(mpd_t,3) / sqrt(2.0);
mpd = MAX(mpd_p, mpd_t); // to be on the safe side (for setting the sml) ...
}
N = N_p+N_t;
// compute sml based on mean particle distance
mat[CORE].sml = mat[MANTLE].sml = mat[SHELL].sml = mpd * sml_factor;
// write sml to all relevant materials in materialconfiguration file for miluphcuda
#ifdef MILUPHCUDA
pasteSml(matfile, mat);
#endif
// set artificial viscosity parameters and write scenario file for miluph
#ifdef MILUPH
if ( (sfl = fopen(scenfile,"w")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for writing!\n", scenfile)
mat[CORE].alpha = mat[MANTLE].alpha = mat[SHELL].alpha = ART_VISC_ALPHA;
mat[CORE].beta = mat[MANTLE].beta = mat[SHELL].beta = ART_VISC_BETA;
write_scenario_data(sfl, mat, NMAT);
fclose(sfl);
#endif
// if in N_input mode, either create SPH particles for the additional bodies/pointmasses,
// or write their properties to a separate output file (pointmasses-file)
if( N_input == TRUE && M_output == FALSE ) // create SPH particles for additional pointmasses
{
for(i=2; i<N_bodies; i++)
{
for(j=0; j<DIM; j++)
{
p[N].x[j] = N_input_data[i].x[j];
p[N].v[j] = N_input_data[i].v[j];
}
p[N].mat_type = N_BODIES_MATTYPE;
p[N].rho = N_BODIES_RHO;
p[N].mass = N_input_data[i].mass;
p[N].e = 0.0;
N++;
}
if( (N_bodies-2 + N_p + N_t) != N )
ERRORTEXT("ERROR! Strange particle number mismatch in N_bodies mode.\n")
}
else if( N_input == TRUE && M_output == TRUE ) // write additional bodies/pointmasses to pointmasses-file
{
FILE *tmp_f;
if ( (tmp_f = fopen(pointmassesfile,"w")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for writing!\n", pointmassesfile)
for(i=2; i<N_bodies; i++)
{
for(j=0; j<DIM; j++)
fprintf(tmp_f, "%.16le\t", N_input_data[i].x[j]);
for(j=0; j<DIM; j++)
fprintf(tmp_f, "%.16le\t", N_input_data[i].v[j]);
fprintf(tmp_f, "%.16le\t", N_input_data[i].mass);
fprintf(tmp_f, "%.16le\t", N_BODIES_RMIN);
fprintf(tmp_f, "%.16le\t", N_BODIES_RMAX);
fprintf(tmp_f, "%d\n", N_BODIES_FEELING_FLAG);
}
fclose(tmp_f);
}
// make sure that the allocated memory (at p) for the particles was sufficient
if ( MEMALLOCFACT*N_des < N )
ERRORTEXT("ERROR! Too little memory has been originally allocated for the actual number of particles.\n")
// calculate and assign densities, masses and internal energies to the proj/targ particles
if( hydrostructFlag ) // the density distribution follows the correct hydrostatic structure
{
// assign densities, masses and internal energies to the projectile's particles, assuming the hydrostatic structure calculated above
for(i=0; i<N_p; i++)
{
r2 = p[i].x[0]*p[i].x[0] + p[i].x[1]*p[i].x[1] + p[i].x[2]*p[i].x[2];
if( (r2 < R_p_c_des*R_p_c_des) && (M_p_c_des/M_des > eps6) && (p[i].mat_type == mat[CORE].mat_type) ) // if particle is in the projectile's core
{
set_hydrostruct_density(p, i, r2, int_struct_p_c);
p[i].e = (*(mat_fp[CORE].eos_e_compression))(p[i].rho, &mat[CORE]);
}
else if( (r2 < R_p_m_des*R_p_m_des) && (M_p_m_des/M_des > eps6) && (p[i].mat_type == mat[MANTLE].mat_type) ) // if particle is in the projectile's mantle
{
set_hydrostruct_density(p, i, r2, int_struct_p_m);
p[i].e = (*(mat_fp[MANTLE].eos_e_compression))(p[i].rho, &mat[MANTLE]);
}
else if( (r2 < R_p_des*R_p_des) && (M_p_s_des/M_des > eps6) && (p[i].mat_type == mat[SHELL].mat_type) ) // if particle is in the projectile's shell
{
set_hydrostruct_density(p, i, r2, int_struct_p_s);
p[i].e = (*(mat_fp[SHELL].eos_e_compression))(p[i].rho, &mat[SHELL]);
}
else
ERRORTEXT("ERROR when assigning densities, masses and internal energies (calculated via hydrostatic structure) to the projectile's particles! Particle characteristics seem to match neither the projectile's core nor mantle nor shell perfectly ...\n")
p[i].mass = p[i].rho * V_particle_p;
}
// assign densities, masses and internal energies to the target's particles, assuming the hydrostatic structure calculated above
for( i=N_p; i<(N_p+N_t); i++)
{
r2 = p[i].x[0]*p[i].x[0] + p[i].x[1]*p[i].x[1] + p[i].x[2]*p[i].x[2];
if( (r2 < R_t_c_des*R_t_c_des) && (M_t_c_des/M_des > eps6) && (p[i].mat_type == mat[CORE].mat_type) ) // if particle is in the target's core
{
set_hydrostruct_density(p, i, r2, int_struct_t_c);
p[i].e = (*(mat_fp[CORE].eos_e_compression))(p[i].rho, &mat[CORE]);
}
else if( (r2 < R_t_m_des*R_t_m_des) && (M_t_m_des/M_des > eps6) && (p[i].mat_type == mat[MANTLE].mat_type) ) // if particle is in the target's mantle
{
set_hydrostruct_density(p, i, r2, int_struct_t_m);
p[i].e = (*(mat_fp[MANTLE].eos_e_compression))(p[i].rho, &mat[MANTLE]);
}
else if( (r2 < R_t_des*R_t_des) && (M_t_s_des/M_des > eps6) && (p[i].mat_type == mat[SHELL].mat_type) ) // if particle is in the target's shell
{
set_hydrostruct_density(p, i, r2, int_struct_t_s);
p[i].e = (*(mat_fp[SHELL].eos_e_compression))(p[i].rho, &mat[SHELL]);
}
else
ERRORTEXT("ERROR when assigning densities, masses and internal energies (calculated via hydrostatic structure) to the target's particles! Particle characteristics seem to match neither the target's core nor mantle nor shell perfectly ...\n")
p[i].mass = p[i].rho * V_particle_t;
}
}
else if( useProfilesFlag ) // given profiles are used for setting the bodies structure
{
// assign densities, masses and energies to the projectile's particles, following the given profile
for(i=0; i<N_p; i++)
{
r2 = p[i].x[0]*p[i].x[0] + p[i].x[1]*p[i].x[1] + p[i].x[2]*p[i].x[2];
set_profile_rho_e(p, i, r2, profile_projectile, p_profile_no_points);
p[i].mass = p[i].rho * V_particle_p;
}
// assign densities, masses, and energies to the target's particles, following the given profile
for( i=N_p; i<(N_p+N_t); i++)
{
r2 = p[i].x[0]*p[i].x[0] + p[i].x[1]*p[i].x[1] + p[i].x[2]*p[i].x[2];
set_profile_rho_e(p, i, r2, profile_target, t_profile_no_points);
p[i].mass = p[i].rho * V_particle_t;
}
}
else // homogeneous density is assumed throughout each material
{
M_particle_p[CORE] = V_particle_p*mat[CORE].rho_0;
M_particle_p[MANTLE] = V_particle_p*mat[MANTLE].rho_0;
M_particle_p[SHELL] = V_particle_p*mat[SHELL].rho_0;
M_particle_t[CORE] = V_particle_t*mat[CORE].rho_0;
M_particle_t[MANTLE] = V_particle_t*mat[MANTLE].rho_0;
M_particle_t[SHELL] = V_particle_t*mat[SHELL].rho_0;
for( i=0; i<(N_p+N_t); i++)
{
if( p[i].mat_type == mat[CORE].mat_type )
{
if( i < N_p ) // projectile
p[i].mass = M_particle_p[CORE];
if( i >= N_p ) // target
p[i].mass = M_particle_t[CORE];
p[i].rho = mat[CORE].rho_0;
}
else if( p[i].mat_type == mat[MANTLE].mat_type )
{
if( i < N_p ) // projectile
p[i].mass = M_particle_p[MANTLE];
if( i >= N_p ) // target
p[i].mass = M_particle_t[MANTLE];
p[i].rho = mat[MANTLE].rho_0;
}
else if( p[i].mat_type == mat[SHELL].mat_type )
{
if( i < N_p ) // projectile
p[i].mass = M_particle_p[SHELL];
if( i >= N_p ) // target
p[i].mass = M_particle_t[SHELL];
p[i].rho = mat[SHELL].rho_0;
}
// assign internal energies depending on eos
if( (p[i].mat_type==mat[CORE].mat_type) && (mat[CORE].eos=='A') )
p[i].e = mat[CORE].aneos.e_norm;
else if( (p[i].mat_type==mat[MANTLE].mat_type) && (mat[MANTLE].eos == 'A') )
p[i].e = mat[MANTLE].aneos.e_norm;
else if( (p[i].mat_type==mat[SHELL].mat_type) && (mat[SHELL].eos=='A') )
p[i].e = mat[SHELL].aneos.e_norm;
else if( (p[i].mat_type==mat[CORE].mat_type) && (mat[CORE].eos=='I') )
p[i].e = e_compression_ideal_gas(p[i].rho, &mat[CORE]);
else if( (p[i].mat_type==mat[MANTLE].mat_type) && (mat[MANTLE].eos=='I') )
p[i].e = e_compression_ideal_gas(p[i].rho, &mat[MANTLE]);
else if( (p[i].mat_type==mat[SHELL].mat_type) && (mat[SHELL].eos=='I') )
p[i].e = e_compression_ideal_gas(p[i].rho, &mat[SHELL]);
else
p[i].e = 0.0;
}
} // end 'if( hydrostructFlag )'
// rotate projectile and target about fixed angles if defined
#ifdef ROTATED_CONFIGURATION
rotate_sphere(p, N_p, P_Z_ANGLE*M_PI/180.0, P_Y_ANGLE*M_PI/180.0, P_X_ANGLE*M_PI/180.0); // rotate projectile
rotate_sphere(p+N_p, N_t, T_Z_ANGLE*M_PI/180.0, T_Y_ANGLE*M_PI/180.0, T_X_ANGLE*M_PI/180.0); // rotate target
#endif
// generate projectile/target rotation if set
if( p_rot_period > 0.0 )
{
for(i=0; i<DIM; i++)
p_omega[i] = p_rot_axis[i];
// give 'p_omega' the proper length to make it the angular velocity vector
tmp = sqrt( p_omega[0]*p_omega[0] + p_omega[1]*p_omega[1] + p_omega[2]*p_omega[2] );
for(i=0; i<DIM; i++)
p_omega[i] *= 2.0 * M_PI / p_rot_period / tmp;
// compute particle velocities via cross-product (v = omega x r)
for(i=0; i<N_p; i++)
{
p[i].v[0] = p_omega[1]*p[i].x[2] - p_omega[2]*p[i].x[1];
p[i].v[1] = p_omega[2]*p[i].x[0] - p_omega[0]*p[i].x[2];
p[i].v[2] = p_omega[0]*p[i].x[1] - p_omega[1]*p[i].x[0];
}
}
if( t_rot_period > 0.0 )
{
for(i=0; i<DIM; i++)
t_omega[i] = t_rot_axis[i];
// give 't_omega' the proper length to make it the angular velocity vector
tmp = sqrt( t_omega[0]*t_omega[0] + t_omega[1]*t_omega[1] + t_omega[2]*t_omega[2] );
for(i=0; i<DIM; i++)
t_omega[i] *= 2.0 * M_PI / t_rot_period / tmp;
// compute particle velocities via cross-product (v = omega x r)
for(i=N_p; i<(N_p+N_t); i++)
{
p[i].v[0] = t_omega[1]*p[i].x[2] - t_omega[2]*p[i].x[1];
p[i].v[1] = t_omega[2]*p[i].x[0] - t_omega[0]*p[i].x[2];
p[i].v[2] = t_omega[0]*p[i].x[1] - t_omega[1]*p[i].x[0];
}
}
// calculate corrected masses and radii of proj/targ due to the actual particle numbers (which in general deviate from the desired ones)
if( hydrostructFlag || useProfilesFlag )
{
M_p = M_p_c = M_p_m = 0.0;
for(i=0; i<N_p; i++)
{
M_p += p[i].mass;
if( p[i].mat_type == mat[CORE].mat_type )
M_p_c += p[i].mass;
if( p[i].mat_type == mat[MANTLE].mat_type )
M_p_m += p[i].mass;
}
M_t = M_t_c = M_t_m = 0.0;
for(i=N_p; i<(N_p+N_t); i++)
{
M_t += p[i].mass;
if( p[i].mat_type == mat[CORE].mat_type )
M_t_c += p[i].mass;
if( p[i].mat_type == mat[MANTLE].mat_type )
M_t_m += p[i].mass;
}
}
else
{
M_p_c = N_p_c * M_particle_p[CORE];
M_t_c = N_t_c * M_particle_t[CORE];
M_p_m = N_p_m * M_particle_p[MANTLE];
M_t_m = N_t_m * M_particle_t[MANTLE];
M_p = M_p_c + M_p_m + (N_p-N_p_c-N_p_m)*M_particle_p[SHELL];
M_t = M_t_c + M_t_m + (N_t-N_t_c-N_t_m)*M_particle_t[SHELL];
}
M = M_p + M_t;
if( ParticleGeometry == 0 ) // SC lattice
{
// Calculation of the actual radii is identical with and without HYDROSTRUCT (or given radial profiles) since "ideal" lattices are implemented in both cases - the mpd is simply a bit smaller with HYDROSTRUCT
R_p_c = mpd * cbrt( THROVER4PI*N_p_c );
R_t_c = mpd * cbrt( THROVER4PI*N_t_c );
R_p_m = mpd * cbrt( THROVER4PI*(N_p_c+N_p_m) ); // because core/mantle/shell particles all take the same volume
R_t_m = mpd * cbrt( THROVER4PI*(N_t_c+N_t_m) );
R_p = mpd * cbrt( THROVER4PI*N_p );
R_t = mpd * cbrt( THROVER4PI*N_t );
}
else if( ParticleGeometry == 1 ) // HCP lattice
{
R_p_c = mpd * cbrt( THROVER4PI*N_p_c/sqrt(2.0) );
R_t_c = mpd * cbrt( THROVER4PI*N_t_c/sqrt(2.0) );
R_p_m = mpd * cbrt( THROVER4PI*(N_p_c+N_p_m)/sqrt(2.0) ); // because core/mantle/shell particles all take the same volume
R_t_m = mpd * cbrt( THROVER4PI*(N_t_c+N_t_m)/sqrt(2.0) );
R_p = mpd * cbrt( THROVER4PI*N_p/sqrt(2.0) );
R_t = mpd * cbrt( THROVER4PI*N_t/sqrt(2.0) );
}
else if( ParticleGeometry == 2 ) // spherical shell setup
{
R_p_c = cbrt( N_p_c*V_particle_p*THROVER4PI );
R_t_c = cbrt( N_t_c*V_particle_t*THROVER4PI );
R_p_m = cbrt( (N_p_c+N_p_m)*V_particle_p*THROVER4PI );
R_t_m = cbrt( (N_t_c+N_t_m)*V_particle_t*THROVER4PI );
R_p = cbrt( N_p*V_particle_p*THROVER4PI );
R_t = cbrt( N_t*V_particle_t*THROVER4PI );
}
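// (added derivation) These radii follow from equating the sphere volume with the summed
// particle volumes: (4/3)*pi*R^3 = N*V_particle  =>  R = cbrt( 3*N*V_particle/(4*pi) ),
// where V_particle = mpd^3 for SC and mpd^3/sqrt(2) for HCP; THROVER4PI is presumably 3/(4*pi).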
// if not in N_input mode, move proj and targ to the initial positions and velocities (where the targ is at the origin at rest)
if( !N_input )
{
// calculate initial proj position (where the targ is at the origin)
// NOTE: There's a minimum possible distance given by their radii + an additional sml distance.
ini_dist = R_p + R_t + 1.5*MAX(MAX(mat[CORE].sml,mat[SHELL].sml),mat[MANTLE].sml); // minimum possible initial distance
des_ini_dist = (R_p + R_t)*ini_dist_fact;
if ( des_ini_dist > ini_dist )
ini_dist = des_ini_dist;
if ( vel_vesc_angle ) // compute initial geometry in case of v/v_esc and impact angle as input parameters
{
collision_geometry(M_p, M_t, R_p, R_t, ini_dist, vel_vesc, impact_angle, &impact_par, &ini_vel, &impact_vel_abs);
ini_vel = -1.0*ini_vel;
}
if ( ini_dist < impact_par ) // make sure that the actual initial distance is not smaller than the impact parameter
ERRORTEXT("ERROR. The spheres' initial distance is smaller than the impact parameter. Geometrically impossible!\n")
ini_pos_p[0] = impact_par;
ini_pos_p[1] = sqrt(ini_dist*ini_dist - impact_par*impact_par);
ini_pos_p[2] = 0.0;
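// (added note) By Pythagoras, the projectile then starts at distance ini_dist from the
// target at the origin, with perpendicular offset equal to the impact parameter.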
// now move the projectile to its initial position and set the initial velocity (ini_vel) in y-direction
for(i=0; i<N_p; i++)
{
for(j=0; j<DIM; j++)
p[i].x[j] += ini_pos_p[j];
p[i].v[1] += ini_vel;
}
// set like that for now, but will be corrected below along with barycentric correction
for(i=0; i<DIM; i++) {
proj_pos_final[i] = ini_pos_p[i];
targ_pos_final[i] = 0.0;
targ_vel_final[i] = 0.0;
}
proj_vel_final[0] = proj_vel_final[2] = 0.0;
proj_vel_final[1] = ini_vel;
}
// if in N_input mode, move projectile and target to the correct positions and velocities (from the coordinates-file)
if( N_input )
{
for(i=0; i<N_p; i++)
for(j=0; j<DIM; j++) {
p[i].x[j] += N_input_data[0].x[j];
p[i].v[j] += N_input_data[0].v[j];
}
for( i=N_p; i<(N_p+N_t); i++)
for( j=0; j<DIM; j++) {
p[i].x[j] += N_input_data[1].x[j];
p[i].v[j] += N_input_data[1].v[j];
}
for( i=0; i<DIM; i++) {
proj_pos_final[i] = N_input_data[0].x[i];
proj_vel_final[i] = N_input_data[0].v[i];
targ_pos_final[i] = N_input_data[1].x[i];
targ_vel_final[i] = N_input_data[1].v[i];
}
}
// apply a barycentric correction, i.e. transform pos and vel of all particles to a frame barycentric w.r.t. proj + target
// first calculate the (proj + target) center of mass's position and velocity in the initial frame
for( i=0; i<DIM; i++)
baryc_x[i] = baryc_v[i] = 0.0;
for( i=0; i<(N_p+N_t); i++)
for( j=0; j<DIM; j++) {
baryc_x[j] += p[i].mass * p[i].x[j];
baryc_v[j] += p[i].mass * p[i].v[j];
}
for( i=0; i<DIM; i++) {
baryc_x[i] /= M;
baryc_v[i] /= M;
}
// now perform a Galilean transformation (here: x'=x-x_b and v'=v-v_b) to the center-of-mass frame (x' and v') for all particles
if( !N_input || (N_input && b_flag) )
{
for( i=0; i<N; i++)
for( j=0; j<DIM; j++) {
p[i].x[j] -= baryc_x[j];
p[i].v[j] -= baryc_v[j];
}
for( i=0; i<DIM; i++) {
proj_pos_final[i] -= baryc_x[i];
proj_vel_final[i] -= baryc_v[i];
targ_pos_final[i] -= baryc_x[i];
targ_vel_final[i] -= baryc_v[i];
}
}
// initialize yet missing components of the particle array
for( i=0; i<N; i++)
{
p[i].damage = 0.0;
#ifdef MILUPH
p[i].plastic_strain = 0.0;
p[i].temp = TEMP;
#endif
for( j=0; j<DIM; j++)
for( k=0; k<DIM; k++)
p[i].S[j][k] = 0.0;
p[i].flaws.n_flaws = 0;
}
// print scenario information
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Particle numbers:\n");
if( !N_input )
fprintf(stdout, " desired total N = %d\t actual/final total N = %d\n", N_des, N);
else if( N_input && !M_output )
fprintf(stdout, " desired N_p+N_t = %d\t actual/final N_p+N_t = %d\t N_other_bodies (added as SPH particles) = %d\t actual/final total N = %d\n", N_des, N_p+N_t, N_bodies-2, N);
else if( N_input && M_output )
fprintf(stdout, " desired N_p+N_t = %d\t actual/final N_p+N_t = %d\t N_other_bodies (written to pointmasses-file) = %d\n", N_des, N, N_bodies-2);
fprintf(stdout, " projectile: N_des = %d\t N = %d\t N_core = %d\t N_mantle = %d\t N_shell = %d\n", N_p_des, N_p, N_p_c, N_p_m, N_p-N_p_c-N_p_m);
fprintf(stdout, " target: N_des = %d\t N = %d\t N_core = %d\t N_mantle = %d\t N_shell = %d\n", N_t_des, N_t, N_t_c, N_t_m, N_t-N_t_c-N_t_m);
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Materials:\n");
fprintf(stdout, " core/mantle/shell: \"%s\"/\"%s\"/\"%s\"\n", mat[CORE].mat_name, mat[MANTLE].mat_name, mat[SHELL].mat_name);
fprintf(stdout, " core: mat. type = %d\t rho_0 = %g\t cs = %e\t eos = %c\n", mat[CORE].mat_type, mat[CORE].rho_0, mat[CORE].cs, mat[CORE].eos);
if( mat[CORE].eos == 'A' && hydrostructFlag )
fprintf(stdout, " table-file = %s\t n_rho = %d\t n_e = %d\n", mat[CORE].aneos.table_file, mat[CORE].aneos.n_rho, mat[CORE].aneos.n_e);
if( mat[CORE].eos == 'I' )
fprintf(stdout, " p_0 = %g\t gamma = %g\t polytropic_K = %g\n", mat[CORE].ideal_gas.p_0, mat[CORE].ideal_gas.gamma, mat[CORE].ideal_gas.polytropic_K);
fprintf(stdout, " mantle: mat. type = %d\t rho_0 = %g\t cs = %e\t eos = %c\n", mat[MANTLE].mat_type, mat[MANTLE].rho_0, mat[MANTLE].cs, mat[MANTLE].eos);
if( mat[MANTLE].eos == 'A' && hydrostructFlag )
fprintf(stdout, " table-file = %s\t n_rho = %d\t n_e = %d\n", mat[MANTLE].aneos.table_file, mat[MANTLE].aneos.n_rho, mat[MANTLE].aneos.n_e);
if( mat[MANTLE].eos == 'I' )
fprintf(stdout, " p_0 = %g\t gamma = %g\t polytropic_K = %g\n", mat[MANTLE].ideal_gas.p_0, mat[MANTLE].ideal_gas.gamma, mat[MANTLE].ideal_gas.polytropic_K);
fprintf(stdout, " shell: mat. type = %d\t rho_0 = %g\t cs = %e\t eos = %c\n", mat[SHELL].mat_type, mat[SHELL].rho_0, mat[SHELL].cs, mat[SHELL].eos);
if( mat[SHELL].eos == 'A' && hydrostructFlag )
fprintf(stdout, " table-file = %s\t n_rho = %d\t n_e = %d\n", mat[SHELL].aneos.table_file, mat[SHELL].aneos.n_rho, mat[SHELL].aneos.n_e);
if( mat[SHELL].eos == 'I' )
fprintf(stdout, " p_0 = %g\t gamma = %g\t polytropic_K = %g\n", mat[SHELL].ideal_gas.p_0, mat[SHELL].ideal_gas.gamma, mat[SHELL].ideal_gas.polytropic_K);
#ifdef MILUPH
fprintf(stdout, " all: artificial viscosity: alpha = %g\n", mat->alpha);
fprintf(stdout, " beta = %g\n", mat->beta);
#endif
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Masses:\n");
fprintf(stdout, " total: desired: M = %e\n", M_des);
fprintf(stdout, " actual/final: M = %e\n", M);
fprintf(stdout, " projectile: desired: M = %e\t M_core = %e\t M_mantle = %e\t M_shell = %e\n", M_p_des, M_p_c_des, M_p_m_des, M_p_s_des);
fprintf(stdout, " actual/final: M = %e\t M_core = %e\t M_mantle = %e\t M_shell = %e\n", M_p, M_p_c, M_p_m, M_p-M_p_c-M_p_m);
fprintf(stdout, " target: desired: M = %e\t M_core = %e\t M_mantle = %e\t M_shell = %e\n", M_t_des, M_t_c_des, M_t_m_des, M_t_s_des);
fprintf(stdout, " actual/final: M = %e\t M_core = %e\t M_mantle = %e\t M_shell = %e\n", M_t, M_t_c, M_t_m, M_t-M_t_c-M_t_m);
if( !hydrostructFlag && !useProfilesFlag )
{
fprintf(stdout, " single particle masses: projectile: core = %e\t mantle = %e\t shell = %e\n", M_particle_p[CORE], M_particle_p[MANTLE], M_particle_p[SHELL]);
fprintf(stdout, " target: core = %e\t mantle = %e\t shell = %e\n", M_particle_t[CORE], M_particle_t[MANTLE], M_particle_t[SHELL]);
}
fprintf(stdout, "Mantle/shell mass fractions:\n");
fprintf(stdout, " projectile: mantle: desired = %g\t actual/final = %g\n", C_p_m_des, M_p_m/M_p);
fprintf(stdout, " shell: desired = %g\t actual/final = %g\n", C_p_s_des, (M_p-M_p_c-M_p_m)/M_p);
fprintf(stdout, " target: mantle: desired = %g\t actual/final = %g\n", C_t_m_des, M_t_m/M_t);
fprintf(stdout, " shell: desired = %g\t actual/final = %g\n", C_t_s_des, (M_t-M_t_c-M_t_m)/M_t);
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Radii:\n");
fprintf(stdout, " projectile: desired: R = %e\t R_core = %e\t R_mantle = %e\n", R_p_des, R_p_c_des, R_p_m_des);
fprintf(stdout, " actual/final: R = %e\t R_core = %e\t R_mantle = %e\n", R_p, R_p_c, R_p_m);
fprintf(stdout, " target: desired: R = %e\t R_core = %e\t R_mantle = %e\n", R_t_des, R_t_c_des, R_t_m_des);
fprintf(stdout, " actual/final: R = %e\t R_core = %e\t R_mantle = %e\n", R_t, R_t_c, R_t_m);
fprintf(stdout, " sum of actual/final radii = %e\n", R_p+R_t);
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Geometry:\n");
if( N_input )
{
fprintf(stdout, " Initial positions and velocities of proj + target + additional pointmasses are given in cartesian coordinates via file '%s'.\n", coordfile);
if( b_flag )
fprintf(stdout, " This whole arrangement is set up in a frame that is (initially) barycentric w.r.t. proj + target alone.\n");
if( M_output )
fprintf(stdout, " Data on the %d additional pointmasses is written to the file '%s' for processing by miluphcuda.\n", N_bodies-2, pointmassesfile);
else
fprintf(stdout, " The %d additional pointmasses are included as single SPH particles each.\n", N_bodies-2);
fprintf(stdout, "\n The relative two-body orbit of proj + targ (i.e., neglecting all additional pointmasses) is used to estimate collision parameters ...\n");
collision_parameters_from_cartesian(M_p, M_t, R_p, R_t, N_input_data[0].x, N_input_data[1].x, N_input_data[0].v, N_input_data[1].v, &N_input_impact_angle, &N_input_impact_vel_vesc, &N_input_impact_vel_abs);
if( N_input_impact_angle >= 0.0 ) {
fprintf(stdout, " It's a physical collision (pericenter distance < R_p+R_t) with parameters:\n");
fprintf(stdout, " impact angle = %e deg\n impact velocity = %e\n v/v_esc = %e\n", N_input_impact_angle*180.0/M_PI, N_input_impact_vel_abs, N_input_impact_vel_vesc);
fprintf(stdout, " collision timescale (R_p+R_t)/|v_imp| = %g sec\n\n", (R_p+R_t)/fabs(N_input_impact_vel_abs) );
}
else {
fprintf(stdout, " It's NOT a physical collision (pericenter distance > R_p+R_t = %e) with parameters at pericenter:\n", R_p+R_t);
fprintf(stdout, " relative velocity = %e\n v/v_esc = %e\n", N_input_impact_vel_abs, N_input_impact_vel_vesc);
fprintf(stdout, " collision timescale (R_p+R_t)/|v_pericenter| = %g sec\n\n", (R_p+R_t)/fabs(N_input_impact_vel_abs) );
}
}
else
{
if( vel_vesc_angle )
fprintf(stdout, " At \"touching ball\" distance (R_p+R_t = %e):\n v_imp = %e\n v_imp/v_esc = %e\n impact angle = %e deg\n", R_p+R_t, impact_vel_abs, vel_vesc, impact_angle);
fprintf(stdout, " At initial distance (ini_dist = %e):\n ini_vel = %e\n impact parameter = %e\n", ini_dist, ini_vel, impact_par);
fprintf(stdout, " collision timescale (R_p+R_t)/|v_imp| = %g sec\n", (R_p+R_t)/fabs(impact_vel_abs) );
}
if( !N_input )
fprintf(stdout, " projectile position before barycentric correction = %24.16le %24.16le %24.16le\n", ini_pos_p[0], ini_pos_p[1], ini_pos_p[2]);
if( !N_input || (N_input && b_flag) )
{
fprintf(stdout, " Barycentric correction applied (w.r.t. proj + target). Barycenter initially at:\n");
fprintf(stdout, " x/y/z = %24.16le %24.16le %24.16le\n", baryc_x[0], baryc_x[1], baryc_x[2]);
fprintf(stdout, " vx/vy/vz = %24.16le %24.16le %24.16le\n", baryc_v[0], baryc_v[1], baryc_v[2]);
}
fprintf(stdout, " Final positions and velocities:\n");
fprintf(stdout, " projectile: x/y/z = %24.16le %24.16le %24.16le vx/vy/vz = %24.16le %24.16le %24.16le\n",
proj_pos_final[0], proj_pos_final[1], proj_pos_final[2], proj_vel_final[0], proj_vel_final[1], proj_vel_final[2] );
fprintf(stdout, " target: x/y/z = %24.16le %24.16le %24.16le vx/vy/vz = %24.16le %24.16le %24.16le\n",
targ_pos_final[0], targ_pos_final[1], targ_pos_final[2], targ_vel_final[0], targ_vel_final[1], targ_vel_final[2] );
fprintf(stdout, "----------------------------------------------------------------\n");
if( ParticleGeometry == 0 )
fprintf(stdout, "Initial lattice structure:\n SIMPLE CUBIC\n");
if( ParticleGeometry == 1 )
fprintf(stdout, "Initial lattice structure:\n HEXAGONALLY CLOSE-PACKED\n");
if( ParticleGeometry == 2 )
fprintf(stdout, "Initial particle geometry:\n SPHERICAL SHELL SETUP with SEAGen\n");
fprintf(stdout, " mean particle dist. mpd = %e\t sml = %e ( = mpd * %e )\n", mpd, mat->sml, sml_factor);
if( ParticleGeometry == 2 ) // spherical shell setup
fprintf(stdout, " ( mpd = MAX(mpd-proj,mpd-targ) = MAX(%e,%e) )\n", mpd_p, mpd_t);
#ifdef ROTATED_CONFIGURATION
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Rotated (by a fixed angle) initial configuration used. Angles (deg):\n");
fprintf(stdout, " target: z/y/x = %g %g %g\n", T_Z_ANGLE, T_Y_ANGLE, T_X_ANGLE);
fprintf(stdout, " projectile: z/y/x = %g %g %g\n", P_Z_ANGLE, P_Y_ANGLE, P_X_ANGLE);
#endif
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Initial rotation:\n");
if( p_rot_period > 0.0 ) {
fprintf(stdout, " projectile: period = %g sec\n", p_rot_period);
fprintf(stdout, " rotation-axis = %g %g %g\n", p_rot_axis[0], p_rot_axis[1], p_rot_axis[2] );
} else {
fprintf(stdout, " None for projectile.\n");
}
if( t_rot_period > 0.0 ) {
fprintf(stdout, " target: period = %g sec\n", t_rot_period);
fprintf(stdout, " rotation-axis = %g %g %g\n", t_rot_axis[0], t_rot_axis[1], t_rot_axis[2] );
} else {
fprintf(stdout, " None for target.\n");
}
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Relaxation technique:\n");
if( hydrostructFlag ) {
fprintf(stdout, " Calculate hydrostatic structure and set particle densities/masses accordingly.\n");
fprintf(stdout, " Calculate and set internal energies following adiabatic compression.\n");
} else if( useProfilesFlag ) {
fprintf(stdout, " Use given radial profiles to set densities, masses, and internal energies.\n");
} else {
fprintf(stdout, " None.\n");
}
fprintf(stdout, "----------------------------------------------------------------\n");
fprintf(stdout, "Damage model:\n");
fprintf(stdout, " weibulling core material: ");
if (weibull_core == 1)
fprintf(stdout, "yes\t k = %g\t m = %g\n", mat[CORE].k, mat[CORE].m);
else
fprintf(stdout, "no\n");
fprintf(stdout, " weibulling mantle material: ");
if (weibull_mantle == 1)
fprintf(stdout, "yes\t k = %g\t m = %g\n", mat[MANTLE].k, mat[MANTLE].m);
else
fprintf(stdout, "no\n");
fprintf(stdout, " weibulling shell material: ");
if (weibull_shell == 1)
fprintf(stdout, "yes\t k = %g\t m = %g\n", mat[SHELL].k, mat[SHELL].m);
else
fprintf(stdout, "no\n");
fprintf(stdout, "----------------------------------------------------------------\n");
if( N_input == TRUE ) // compute ini_vel for Courant-like criterion
{
for(i=0; i<DIM; i++)
ini_vel_vec[i] = N_input_data[0].v[i] - N_input_data[1].v[i];
ini_vel = sqrt(ini_vel_vec[0]*ini_vel_vec[0] + ini_vel_vec[1]*ini_vel_vec[1] + ini_vel_vec[2]*ini_vel_vec[2]);
}
fprintf(stdout, "A courant-like criterion suggests:\t Delta_t < %e\n", mpd/MAX( MAX(mat[CORE].cs, MAX(mat[MANTLE].cs,mat[SHELL].cs) ),fabs(ini_vel) ));
// weibull particles if desired and write (weibulled or not) data to output file
V_p_c_uncomp = M_p_c/mat[CORE].rho_0;
V_p_m_uncomp = M_p_m/mat[MANTLE].rho_0;
V_p_s_uncomp = (M_p-M_p_c-M_p_m)/mat[SHELL].rho_0;
V_t_c_uncomp = M_t_c/mat[CORE].rho_0;
V_t_m_uncomp = M_t_m/mat[MANTLE].rho_0;
V_t_s_uncomp = (M_t-M_t_c-M_t_m)/mat[SHELL].rho_0;
if( weibull_core == 1 )
{
weibull_particles(p, &mat[CORE], V_p_c_uncomp, N_p, N_p_c, " the projectile's core,"); // weibull projectile's core
weibull_particles(p+N_p, &mat[CORE], V_t_c_uncomp, N_t, N_t_c, " the target's core,"); // weibull target's core
}
if( weibull_mantle == 1 )
{
weibull_particles(p, &mat[MANTLE], V_p_m_uncomp, N_p, N_p_m, " the projectile's mantle,"); // weibull projectile's mantle
weibull_particles(p+N_p, &mat[MANTLE], V_t_m_uncomp, N_t, N_t_m, " the target's mantle,"); // weibull target's mantle
}
if( weibull_shell == 1 )
{
weibull_particles(p, &mat[SHELL], V_p_s_uncomp, N_p, N_p-N_p_c-N_p_m, " the projectile's shell,"); // weibull projectile's shell
weibull_particles(p+N_p, &mat[SHELL], V_t_s_uncomp, N_t, N_t-N_t_c-N_t_m, " the target's shell,"); // weibull target's shell
}
if ( (ofl = fopen(outfile,"w")) == NULL )
ERRORVAR("FILE ERROR! Cannot open '%s' for writing!\n", outfile)
write_outfile(ofl, p, N, OutputMode); // write whole p (all information on all particles) to the output file
fclose(ofl);
// clean up
#ifdef MILUPH
if( mat[CORE].eos == 'A' && hydrostructFlag )
free_ANEOS_table_memory(&mat[CORE]);
if( mat[MANTLE].eos == 'A' && hydrostructFlag )
free_ANEOS_table_memory(&mat[MANTLE]);
if( mat[SHELL].eos == 'A' && hydrostructFlag )
free_ANEOS_table_memory(&mat[SHELL]);
#endif
#ifdef MILUPHCUDA
if( allocated_ANEOS_mem_core_flag )
free_ANEOS_table_memory(&mat[CORE]);
if( allocated_ANEOS_mem_mantle_flag )
free_ANEOS_table_memory(&mat[MANTLE]);
if( allocated_ANEOS_mem_shell_flag )
free_ANEOS_table_memory(&mat[SHELL]);
#endif
free(p);
if( N_input == TRUE )
free(N_input_data);
if( useProfilesFlag )
{
if( PROFILE_FILE_PROJ != 0 ) // there is a projectile
free(profile_projectile);
if( PROFILE_FILE_TARG != 0 ) // there is a target
free(profile_target);
}
return(0);
} // end 'main()'
void weibull_particles(particle* p, material* mat, double volume, int n_all_p, int n_mat_p, const char* message)
// Distributes flaws following a Weibull distribution to the particles starting at address 'p' that have material type 'mat->mat_type', until every particle of that material has at least one flaw.
// 'n_all_p' is the total number of particles to consider (starting at address 'p'); 'n_mat_p' is the number of particles of the material type that shall receive flaws.
// 'volume' is the total volume of the material to be weibulled; 'message' optionally contains text to insert in the output message below.
{
int i,j;
int n_miss = n_mat_p; // n_miss = number of particles left without any flaws
double act_thr;
// check whether the implementation-dependent resolution of the rand() generator poses a threat to the statistical quality of the random numbers:
// below, n_all_p/(RAND_MAX+1.0) yields a constant < 1; multiplied by rand() and truncated to int, some particle indices would become noticeably more likely than others if n_all_p came close to RAND_MAX.
// e.g. drand48() would be an alternative ...
if (n_all_p > 0.1*RAND_MAX)
{
fprintf(stderr, "WARNING! The random number generator for weibulling produces int values only in the range [0,%d]. ", RAND_MAX);
fprintf(stderr, "This limited resolution could threaten the statistical quality. Program stopped, think about it.\n");
exit(1);
}
// checking whether the number of particles in p with desired material equals n_mat_p:
for(i=j=0; i<n_all_p; i++)
if ( p[i].mat_type == mat->mat_type )
j++;
if ( j != n_mat_p )
ERRORTEXT("ERROR! Strange particle number mismatch during weibulling.\n")
fprintf(stdout, "--------------------------------\n");
fprintf(stdout, "Now weibulling%s material '%s', material type %d ... ", message, mat->mat_name, mat->mat_type);
j=0;
while(n_miss) // distribute flaws until every particle has at least one flaw, i.e. until n_miss==0
{
i = (int) ( (double)n_all_p * rand()/(RAND_MAX+1.0) ); // generating random particle index in p, (int) truncates
if ( p[i].mat_type == mat->mat_type )
{
if (p[i].flaws.n_flaws == 0)
{
n_miss--;
if ( (p[i].flaws.act_thr = (double*)malloc( sizeof(double) )) == NULL )
ERRORTEXT("ERROR during memory allocation for flaw activation thresholds!\n")
}
else
if ( (p[i].flaws.act_thr = realloc( p[i].flaws.act_thr, sizeof(double)*(p[i].flaws.n_flaws+1) )) == NULL )
ERRORTEXT("ERROR during memory allocation for flaw activation thresholds!\n")
j++;
act_thr = pow(j/(mat->k)/volume,1.0/(mat->m)); // activation threshold according to Weibull distribution (for flaw j)
p[i].flaws.act_thr[p[i].flaws.n_flaws] = act_thr;
p[i].flaws.n_flaws++;
}
}
fprintf(stdout, "Done.\n");
fprintf(stdout, "Distributed %d flaws for %d particles.\n", j, n_mat_p);
fprintf(stdout, "Mean number of flaws per particle: %g\n", (double)j/n_mat_p);
}
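/* Added illustration (not part of the original tool): the thresholds assigned above follow
 * eps_act(j) = ( j/(k*V) )^(1/m) for the j-th flaw in material volume V with Weibull
 * parameters k and m. Below is a hypothetical stand-alone helper (name and usage are
 * assumptions), e.g. for tabulating thresholds offline; it relies on pow() from math.h
 * just like the code above: */
static double weibull_activation_threshold(int j, double k, double m, double volume)
{
    // activation threshold of the j-th flaw according to the Weibull distribution
    return pow( (double)j/(k*volume), 1.0/m );
}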
void set_profile_rho_e(particle* p, int i, double r2, radial_profile_data *profile, int n)
// This function sets (linearly interpolates) rho and e for one particle 'p[i]' following the radial profile in 'profile' (length 'n'). 'p' is the absolute (first) address
// of the particle vector, 'i' is the index/element of this vector for which rho and e should be found, 'r2' is the squared distance to the origin.
{
int j;
j=0;
while( j<n )
{
if ( r2 < pow(profile[j].r,2) )
break;
j++;
}
if( j==0 ) // particle inside the innermost radius in 'profile'
{
p[i].rho = profile[0].rho;
p[i].e = profile[0].e;
fprintf(stderr, "WARNING: Particle with index %d has r = %.16le which is inside the innermost datapoint (at r = %.16le) in the radial profile. Assigned rho = %e and e = %e to it.\n",
i, sqrt(r2), profile[0].r, p[i].rho, p[i].e);
}
else if( j<n ) //particle somewhere between two radii in 'profile'
{
p[i].rho = profile[j-1].rho + (profile[j].rho-profile[j-1].rho)/(profile[j].r-profile[j-1].r) * (sqrt(r2)-profile[j-1].r);
p[i].e = profile[j-1].e + (profile[j].e-profile[j-1].e)/(profile[j].r-profile[j-1].r) * (sqrt(r2)-profile[j-1].r);
}
else // particle outside the outermost radius in 'profile'
{
p[i].rho = profile[n-1].rho;
p[i].e = profile[n-1].e;
fprintf(stderr, "WARNING: Particle with index %d has r = %.16le which is outside the outermost datapoint (at r = %.16le) in the radial profile. Assigned rho = %e and e = %e to it.\n",
i, sqrt(r2), profile[n-1].r, p[i].rho, p[i].e);
}
}
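/* Added illustration (hypothetical, not in the original): the interpolation used above for
 * both rho and e is plain linear interpolation between the two neighboring profile points
 * (r1,f1) and (r2,f2), factored out here as a sketch: */
static double interpolate_profile_value(double r, double r1, double f1, double r2, double f2)
{
    // f(r) = f1 + (f2 - f1)/(r2 - r1) * (r - r1), assuming r1 < r < r2
    return f1 + (f2 - f1)/(r2 - r1) * (r - r1);
}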
|
DRB110-ordered-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
 * Proper use of the ordered directive and clause, no data races
* */
int main()
{
int x =0;
#pragma omp parallel for ordered
for (int i = 0; i < 100; ++i) {
#pragma omp ordered
x++;
}
assert (x==100);
printf ("x=%d\n",x);
return 0;
}
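/* Note (added sketch, not part of the original benchmark): the ordered construct serializes
 * the increments, so the loop is race-free but effectively sequential. The same race-free
 * count could also be obtained with a reduction, e.g.:
 *
 *   #pragma omp parallel for reduction(+:x)
 *   for (int i = 0; i < 100; ++i) x++;
 *
 * The benchmark deliberately uses 'ordered' so that tools can be checked against false positives. */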
|
broadcast_reduce-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce-inl.h
 * \brief CPU-specific function definitions of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
#include <mxnet/operator_util.h>
#include <algorithm>
#include <vector>
#include <string>
#include <utility>
#include "../mshadow_op.h"
#include "../operator_common.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
const int MAX_DIM = 5;
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
Shape<ndim> stride;
index_t cumprod = 1;
#pragma unroll
for (int i = ndim - 1; i >= 0; --i) {
stride[i] = (shape[i] > 1) ? cumprod : 0;
cumprod *= shape[i];
}
return stride;
}
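// Added example (assumption, not in the original header): for shape (2,1,3) this yields
// stride (3,0,1) -- the zero stride on the size-1 axis maps every coordinate along that
// axis to the same element, which is how the dot()/unravel_dot() helpers below implement
// broadcasting without materializing copies.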
template<int ndim>
MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) {
*j = 0;
*k = 0;
#pragma unroll
for (index_t i = ndim-1, idx_t = idx; i >=0; --i) {
const auto tmp = idx_t / shape[i];
const auto coord = idx_t - tmp*shape[i];
*j += coord*stridej[i];
*k += coord*stridek[i];
idx_t = tmp;
}
}
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
Shape<ndim> ret;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret[i] = j - tmp*shape[i];
j = tmp;
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
index_t ret = 0;
#pragma unroll
for (index_t i = 0; i < ndim; ++i) {
ret = ret * shape[i] + (shape[i] > 1) * coord[i];
}
return ret;
}
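// Added worked example (not in the original): for shape (2,3), unravel(4) gives coord (1,1)
// and ravel((1,1),(2,3)) = 1*3 + 1 = 4; against a broadcast shape (1,3) the same coord
// ravels to just 1, because the (shape[i] > 1) factor drops size-1 axes.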
template<int ndim>
MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims,
Shape<ndim>* stride) {
int mdim = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i) {
mdim += small[i] != big[i];
(*dims)[i] = (*stride)[i] = 1;
}
#pragma unroll
for (int i = ndim-1, j = mdim, s = 1; i >= 0; --i) {
if (small[i] != big[i]) {
--j;
(*stride)[j] = s;
(*dims)[j] = big[i];
}
s *= big[i];
}
return mdim;
}
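// Added example (assumption): small = (2,1,4), big = (2,3,4) gives mdim = 1 with
// dims[0] = 3 and stride[0] = 4, i.e. the reduction runs over the length-3 axis,
// stepping 4 elements at a time in the flattened big tensor.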
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (index_t i = ndim-1, j = idx; i >=0; --i) {
auto tmp = j / shape[i];
ret += (j - tmp*shape[i])*stride[i];
j = tmp;
}
return ret;
}
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
index_t ret = 0;
#pragma unroll
for (int i = 0; i < ndim; ++i)
ret += coord[i] * stride[i];
return ret;
}
template<typename DType>
MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) {
if (addto) {
*dst += src;
} else {
*dst = src;
}
}
template<int ndim, typename DType, typename OP>
MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto,
const DType* __restrict lhs,
const DType* __restrict rhs, DType* out,
const Shape<ndim>& lshape, const Shape<ndim>& rshape,
const Shape<ndim>& oshape) {
const Shape<ndim> coord = unravel(idx, oshape);
const index_t j = ravel(coord, lshape);
const index_t k = ravel(coord, rshape);
assign(&out[idx], addto, OP::Map(lhs[j], rhs[k]));
}
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
const DType* __restrict big, OType *small,
const Shape<ndim>& bshape, const Shape<ndim>& sshape,
const Shape<ndim>& rshape, const Shape<ndim>& rstride) {
Shape<ndim> coord = unravel(idx, sshape);
index_t j = ravel(coord, bshape);
AType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
coord = unravel(k, rshape);
Reducer::Reduce(val, AType(OP::Map(big[j + dot(coord, rstride)])), residual);
}
Reducer::Finalize(val, residual);
assign(&small[idx], addto, OType(val));
}
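// Added note (not in the original): each output element idx is first mapped to its base
// offset j in 'big' via the small shape; the inner loop then visits the M reduced
// positions through rshape/rstride, and the Reducer accumulates with a residual term
// (used by some reducers for Kahan-style compensation) before Finalize writes the result.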
#ifdef __CUDACC__
#include "broadcast_reduce-inl.cuh"
#else
template<int ndim, typename DType, typename OP>
void binary_broadcast_compute(const size_t N, const bool addto, const DType *lhs,
const DType *rhs, DType *out, const Shape<ndim> lshape,
const Shape<ndim> rshape, const Shape<ndim> oshape) {
for (size_t idx = 0; idx < N; ++idx) {
binary_broadcast_assign<ndim, DType, OP>(idx, addto, lhs, rhs, out, lshape, rshape, oshape);
}
}
template<int ndim, typename DType, typename OP>
void BinaryBroadcastComputeImpl(Stream<cpu> *s, const OpReqType req,
const TBlob& lhs, const TBlob& rhs, const TBlob& out) {
if (req == kNullOp) return;
size_t N = out.shape_.Size();
binary_broadcast_compute<ndim, DType, OP>(N, req == kAddTo, lhs.dptr<DType>(), rhs.dptr<DType>(),
out.dptr<DType>(), lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
out.shape_.get<ndim>());
}
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
const DType *big, OType *small, const Shape<ndim> bshape,
const Shape<ndim> sshape, const Shape<ndim> rshape,
const Shape<ndim> rstride) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small,
bshape, sshape, rshape, rstride);
}
}
template <typename Reducer, int ndim, typename DType, typename OP>
void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto,
const DType* big, DType* small,
const Shape<ndim> bshape,
const Shape<ndim> sshape,
const Shape<ndim> rshape,
const Shape<ndim> rstride,
const index_t* ws_dptr) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
Shape<ndim> coord = unravel(idx, sshape);
index_t j = ravel(coord, bshape);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual);
}
assign(&small[idx], addto, val);
}
}
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
size_t N = small.shape_.Size(), M = rshape.Size();
if (!safe_acc) {
seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(
N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
} else {
MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(
N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
});
});
}
}
template <typename Reducer, int ndim, typename DType, typename OP>
void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big) {
using namespace mxnet_op;
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_);
size_t N = small.shape_.Size(), M = rshape.Size();
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t k = 0; k < static_cast<index_t>(M); k++) {
Shape<ndim> coord = unravel(k, rshape);
ws_dptr[k] = dot(coord, rstride);
}
seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(
N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(),
small.shape_.get<ndim>(), rshape, rstride, ws_dptr);
}
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req,
const mxnet::TShape& big) {
return 0;
}
template<int ndim, typename DType>
size_t ReduceWorkspaceSize(Stream<cpu> *s, const mxnet::TShape& small, const OpReqType req,
const mxnet::TShape& big, const mxnet::TShape& lhs,
const mxnet::TShape& rhs) {
return 0;
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto,
const DType* __restrict big, const DType* __restrict lhs,
const DType* __restrict rhs, DType *small,
const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0,
const Shape<ndim>& rhs_shape0,
const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape,
const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
const Shape<ndim>& rhs_stride) {
Shape<ndim> coord = unravel(idx, small_shape);
const index_t idx_big0 = ravel(coord, big_shape);
const index_t idx_lhs0 = ravel(coord, lhs_shape0);
const index_t idx_rhs0 = ravel(coord, rhs_shape0);
DType val, residual;
Reducer::SetInitValue(val, residual);
for (size_t k = 0; k < M; ++k) {
Shape<ndim> coord_big = unravel(k, rshape);
index_t idx_big = idx_big0 + dot(coord_big, rstride);
Shape<ndim> coord_lhs = unravel(k, lhs_shape);
index_t idx_lhs = idx_lhs0 + dot(coord_lhs, lhs_stride);
Shape<ndim> coord_rhs = unravel(k, rhs_shape);
index_t idx_rhs = idx_rhs0 + dot(coord_rhs, rhs_stride);
Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual);
}
Reducer::Finalize(val, residual);
assign(&small[idx], addto, val);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute(const size_t N, const size_t M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride);
}
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void Reduce(Stream<cpu> *s, const TBlob& small, const OpReqType req,
const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
const TBlob& rhs) {
if (req == kNullOp) return;
Shape<ndim> rshape, rstride;
diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
size_t N = small.shape_.Size();
size_t M = rshape.Size();
Shape<ndim> lhs_shape, lhs_stride;
diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
Shape<ndim> rhs_shape, rhs_stride;
diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(
N, M, req == kAddTo,
big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(),
rshape, rstride,
lhs_shape, lhs_stride,
rhs_shape, rhs_stride,
lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>());
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
|
core_ctrtri.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrtri.c, normal z -> c, Fri Sep 28 17:38:24 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trtri
*
* Computes the inverse of an upper or lower
* triangular matrix A.
*
*******************************************************************************
*
* @param[in] uplo
* = PlasmaUpper: Upper triangle of A is stored;
* = PlasmaLower: Lower triangle of A is stored.
*
* @param[in] diag
* = PlasmaNonUnit: A is non-unit triangular;
* = PlasmaUnit: A is unit triangular.
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in,out] A
* On entry, the triangular matrix A. If uplo = 'U', the
* leading n-by-n upper triangular part of the array A
* contains the upper triangular matrix, and the strictly
* lower triangular part of A is not referenced. If uplo =
* 'L', the leading n-by-n lower triangular part of the array
* A contains the lower triangular matrix, and the strictly
* upper triangular part of A is not referenced. If diag =
* 'U', the diagonal elements of A are also not referenced and
* are assumed to be 1. On exit, the (triangular) inverse of
* the original matrix.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,n).
*
* @retval PlasmaSuccess on successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, A(i,i) is exactly zero. The triangular
 *         matrix is singular and its inverse cannot be computed.
*
******************************************************************************/
__attribute__((weak))
int plasma_core_ctrtri(plasma_enum_t uplo, plasma_enum_t diag,
int n,
plasma_complex32_t *A, int lda)
{
return LAPACKE_ctrtri_work(LAPACK_COL_MAJOR,
lapack_const(uplo), lapack_const(diag),
n, A, lda);
}
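/* A minimal usage sketch (illustrative only: the order n, leading dimension,
 * and matrix contents below are hypothetical, and error handling is omitted):
 *
 *     plasma_complex32_t A[4*4];
 *     // ... fill the upper triangle of the 4-by-4 matrix A ...
 *     int info = plasma_core_ctrtri(PlasmaUpper, PlasmaNonUnit, 4, A, 4);
 *     if (info > 0) {
 *         // A(info,info) was exactly zero: A is singular, no inverse exists
 *     }
 */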
/******************************************************************************/
void plasma_core_omp_ctrtri(plasma_enum_t uplo, plasma_enum_t diag,
int n,
plasma_complex32_t *A, int lda,
int iinfo,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(inout:A[0:lda*n])
{
if (sequence->status == PlasmaSuccess) {
int info = plasma_core_ctrtri(uplo, diag,
n, A, lda);
if (info != 0)
plasma_request_fail(sequence, request, iinfo+info);
}
}
}
|
fista.h |
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal
*
* This file is part of SPAMS.
*
* SPAMS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SPAMS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SPAMS. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef FISTA_H
#define FISTA_H
#include <linalg.h>
#include <project.h>
namespace FISTA {
enum loss_t { SQUARE, SQUARE_MISSING, LOG, LOGWEIGHT, MULTILOG, CUR, HINGE, POISSON, INCORRECT_LOSS};
enum regul_t { L0, L1, RIDGE, L2, LINF, L1CONSTRAINT, ELASTICNET, FUSEDLASSO, GROUPLASSO_L2, GROUPLASSO_LINF, GROUPLASSO_L2_L1, GROUPLASSO_LINF_L1, L1L2, L1LINF, L1L2_L1, L1LINF_L1, TREE_L0, TREE_L2, TREE_LINF, GRAPH, GRAPH_RIDGE, GRAPH_L2, TREEMULT, GRAPHMULT, L1LINFCR, NONE, TRACE_NORM, TRACE_NORM_VEC, RANK, RANK_VEC, INCORRECT_REG, GRAPH_PATH_L0, GRAPH_PATH_CONV, LOG_DC, NA};
regul_t regul_from_string(const char* regul) {
if (strcmp(regul,"l0")==0) return L0;
if (strcmp(regul,"l1")==0) return L1;
if (strcmp(regul,"l2")==0) return RIDGE;
if (strcmp(regul,"linf")==0) return LINF;
if (strcmp(regul,"l2-not-squared")==0) return L2;
if (strcmp(regul,"log-dc")==0) return LOG_DC;
if (strcmp(regul,"l1-constraint")==0) return L1CONSTRAINT;
if (strcmp(regul,"elastic-net")==0) return ELASTICNET;
if (strcmp(regul,"fused-lasso")==0) return FUSEDLASSO;
if (strcmp(regul,"group-lasso-l2")==0) return GROUPLASSO_L2;
if (strcmp(regul,"group-lasso-linf")==0) return GROUPLASSO_LINF;
if (strcmp(regul,"sparse-group-lasso-l2")==0) return GROUPLASSO_L2_L1;
if (strcmp(regul,"sparse-group-lasso-linf")==0) return GROUPLASSO_LINF_L1;
if (strcmp(regul,"l1l2")==0) return L1L2;
if (strcmp(regul,"l1linf")==0) return L1LINF;
if (strcmp(regul,"l1l2+l1")==0) return L1L2_L1;
if (strcmp(regul,"l1linf+l1")==0) return L1LINF_L1;
if (strcmp(regul,"tree-l0")==0) return TREE_L0;
if (strcmp(regul,"tree-l2")==0) return TREE_L2;
if (strcmp(regul,"tree-linf")==0) return TREE_LINF;
if (strcmp(regul,"graph")==0) return GRAPH;
if (strcmp(regul,"graph-ridge")==0) return GRAPH_RIDGE;
if (strcmp(regul,"graph-l2")==0) return GRAPH_L2;
if (strcmp(regul,"multi-task-tree")==0) return TREEMULT;
if (strcmp(regul,"multi-task-graph")==0) return GRAPHMULT;
if (strcmp(regul,"l1linf-row-column")==0) return L1LINFCR;
if (strcmp(regul,"trace-norm")==0) return TRACE_NORM;
if (strcmp(regul,"trace-norm-vec")==0) return TRACE_NORM_VEC;
if (strcmp(regul,"rank")==0) return RANK;
if (strcmp(regul,"rank-vec")==0) return RANK_VEC;
if (strcmp(regul,"graph-path-l0")==0) return GRAPH_PATH_L0;
if (strcmp(regul,"graph-path-conv")==0) return GRAPH_PATH_CONV;
if (strcmp(regul,"none")==0) return NONE;
return INCORRECT_REG;
}
loss_t loss_from_string(const char* loss) {
if (strcmp(loss,"square")==0) return SQUARE;
if (strcmp(loss,"square-missing")==0) return SQUARE_MISSING;
if (strcmp(loss,"logistic")==0) return LOG;
if (strcmp(loss,"poisson")==0) return POISSON;
if (strcmp(loss,"weighted-logistic")==0) return LOGWEIGHT;
if (strcmp(loss,"hinge")==0) return HINGE;
if (strcmp(loss,"multi-logistic")==0) return MULTILOG;
if (strcmp(loss,"cur")==0) return CUR;
return INCORRECT_LOSS;
}
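/* Usage sketch (illustrative): both parsers return an INCORRECT_* sentinel on
 * unknown names, so callers can validate user input directly:
 *
 *     loss_t l = loss_from_string(param.name_loss);
 *     if (l == INCORRECT_LOSS) return;   // hypothetical error handling
 */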
void print_loss(const loss_t& loss) {
switch (loss) {
case SQUARE: cout << "Square loss" << endl; break;
case SQUARE_MISSING: cout << "Square loss with missing data" << endl; break;
case LOG: cout << "Logistic loss" << endl; break;
case LOGWEIGHT: cout << "Weighted Logistic loss" << endl; break;
case HINGE: cout << "Hinge loss" << endl; break;
case MULTILOG: cout << "Multiclass logistic Loss" << endl; break;
case POISSON: cout << "Modified Poisson loss" << endl; break;
case CUR: cout << "CUR decomposition" << endl; break;
default: cerr << "Not implemented" << endl;
}
}
bool loss_for_matrices(const loss_t& loss) {
return loss==MULTILOG || loss==CUR;
}
void print_regul(const regul_t& regul) {
switch (regul) {
case L0: cout << "L0 regularization" << endl; break;
case L1: cout << "L1 regularization" << endl; break;
case RIDGE: cout << "L2-squared regularization" << endl; break;
case L2: cout << "L2-not-squared regularization" << endl; break;
case LOG_DC: cout << "reweighted-l1 regularization" << endl; break;
case L1CONSTRAINT: cout << "L1 constraint regularization" << endl; break;
case LINF: cout << "Linf regularization" << endl; break;
case ELASTICNET: cout << "Elastic-net regularization" << endl; break;
case FUSEDLASSO: cout << "Fused Lasso or total variation regularization" << endl; break;
case GROUPLASSO_L2: cout << "Group Lasso L2" << endl; break;
case GROUPLASSO_LINF: cout << "Group Lasso LINF" << endl; break;
case GROUPLASSO_L2_L1: cout << "Group Lasso L2 + L1" << endl; break;
case GROUPLASSO_LINF_L1: cout << "Group Lasso LINF + L1" << endl; break;
case L1L2: cout << "L1L2 regularization" << endl; break;
case L1LINF: cout << "L1LINF regularization" << endl; break;
case TRACE_NORM: cout << "Trace Norm regularization" << endl; break;
case TRACE_NORM_VEC: cout << "Trace Norm regularization for vectors" << endl; break;
case RANK: cout << "Rank regularization" << endl; break;
case RANK_VEC: cout << "Rank regularization for vectors" << endl; break;
case L1L2_L1: cout << "L1L2 regularization + L1" << endl; break;
case L1LINF_L1: cout << "L1LINF regularization + L1" << endl; break;
case TREE_L0: cout << "Tree-L0 regularization" << endl; break;
case TREE_L2: cout << "Tree-L2 regularization" << endl; break;
case TREE_LINF: cout << "Tree-Linf regularization" << endl; break;
case GRAPH: cout << "Graph regularization" << endl; break;
case GRAPH_RIDGE: cout << "Graph+ridge regularization" << endl; break;
case GRAPH_L2: cout << "Graph regularization with l2" << endl; break;
case TREEMULT: cout << "multitask tree regularization" << endl; break;
case GRAPHMULT: cout << "multitask graph regularization" << endl; break;
case L1LINFCR: cout << "L1LINF regularization on rows and columns" << endl; break;
case GRAPH_PATH_L0: cout << "Graph path non-convex regularization" << endl; break;
case GRAPH_PATH_CONV: cout << "Graph path convex regularization" << endl; break;
case NONE: cout << "No regularization" << endl; break;
default: cerr << "Not implemented" << endl;
}
}
bool regul_for_matrices(const regul_t& regul) {
return regul==L1L2 || regul==L1LINF || regul==L1L2_L1 || regul==L1LINF_L1
|| regul==TREEMULT || regul==GRAPHMULT || regul==L1LINFCR ||
regul==TRACE_NORM || regul==RANK;
}
template <typename T> struct ParamFISTA {
ParamFISTA() { num_threads=1; max_it=100; L0=T(0.1); gamma=T(1.5); tol=T(1e-10);
it0=10; max_iter_backtracking=1000; loss=SQUARE; compute_gram=false; admm=false; lin_admm=false;
intercept=false; regul=RIDGE; resetflow=false; delta=0; lambda=0; lambda2=0; lambda3=0; verbose=false;
pos=false; clever=true; a=T(1.0); b=T(0.0); c=T(1.0);
log=false; logName=NULL; ista=false; subgrad=false;
length_names=30;
name_regul=new char[length_names];
name_loss=new char[length_names];
is_inner_weights=false;
inner_weights=NULL;
eval=false;
size_group=1;
sqrt_step=true;
transpose=false;
fixed_step=false;
copied=false;
eval_dual_norm=false;
groups=NULL;
ngroups=0;
linesearch_mode=0;
}
~ParamFISTA() {
if (!copied) {
delete[](name_regul);
delete[](name_loss);
}
};
int num_threads;
int max_it;
T L0;
T gamma;
int length_names;
T lambda;
T delta;
T lambda2;
T lambda3;
T a;
T b;
T c;
T tol;
int it0;
int max_iter_backtracking;
loss_t loss;
bool compute_gram;
bool lin_admm;
bool admm;
bool intercept;
bool resetflow;
regul_t regul;
char* name_regul;
char* name_loss;
bool verbose;
bool pos;
bool clever;
bool log;
bool ista;
bool copied;
bool subgrad;
char* logName;
bool is_inner_weights;
T* inner_weights;
bool eval;
int size_group;
bool sqrt_step;
bool transpose;
bool fixed_step;
bool eval_dual_norm;
int* groups;
int ngroups;
int linesearch_mode;
};
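/* A minimal configuration sketch (illustrative; the field values are
 * hypothetical, every field shown is declared above):
 *
 *     ParamFISTA<double> param;
 *     param.loss = SQUARE;   // 0.5*||x - D*alpha||_2^2
 *     param.regul = L1;      // Lasso penalty
 *     param.lambda = 0.1;    // regularization weight
 *     param.max_it = 200;
 *     param.tol = 1e-6;
 */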
template <typename T> struct ParamReg {
ParamReg() { size_group=1; lambda2d1 = 0; lambda=0; lambda3d1 = 0; pos=false; intercept=false; num_cols=1; graph_st=NULL; tree_st=NULL;
graph_path_st=NULL; resetflow=false; clever=false; linf=true; transpose=false; ngroups=0;
groups=NULL; };
T lambda2d1;
T lambda3d1;
T lambda;
int size_group;
bool pos;
bool intercept;
int num_cols;
GraphPathStruct<T>* graph_path_st;
GraphStruct<T>* graph_st;
TreeStruct<T>* tree_st;
bool resetflow;
bool clever;
bool linf;
bool transpose;
int ngroups;
int* groups;
};
template <typename T>
bool param_for_admm(const ParamFISTA<T>& param) {
return (param.admm) && (param.loss==SQUARE || param.loss == HINGE)
&& (param.regul==GRAPH_L2 || param.regul==GRAPH || param.regul == NONE);
}
template <typename T, typename F = Matrix<T>, typename D = Vector<T> ,
typename E = Vector<T> >
class SplittingFunction {
public:
SplittingFunction() { };
virtual ~SplittingFunction() { };
virtual void init(const E& y) { };
virtual T eval(const D& input) const = 0;
virtual void reset() { };
virtual T eval_split(const F& input) const = 0;
virtual T eval_weighted(const D& input,const F& input_struct, const T* weights) const { return this->eval(input);};
virtual int num_components() const = 0;
virtual void prox_split(F& splitted_w, const T lambda) const = 0;
virtual void init_split_variables(F& splitted_w) const = 0;
virtual void init_prim_var(E& prim_var) const { };
virtual void prox_prim_var(E& out,const E& dual_var, const E& prim_var, const T gamma) const { };
virtual void compute_new_prim(E& prim, const E& prim_var, const E& dual_var, const T gamma, const T delta) const { };
virtual void add_mult_design_matrix(const E& prim, E& out, const T fact) const { };
private:
explicit SplittingFunction<T,F,D,E>(const SplittingFunction<T,F,D,E>& loss);
SplittingFunction<T,F,D,E>& operator=(const SplittingFunction<T,F,D,E>& loss);
};
template <typename T, typename D = Vector<T> , typename E = Vector<T> >
class Loss {
public:
Loss() { };
virtual ~Loss() { };
virtual void init(const E& input) = 0;
virtual T eval(const D& input) const = 0;
virtual void grad(const D& input, D& output) const = 0;
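    // Standard sufficient-decrease test for backtracking line search:
    // accept prox iff f(prox) <= f(y) + <grad, prox-y> + (L/2)*||prox-y||^2.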
virtual inline bool test_backtracking(const D& y, const D& grad, const D& prox, const T L) const {
D tmp;
tmp.copy(prox);
tmp.sub(y);
return (this->eval(prox) <= this->eval(y) + grad.dot(tmp) + 0.5*L*tmp.nrm2sq());
};
virtual T fenchel(const D& input) const = 0;
virtual bool is_fenchel() const { return true; };
virtual void var_fenchel(const D& x, D& grad1, D& grad2,
const bool intercept = false) const = 0;
private:
explicit Loss<T,D,E>(const Loss<T,D,E>& dict);
Loss<T,D,E>& operator=(const Loss<T,D,E>& dict);
};
template <typename T>
class SqLossMissing : public Loss<T> {
public:
SqLossMissing(const AbstractMatrixB<T>& D) : _D(&D) { };
virtual ~SqLossMissing() { };
inline void init(const Vector<T>& x) {
_x.copy(x);
_missingvalues.clear();
for (int i = 0; i<_x.n(); ++i) {
if (isnan(_x[i])) {
_x[i]=0;
_missingvalues.push_back(i);
}
}
};
inline T eval(const Vector<T>& alpha) const {
Vector<T> residual;
residual.copy(_x);
SpVector<T> spalpha(alpha.n());
alpha.toSparse(spalpha);
_D->mult(spalpha,residual,T(-1.0),T(1.0));
for (ListIterator<int> it = _missingvalues.begin();
it != _missingvalues.end(); ++it)
residual[*it]=0;
return 0.5*residual.nrm2sq();
}
inline void grad(const Vector<T>& alpha, Vector<T>& grad) const {
Vector<T> residual;
residual.copy(_x);
SpVector<T> spalpha(alpha.n());
alpha.toSparse(spalpha);
_D->mult(spalpha,residual,T(-1.0),T(1.0));
for (ListIterator<int> it = _missingvalues.begin();
it != _missingvalues.end(); ++it)
residual[*it]=0;
_D->multTrans(residual,grad,T(-1.0),T(0.0));
};
virtual T fenchel(const Vector<T>& input) const {
return 0.5*input.nrm2sq()+input.dot(_x);
};
virtual void var_fenchel(const Vector<T>& x,
Vector<T>& grad1, Vector<T>& grad2,
const bool intercept) const {
grad1.copy(_x);
SpVector<T> spalpha(x.n());
x.toSparse(spalpha);
_D->mult(spalpha,grad1,T(1.0),T(-1.0));
for (ListIterator<int> it = _missingvalues.begin();
it != _missingvalues.end(); ++it)
grad1[*it]=0;
if (intercept)
grad1.whiten(1); // remove the mean of grad1
_D->multTrans(grad1,grad2,T(1.0),T(0.0));
};
private:
explicit SqLossMissing<T>(const SqLossMissing<T>& dict);
SqLossMissing<T>& operator=(const SqLossMissing<T>& dict);
const AbstractMatrixB<T>* _D;
Vector<T> _x;
List<int> _missingvalues;
};
template <typename T>
class SqLoss : public Loss<T>, public SplittingFunction<T> {
public:
SqLoss(const AbstractMatrixB<T>& D) : _D(&D) { _compute_gram = false; };
SqLoss(const AbstractMatrixB<T>& D, const Matrix<T>& G) : _D(&D), _G(&G) { _compute_gram = true; };
virtual ~SqLoss() { };
inline void init(const Vector<T>& x) {
_x.copy(x);
if (_compute_gram) {
_D->multTrans(x,_DtX);
}
};
inline T eval(const Vector<T>& alpha) const {
Vector<T> residual;
residual.copy(_x);
SpVector<T> spalpha(alpha.n());
alpha.toSparse(spalpha);
if (spalpha.L() < alpha.n()/2) {
_D->mult(spalpha,residual,T(-1.0),T(1.0));
} else {
_D->mult(alpha,residual,T(-1.0),T(1.0));
}
return 0.5*residual.nrm2sq();
}
inline void grad(const Vector<T>& alpha, Vector<T>& grad) const {
SpVector<T> spalpha(alpha.n());
alpha.toSparse(spalpha);
if (_compute_gram) {
grad.copy(_DtX);
_G->mult(spalpha,grad,T(1.0),-T(1.0));
} else {
Vector<T> residual;
residual.copy(_x);
_D->mult(spalpha,residual,T(-1.0),T(1.0));
_D->multTrans(residual,grad,T(-1.0),T(0.0));
}
};
virtual inline bool test_backtracking(const Vector<T>& y, const Vector<T>& grad, const Vector<T>& prox, const T L) const {
Vector<T> tmp;
tmp.copy(y);
tmp.sub(prox);
SpVector<T> sptmp(tmp.n());
tmp.toSparse(sptmp);
if (_compute_gram) {
return (_G->quad(sptmp) <= L*sptmp.nrm2sq());
} else {
Vector<T> tmp2(_D->m());
_D->mult(sptmp,tmp2);
return (tmp2.nrm2sq() <= L*sptmp.nrm2sq());
}
};
virtual T fenchel(const Vector<T>& input) const {
return 0.5*input.nrm2sq()+input.dot(_x);
};
virtual void var_fenchel(const Vector<T>& x,
Vector<T>& grad1, Vector<T>& grad2,
const bool intercept) const {
grad1.copy(_x);
SpVector<T> spalpha(x.n());
x.toSparse(spalpha);
_D->mult(spalpha,grad1,T(1.0),T(-1.0));
if (intercept)
grad1.whiten(1); // remove the mean of grad1
_D->multTrans(grad1,grad2,T(1.0),T(0.0));
};
inline int num_components() const { return _D->m();};
inline void prox_split(Matrix<T>& splitted_w, const T lambda) const {
const int n = this->num_components();
Vector<T> row(_D->n());
Vector<T> wi;
for (int i = 0; i<n; ++i) {
_D->copyRow(i,row);
splitted_w.refCol(i,wi);
const T xtw=row.dot(wi);
const T xtx=row.dot(row);
wi.add(row,-lambda*(xtw-_x[i])/(T(1.0)+lambda*xtx));
}
};
inline T eval_split(const Matrix<T>& input) const {
const int n = this->num_components();
Vector<T> row(_D->n());
Vector<T> wi;
T sum = 0;
for (int i = 0; i<n; ++i) {
_D->copyRow(i,row);
input.refCol(i,wi);
const T xtw=row.dot(wi);
sum += 0.5*(_x[i]-xtw)*(_x[i]-xtw);
}
return sum;
};
inline void init_split_variables(Matrix<T>& splitted_w) const {
splitted_w.resize(_D->n(),_D->m());
splitted_w.setZeros();
};
inline void init_prim_var(Vector<T>& prim_var) const {
prim_var.resize(_D->m());
prim_var.setZeros();
}
virtual void prox_prim_var(Vector<T>& out,const Vector<T>& dual_var,
const Vector<T>& prim_var, const T c) const {
const T gamma=T(1.0)/c;
out.copy(dual_var);
out.scal(-gamma);
_D->mult(prim_var,out,T(1.0),T(1.0));
out.add(_x,gamma);
out.scal(T(1.0)/(T(1.0)+gamma));
};
inline void compute_new_prim(Vector<T>& prim, const Vector<T>& prim_var,
const Vector<T>& dual_var, const T gamma, const T delta) const {
Vector<T> tmp;
_D->mult(prim,tmp);
tmp.scal(-gamma);
tmp.add(prim_var);
tmp.add(dual_var,gamma);
_D->multTrans(tmp,prim,T(1.0),delta);
};
inline void add_mult_design_matrix(const Vector<T>& prim,
Vector<T>& out, const T fact) const {
_D->mult(prim,out,fact,T(1.0));
};
private:
explicit SqLoss<T>(const SqLoss<T>& dict);
SqLoss<T>& operator=(const SqLoss<T>& dict);
const AbstractMatrixB<T>* _D;
Vector<T> _x;
bool _compute_gram;
const Matrix<T>* _G;
Vector<T> _DtX;
};
template <typename T>
class HingeLoss : public SplittingFunction<T > {
public:
HingeLoss(const AbstractMatrixB<T>& X) : _X(&X) { };
virtual ~HingeLoss() { };
inline void init(const Vector<T>& y) {
_y.copy(y);
};
inline T eval(const Vector<T>& w) const {
Vector<T> tmp(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,tmp);
tmp.mult(_y,tmp);
tmp.neg();
tmp.add(T(1.0));
tmp.thrsPos();
return tmp.sum()/tmp.n();
};
virtual T eval_split(const Matrix<T>& input) const {
Vector<T> row(_X->n());
Vector<T> wi;
T sum = 0;
for (int i = 0; i<_X->n(); ++i) {
_X->copyRow(i,row);
input.refCol(i,wi);
sum += MAX(0,T(1.0)-_y[i]*row.dot(wi));
}
return sum/_X->m();
};
virtual int num_components() const { return _X->m(); };
inline void init_split_variables(Matrix<T>& splitted_w) const {
splitted_w.resize(_X->n(),_X->m());
splitted_w.setZeros();
};
inline void init_prim_var(Vector<T>& prim_var) const {
prim_var.resize(_X->m());
prim_var.setZeros();
}
/* inline void prox_prim_var(Vector<T>& out,const Vector<T>& dual_var,
const Vector<T>& prim_var, const T lambda, const T c) const {
const T gamma=T(1.0)/c;
out.copy(dual_var);
out.scal(-gamma);
_X->mult(prim_var,out,T(1.0),T(1.0));
const T thrs=T(1.0)-gamma;
for (int i = 0; i<out.n(); ++i) {
const T y = _y[i]*out[i];
if (y < thrs) {
out[i]+=_y[i]*gamma;
} else if (y < T(1.0)) {
out[i]=_y[i];
}
}
}*/
inline void compute_new_prim(Vector<T>& prim, const Vector<T>& prim_var,
const Vector<T>& dual_var, const T gamma, const T delta) const {
Vector<T> tmp;
_X->mult(prim,tmp);
tmp.scal(-gamma);
tmp.add(prim_var);
tmp.add(dual_var,gamma);
_X->multTrans(tmp,prim,T(1.0),delta);
};
inline void add_mult_design_matrix(const Vector<T>& prim, Vector<T>& out,
const T fact) const {
_X->mult(prim,out,fact,T(1.0));
};
inline void prox_split(Matrix<T>& splitted_w, const T lambda) const {
const int n = this->num_components();
Vector<T> row(_X->n());
Vector<T> wi;
for (int i = 0; i<n; ++i) {
_X->copyRow(i,row);
splitted_w.refCol(i,wi);
const T xtw=row.dot(wi);
const T xtx=row.dot(row);
const T diff=1-_y[i]*xtw;
if (diff > lambda*xtx) {
wi.add(row,lambda*_y[i]);
} else if (diff > 0) {
wi.add(row,_y[i]*diff/xtx);
}
}
};
private:
explicit HingeLoss<T>(const HingeLoss<T>& dict);
HingeLoss<T>& operator=(const HingeLoss<T>& dict);
const AbstractMatrixB<T>* _X;
Vector<T> _y;
};
template <typename T, bool weighted = false>
class LogLoss : public Loss<T> {
public:
LogLoss(const AbstractMatrixB<T>& X) : _X(&X) { };
virtual ~LogLoss() { };
inline void init(const Vector<T>& y) {
_y.copy(y);
if (weighted) {
int countpos=0;
for (int i = 0; i<y.n(); ++i)
if (y[i]>0) countpos++;
_weightpos=T(1.0)/countpos;
_weightneg=T(1.0)/MAX(1e-3,(y.n()-countpos));
}
};
inline T eval(const Vector<T>& w) const {
Vector<T> tmp(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,tmp);
tmp.mult(_y,tmp);
tmp.neg();
tmp.logexp();
if (weighted) {
T sum=0;
for (int i = 0; i<tmp.n(); ++i)
sum+= _y[i]>0 ? _weightpos*tmp[i] : _weightneg*tmp[i];
return sum;
} else {
return tmp.sum()/tmp.n();
}
};
inline void grad(const Vector<T>& w, Vector<T>& grad) const {
Vector<T> tmp(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,tmp);
tmp.mult(_y,tmp);
tmp.exp();
tmp.add(T(1.0));
tmp.inv();
tmp.mult(_y,tmp);
tmp.neg();
if (weighted) {
for (int i = 0; i<tmp.n(); ++i)
tmp[i] *= _y[i] > 0 ? _weightpos : _weightneg;
_X->multTrans(tmp,grad);
} else {
_X->multTrans(tmp,grad);
grad.scal(T(1.0)/_X->m());
}
};
virtual bool is_fenchel() const { return !weighted; };
virtual T fenchel(const Vector<T>& input) const {
T sum = 0;
if (weighted) {
// TODO : check that
for (int i = 0; i<input.n(); ++i) {
T prod = _y[i]>0 ? input[i]/_weightpos : -input[i]/_weightneg;
sum += _y[i] >0 ? _weightpos*(xlogx(1.0+prod)+xlogx(-prod)) : _weightneg*(xlogx(1.0+prod)+xlogx(-prod));
}
return sum;
} else {
for (int i = 0; i<input.n(); ++i) {
T prod = _y[i]*input[i]*_X->m();
sum += xlogx(1.0+prod)+xlogx(-prod);
}
return sum/_X->m();
}
};
virtual void var_fenchel(const Vector<T>& w, Vector<T>& grad1, Vector<T>& grad2, const bool intercept) const {
grad1.resize(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,grad1);
grad1.mult(_y,grad1);
grad1.exp();
grad1.add(T(1.0));
grad1.inv();
grad1.mult(_y,grad1);
grad1.neg(); // -gradient (no normalization)
if (intercept)
grad1.project_sft_binary(_y);
grad1.scal(T(1.0)/_X->m());
_X->multTrans(grad1,grad2);
};
private:
explicit LogLoss<T,weighted>(const LogLoss<T,weighted>& dict);
LogLoss<T,weighted>& operator=(const LogLoss<T,weighted>& dict);
const AbstractMatrixB<T>* _X;
Vector<T> _y;
T _weightpos;
T _weightneg;
};
template <typename T>
class MultiLogLoss : public Loss<T, Matrix<T> > {
public:
MultiLogLoss(const AbstractMatrixB<T>& X) : _X(&X) { };
virtual ~MultiLogLoss() { };
inline void init(const Vector<T>& y) {
_y.resize(y.n());
for (int i = 0; i<y.n(); ++i)
_y[i] = static_cast<int>(y[i]);
};
inline T eval(const Matrix<T>& W) const {
Matrix<T> tmp;
_X->multSwitch(W,tmp,true,true);
//W.mult(*_X,tmp,true,true);
Vector<T> col;
T sum=0;
for (int i = 0; i<tmp.n(); ++i) {
tmp.refCol(i,col);
sum+=col.softmax(_y[i]);
}
return sum/tmp.n();
};
inline void grad(const Matrix<T>& W, Matrix<T>& grad) const {
Matrix<T> tmp;
_X->multSwitch(W,tmp,true,true);
//W.mult(*_X,tmp,true,true);
Vector<T> col;
grad.resize(W.m(),W.n());
for (int i = 0; i<tmp.n(); ++i) {
tmp.refCol(i,col);
col.add(-col[_y[i]]);
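      // guard against overflow in exp(): if a shifted logit exceeds ~1e2,
      // fall back to a one-hot softmax at the largest entry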
bool overweight=false;
for (int j = 0; j<col.n(); ++j)
if (col[j] > 1e2)
overweight=true;
if (overweight) {
const int ind =col.fmax();
col.setZeros();
col[ind]=1;
} else {
        col.exp();
        col.scal(T(1.0)/col.sum());
}
col[_y[i]] = col[_y[i]]-T(1.0);
}
_X->mult(tmp,grad,true,true);
grad.scal(T(1.0)/_X->m());
};
virtual T fenchel(const Matrix<T>& input) const {
T sum = 0;
Vector<T> col;
for (int i = 0; i<input.n(); ++i) {
const int clas = _y[i];
input.refCol(i,col);
for (int j = 0; j<input.m(); ++j) {
if (j == clas) {
sum += xlogx(_X->m()*input[i*input.m()+j]+1.0);
} else {
sum += xlogx(_X->m()*input[i*input.m()+j]);
}
}
}
return sum/_X->m();
};
virtual void var_fenchel(const Matrix<T>& W, Matrix<T>& grad1, Matrix<T>& grad2, const bool intercept) const {
_X->multSwitch(W,grad1,true,true);
//W.mult(*_X,grad1,true,true);
Vector<T> col;
for (int i = 0; i<grad1.n(); ++i) {
grad1.refCol(i,col);
col.add(-col[_y[i]]);
bool overweight=false;
for (int j = 0; j<col.n(); ++j)
if (col[j] > 1e2)
overweight=true;
if (overweight) {
const int ind =col.fmax();
col.setZeros();
col[ind]=1;
} else {
        col.exp();
        col.scal(T(1.0)/col.sum());
}
col[_y[i]] = col[_y[i]]-T(1.0);
}
if (intercept) {
Vector<T> row;
for (int i = 0; i<grad1.m(); ++i) {
grad1.extractRow(i,row);
row.project_sft(_y,i);
grad1.setRow(i,row);
}
}
grad1.scal(T(1.0)/_X->m());
grad2.resize(W.m(),W.n());
_X->mult(grad1,grad2,true,true);
};
private:
explicit MultiLogLoss<T>(const MultiLogLoss<T>& dict);
MultiLogLoss<T>& operator=(const MultiLogLoss<T>& dict);
const AbstractMatrixB<T>* _X;
Vector<int> _y;
};
template <typename T>
class PoissonLoss : public Loss<T> {
public:
PoissonLoss(const AbstractMatrixB<T>& X, const T delta) : _X(&X), _delta(delta) { };
virtual ~PoissonLoss() { };
inline void init(const Vector<T>& y) {
_y.copy(y);
};
inline T eval(const Vector<T>& w) const {
Vector<T> tmp(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,tmp);
T sum=tmp.sum()+_delta*tmp.n();
for (int i = 0; i<tmp.n(); ++i)
tmp[i] = tmp[i] > 0 ? log(tmp[i]+_delta) : tmp[i]/_delta + log(_delta);
tmp.mult(_y,tmp);
return (sum-tmp.sum());
};
inline void grad(const Vector<T>& w, Vector<T>& grad) const {
Vector<T> tmp(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,tmp);
for (int i = 0; i<tmp.n(); ++i)
tmp[i] = tmp[i] > 0 ? T(1.0)/(tmp[i]+_delta) : T(1.0)/_delta;
tmp.mult(_y,tmp);
tmp.neg();
tmp.add(T(1.0));
_X->multTrans(tmp,grad);
};
virtual bool is_fenchel() const { return true; };
virtual T fenchel(const Vector<T>& input) const {
      // only valid with non-negativity constraints (automatically
      // activated with this loss)
T sum = 0;
for (int i = 0; i<input.n(); ++i) {
T thrs=T(1.0)-_y[i]/_delta;
if (input[i] <= thrs) {
sum += -_delta+_y[i]*alt_log<T>(_delta);
} else if (input[i] <= T(1.0)) {
sum += -_delta*input[i] - _y[i] +
_y[i]*alt_log<T>(_y[i]/(T(1.0)+EPSILON-input[i]));
} else {
sum += INFINITY;
}
}
return sum;
};
virtual void var_fenchel(const Vector<T>& w, Vector<T>& grad1,
Vector<T>& grad2, const bool intercept) const {
grad1.resize(_X->m());
SpVector<T> spw(w.n());
w.toSparse(spw);
_X->mult(spw,grad1);
grad1.add(_delta);
grad1.inv();
grad1.mult(_y,grad1);
grad1.neg();
grad1.add(T(1.0));
_X->multTrans(grad1,grad2);
};
private:
explicit PoissonLoss<T>(const PoissonLoss<T>& dict);
PoissonLoss<T>& operator=(const PoissonLoss<T>& dict);
const AbstractMatrixB<T>* _X;
Vector<T> _y;
T _delta;
};
template <typename T>
class LossCur: public Loss<T, Matrix<T>, Matrix<T> > {
public:
LossCur(const AbstractMatrixB<T>& X) : _X(&X) { };
virtual ~LossCur() { };
inline void init(const Matrix<T>& y) { };
inline T eval(const Matrix<T>& A) const {
Matrix<T> tmp(_X->m(),A.n());
_X->mult(A,tmp);
Matrix<T> tmp2;
//tmp2.copy(*_X);
_X->copyTo(tmp2);
//tmp.mult(*_X,tmp2,false,false,T(-1.0),T(1.0));
_X->multSwitch(tmp,tmp2,false,false,T(-1.0),T(1.0));
return 0.5*tmp2.normFsq();
};
inline void grad(const Matrix<T>& A, Matrix<T>& grad) const {
Matrix<T> tmp(_X->m(),A.n());
_X->mult(A,tmp);
Matrix<T> tmp2;
//tmp2.copy(*_X);
_X->copyTo(tmp2);
//tmp.mult(*_X,tmp2,false,false,T(-1.0),T(1.0));
_X->multSwitch(tmp,tmp2,false,false,T(-1.0),T(1.0));
//tmp2.mult(*_X,tmp,false,true,T(-1.0),T(0.0));
_X->multSwitch(tmp2,tmp,true,false,T(-1.0),T(0.0));
grad.resize(A.m(),A.n());
_X->mult(tmp,grad,true,false);
};
virtual T fenchel(const Matrix<T>& input) const {
return 0.5*input.normFsq()+_X->dot(input);
}
virtual void var_fenchel(const Matrix<T>& A, Matrix<T>& grad1, Matrix<T>& grad2, const bool intercept) const {
Matrix<T> tmp(_X->m(),A.n());
_X->mult(A,tmp);
//grad1.copy(*_X);
_X->copyTo(grad1);
//tmp.mult(*_X,grad1,false,false,T(1.0),T(-1.0));
_X->multSwitch(tmp,grad1,false,false,T(1.0),T(-1.0));
//grad1.mult(*_X,tmp,false,true,T(1.0),T(0.0));
_X->multSwitch(grad1,tmp,true,false,T(1.0),T(0.0));
grad2.resize(A.m(),A.n());
_X->mult(tmp,grad2,true,false);
};
private:
explicit LossCur<T>(const LossCur<T>& dict);
LossCur<T>& operator=(const LossCur<T>& dict);
const AbstractMatrixB<T>* _X;
};
template <typename T>
class SqLossMat : public Loss<T, Matrix<T> , Matrix<T> > {
public:
SqLossMat(const AbstractMatrixB<T>& D) : _D(&D) { _compute_gram = false; };
SqLossMat(const AbstractMatrixB<T>& D, const Matrix<T>& G) : _D(&D), _G(&G) {
_compute_gram = true; };
virtual ~SqLossMat() { };
virtual inline void init(const Matrix<T>& x) {
_x.copy(x);
if (_compute_gram) {
_D->mult(x,_DtX,true,false);
}
};
inline T eval(const Matrix<T>& alpha) const {
Matrix<T> residual;
residual.copy(_x);
SpMatrix<T> spalpha;
alpha.toSparse(spalpha);
_D->mult(spalpha,residual,false,false,T(-1.0),T(1.0));
return 0.5*residual.normFsq();
}
inline void grad(const Matrix<T>& alpha, Matrix<T>& grad) const {
SpMatrix<T> spalpha;
alpha.toSparse(spalpha);
if (_compute_gram) {
grad.copy(_DtX);
_G->mult(spalpha,grad,false,false,T(1.0),-T(1.0));
} else {
Matrix<T> residual;
residual.copy(_x);
_D->mult(spalpha,residual,false,false,T(-1.0),T(1.0));
_D->mult(residual,grad,true,false,T(-1.0),T(0.0));
}
};
virtual inline bool test_backtracking(const Matrix<T>& y, const Matrix<T>& grad, const Matrix<T>& prox, const T L) const {
Matrix<T> tmp;
tmp.copy(y);
tmp.sub(prox);
SpMatrix<T> sptmp;
tmp.toSparse(sptmp);
if (_compute_gram) {
SpVector<T> col;
T sum=0;
for (int i = 0; i<sptmp.n(); ++i) {
sptmp.refCol(i,col);
sum += _G->quad(col);
}
return (sum <= L*sptmp.normFsq());
} else {
Matrix<T> tmp2;
_D->mult(sptmp,tmp2);
return (tmp2.normFsq() <= L*sptmp.normFsq());
}
};
virtual T fenchel(const Matrix<T>& input) const {
return 0.5*input.normFsq()+input.dot(_x);
};
virtual void var_fenchel(const Matrix<T>& x, Matrix<T>& grad1, Matrix<T>& grad2, const bool intercept) const {
grad1.copy(_x);
SpMatrix<T> spalpha;
x.toSparse(spalpha);
_D->mult(spalpha,grad1,false,false,T(1.0),T(-1.0));
if (intercept)
grad1.center();
_D->mult(grad1,grad2,true,false,T(1.0),T(0.0));
};
private:
explicit SqLossMat<T>(const SqLossMat<T>& dict);
SqLossMat<T>& operator=(const SqLossMat<T>& dict);
const AbstractMatrixB<T>* _D;
Matrix<T> _x;
bool _compute_gram;
const Matrix<T>* _G;
Matrix<T> _DtX;
};
template <typename T, typename L>
class LossMatSup : public Loss<T,Matrix<T>, Matrix<T> > {
public:
LossMatSup() { };
virtual ~LossMatSup() {
for (int i = 0; i<_N; ++i) {
delete(_losses[i]);
_losses[i]=NULL;
}
delete[](_losses);
};
virtual void init(const Matrix<T>& input) {
Vector<T> col;
_m=input.m();
for (int i = 0; i<_N; ++i) {
input.refCol(i,col);
_losses[i]->init(col);
}
};
inline T eval(const Matrix<T>& w) const {
Vector<T> col;
T sum = 0;
for (int i = 0; i<_N; ++i) {
w.refCol(i,col);
sum+=_losses[i]->eval(col);
}
return sum;
}
inline void grad(const Matrix<T>& w, Matrix<T>& grad) const {
Vector<T> col, col2;
grad.resize(w.m(),w.n());
for (int i = 0; i<_N; ++i) {
w.refCol(i,col);
grad.refCol(i,col2);
_losses[i]->grad(col,col2);
}
};
virtual T fenchel(const Matrix<T>& input) const {
Vector<T> col;
T sum = 0;
for (int i = 0; i<_N; ++i) {
input.refCol(i,col);
sum += _losses[i]->fenchel(col);
}
return sum;
}
virtual void var_fenchel(const Matrix<T>& x, Matrix<T>& grad1, Matrix<T>& grad2, const bool intercept) const {
grad1.resize(_m,x.n());
grad2.resize(x.m(),x.n());
Vector<T> col, col2, col3;
for (int i = 0; i<_N; ++i) {
x.refCol(i,col);
grad1.refCol(i,col2);
grad2.refCol(i,col3);
_losses[i]->var_fenchel(col,col2,col3,intercept);
}
};
virtual bool is_fenchel() const {
bool ok=true;
for (int i = 0; i<_N; ++i)
ok = ok && _losses[i]->is_fenchel();
return ok;
};
virtual void dummy() = 0;
private:
explicit LossMatSup<T,L>(const LossMatSup<T,L>& dict);
LossMatSup<T,L>& operator=(const LossMatSup<T,L>& dict);
int _m;
protected:
int _N;
L** _losses;
};
template <typename T, typename L>
class LossMat : public LossMatSup<T,L> { };
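/* The primary LossMat template is abstract (dummy() stays pure); only the
 * specializations below are usable, each building one per-column loss that
 * shares the same design matrix X. */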
template <typename T, bool weighted>
class LossMat<T, LogLoss<T,weighted> > : public LossMatSup<T, LogLoss<T,weighted> > {
public:
LossMat(const int N, const AbstractMatrixB<T>& X) {
this->_N=N;
this->_losses=new LogLoss<T,weighted>*[this->_N];
for (int i = 0; i<this->_N; ++i)
this->_losses[i]=new LogLoss<T,weighted>(X);
}
virtual void dummy() { };
virtual ~LossMat() { };
};
template <typename T>
class LossMat<T, SqLossMissing<T> > : public LossMatSup<T, SqLossMissing<T> > {
public:
LossMat(const int N, const AbstractMatrixB<T>& X) {
this->_N=N;
this->_losses=new SqLossMissing<T>*[this->_N];
for (int i = 0; i<this->_N; ++i)
this->_losses[i]=new SqLossMissing<T>(X);
}
virtual void dummy() { };
virtual ~LossMat() { };
};
template <typename T>
class LossMat<T, PoissonLoss<T> > : public LossMatSup<T, PoissonLoss<T> > {
public:
LossMat(const int N, const AbstractMatrixB<T>& X,const T delta) {
this->_N=N;
this->_losses=new PoissonLoss<T>*[this->_N];
for (int i = 0; i<this->_N; ++i)
this->_losses[i]=new PoissonLoss<T>(X,delta);
}
virtual void dummy() { };
virtual ~LossMat() { };
};
template <typename T, typename D = Vector<T> >
class Regularizer {
public:
Regularizer() { };
Regularizer(const ParamReg<T>& param) : _id(NA) {
_intercept=param.intercept;
_pos=param.pos;
}
virtual ~Regularizer() { };
virtual void reset() { };
virtual void prox(const D& input, D& output, const T lambda) = 0;
virtual T eval(const D& input) const = 0;
    /// returns phi^star( input ) and output=input if the Fenchel conjugate is unconstrained
    /// returns 0 and scales input such that phi^star(output)=0 otherwise
virtual void fenchel(const D& input, T& val, T& scal) const = 0;
virtual bool is_fenchel() const { return true; };
virtual bool is_intercept() const { return _intercept; };
virtual bool is_subgrad() const { return false; };
virtual void sub_grad(const D& input, D& output) const { };
virtual T eval_paths(const D& x, SpMatrix<T>& paths_mat) const { return this->eval(x); };
virtual T eval_dual_norm(const D& x) const { return 0; };
// TODO complete for all norms
virtual T eval_dual_norm_paths(const D& x, SpMatrix<T>& path) const { return this->eval_dual_norm(x); };
regul_t inline id() const { return _id; };
virtual void linearize(const D& input) { };
virtual bool is_concave() const { return false; };
// virtual bool is_none() const { return false; };
// virtual bool is_pos() const { return _pos; };
protected:
bool _pos;
bool _intercept;
regul_t _id;
private:
explicit Regularizer<T,D>(const Regularizer<T,D>& reg);
Regularizer<T,D>& operator=(const Regularizer<T,D>& reg);
};
template <typename T>
class Lasso : public Regularizer<T> {
public:
Lasso(const ParamReg<T>& param) : Regularizer<T>(param) { this->_id = L1; };
virtual ~Lasso() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
y.softThrshold(lambda);
if (this->_intercept) y[y.n()-1] = x[y.n()-1];
};
T inline eval(const Vector<T>& x) const {
return (this->_intercept ? x.asum() - abs(x[x.n()-1]) : x.asum());
};
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
Vector<T> output;
output.copy(input);
if (this->_pos) output.thrsPos();
T mm = output.fmaxval();
scal= mm > 1.0 ? T(1.0)/mm : 1.0;
val=0;
      if (this->_intercept && (abs<T>(output[output.n()-1]) > EPSILON)) val=INFINITY;
};
virtual bool is_subgrad() const { return true; };
virtual void sub_grad(const Vector<T>& input, Vector<T>& output) const {
output.resize(input.n());
if (!this->_pos) {
for (int i = 0; i<input.n(); ++i) {
output[i] = input[i] > 0 ? T(1.0) : input[i] < 0 ? -T(1.0) : 0;
}
} else {
for (int i = 0; i<input.n(); ++i) {
output[i] = input[i] > 0 ? T(1.0) : 0;
}
}
if (this->_intercept) output[output.n()-1]=0;
}
};
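/* The prox above is coordinate-wise soft-thresholding; a scalar sketch of the
 * same operation (illustrative, independent of the Vector<T> API):
 *
 *     template <typename T>
 *     T soft_threshold(const T x, const T lambda) {
 *        if (x >  lambda) return x - lambda;
 *        if (x < -lambda) return x + lambda;
 *        return T(0);
 *     }
 */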
template <typename T>
class LassoConstraint : public Regularizer<T> {
public:
LassoConstraint(const ParamReg<T>& param) : Regularizer<T>(param) {
_thrs=param.lambda;
this->_id = L1CONSTRAINT;
};
virtual ~LassoConstraint() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
Vector<T> tmp;
tmp.copy(x);
if (this->_intercept) {
tmp[tmp.n()-1]=0;
tmp.sparseProject(y,_thrs,1,0,0,0,this->_pos);
y[y.n()-1] = x[y.n()-1];
} else {
tmp.sparseProject(y,_thrs,1,0,0,0,this->_pos);
}
};
T inline eval(const Vector<T>& x) const {
return 0;
};
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
scal=1.0;
Vector<T> output;
output.copy(input);
if (this->_intercept) output[output.n()-1]=0;
val = _thrs*(this->_pos ? MAX(output.maxval(),0) : output.fmaxval());
};
virtual bool is_subgrad() const { return false; };
private:
T _thrs;
};
template <typename T>
class Lzero : public Regularizer<T> {
public:
Lzero(const ParamReg<T>& param) : Regularizer<T>(param) { };
virtual ~Lzero() { };
virtual bool is_fenchel() const { return false; };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
y.hardThrshold(sqrt(2*lambda));
if (this->_intercept) y[y.n()-1] = x[y.n()-1];
};
T inline eval(const Vector<T>& x) const {
return (this->_intercept ? x.lzero() - 1 : x.lzero());
};
void inline fenchel(const Vector<T>& input, T& val, T& scal) const { };
};
template <typename T>
class LogDC : public Regularizer<T> {
public:
LogDC(const ParamReg<T>& param) : Regularizer<T>(param), _eps(param.lambda2d1) { };
virtual ~LogDC() { };
virtual bool is_fenchel() const { return false; };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.resize(x.n());
for (int i = 0; i<x.n(); ++i) y[i]=softThrs<T>(x[i],lambda*_weights[i]);
if (this->_pos) y.thrsPos();
};
void inline linearize(const Vector<T> &x) {
_weights.resize(x.n());
for (int i = 0; i<x.n(); ++i) _weights[i] = T(1.0)/(abs<T>(x[i])+_eps);
};
bool inline is_concave() const { return true; };
T inline eval(const Vector<T>& x) const {
T tmp=0;
for (int i = 0; i<x.n(); ++i) tmp+= log_alt<T>(abs<T>(x[i])+_eps);
return tmp;
};
void inline fenchel(const Vector<T>& input, T& val, T& scal) const { };
private:
const T _eps;
Vector<T> _weights;
};
template <typename T>
class None: public Regularizer<T>, public SplittingFunction<T, SpMatrix<T> > {
public:
None() { };
None(const ParamReg<T>& param) : Regularizer<T>(param) { };
virtual ~None() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
};
T inline eval(const Vector<T>& x) const { return 0; };
void inline fenchel(const Vector<T>& input, T& val, T& scal) const { };
virtual bool is_fenchel() const { return false; };
virtual bool is_subgrad() const { return true; };
virtual void sub_grad(const Vector<T>& input, Vector<T>& output) const {
output.setZeros();
}
virtual void reset() { };
virtual T eval_split(const SpMatrix<T>& input) const { return 0; };
virtual int num_components() const { return 0; };
virtual void prox_split(SpMatrix<T>& splitted_w, const T lambda) const { };
virtual void init_split_variables(SpMatrix<T>& splitted_w) const { };
virtual void init(const Vector<T>& y) { };
// virtual bool is_none() const { return true; };
};
template <typename T>
class Ridge: public Regularizer<T> {
public:
Ridge(const ParamReg<T>& param) : Regularizer<T>(param) { this->_id = RIDGE; };
virtual ~Ridge() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
y.scal(T(1.0/(1.0+lambda)));
if (this->_intercept) y[y.n()-1] = x[y.n()-1];
};
T inline eval(const Vector<T>& x) const {
return (this->_intercept ? 0.5*x.nrm2sq() - 0.5*x[x.n()-1]*x[x.n()-1] : 0.5*x.nrm2sq());
};
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
Vector<T> tmp;
tmp.copy(input);
if (this->_pos) tmp.thrsPos();
val=this->eval(tmp);
scal=T(1.0);
      if (this->_intercept && (abs<T>(tmp[tmp.n()-1]) > EPSILON)) val=INFINITY;
};
virtual bool is_subgrad() const { return true; };
    virtual void sub_grad(const Vector<T>& input, Vector<T>& output) const {
      output.resize(input.n());
      if (this->_pos) {
        // with the non-negativity constraint, only positive coordinates contribute
        for (int i = 0; i<input.n(); ++i) {
          output[i] = input[i] > 0 ? input[i] : 0;
        }
      } else {
        // gradient of 0.5*||x||_2^2 is x itself
        output.copy(input);
      }
      if (this->_intercept) output[output.n()-1]=0;
    }
};
template <typename T>
class normL2: public Regularizer<T> {
public:
normL2(const ParamReg<T>& param) : Regularizer<T>(param) { };
virtual ~normL2() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
Vector<T> xref(x.rawX(),this->_intercept ? x.n()-1 : x.n());
const T nrm=xref.nrm2();
if (nrm < lambda) {
y.setZeros();
} else {
y.scal(T(1.0) - lambda/nrm);
}
if (this->_intercept) y[y.n()-1] = x[y.n()-1];
};
T inline eval(const Vector<T>& x) const {
Vector<T> xref(x.rawX(),this->_intercept ? x.n()-1 : x.n());
return xref.nrm2();
};
/// TODO add subgradient
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
Vector<T> output;
output.copy(input);
if (this->_pos) output.thrsPos();
T mm = output.nrm2();
scal= mm > 1.0 ? T(1.0)/mm : 1.0;
val=0;
      if (this->_intercept && (abs<T>(output[output.n()-1]) > EPSILON)) val=INFINITY;
};
};
template <typename T>
class normLINF: public Regularizer<T> {
public:
normLINF(const ParamReg<T>& param) : Regularizer<T>(param) { };
virtual ~normLINF() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
Vector<T> xref(y.rawX(),this->_intercept ? x.n()-1 : x.n());
Vector<T> row(xref.n());
xref.l1project(row,lambda);
for (int j = 0; j<xref.n(); ++j)
y[j]=y[j]-row[j];
if (this->_intercept) y[y.n()-1] = x[y.n()-1];
};
T inline eval(const Vector<T>& x) const {
Vector<T> xref(x.rawX(),this->_intercept ? x.n()-1 : x.n());
return xref.fmaxval();
};
/// TODO add subgradient
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
Vector<T> output;
output.copy(input);
if (this->_pos) output.thrsPos();
T mm = output.asum();
scal= mm > 1.0 ? T(1.0)/mm : 1.0;
val=0;
      if (this->_intercept && (abs<T>(output[output.n()-1]) > EPSILON)) val=INFINITY;
};
};
template <typename T, typename D, typename RegA, typename RegB, bool order = true, bool scale_lambda = false>
class ComposeProx: public Regularizer<T,D> {
public:
ComposeProx(const ParamReg<T>& param) : Regularizer<T,D>(param) {
_lambda2d1=param.lambda2d1;
_regA=new RegA(param);
_regB=new RegB(param);
}
virtual ~ComposeProx() { delete(_regA); delete(_regB); };
void inline prox(const D& x, D& y, const T lambda) {
D tmp;
if (scale_lambda) {
if (order) {
_regA->prox(x,tmp,lambda);
_regB->prox(tmp,y,lambda*_lambda2d1/(T(1.0)+lambda));
} else {
_regB->prox(x,tmp,lambda*_lambda2d1);
_regA->prox(tmp,y,lambda/(T(1.0)+lambda*_lambda2d1));
}
} else {
if (order) {
_regA->prox(x,tmp,lambda);
_regB->prox(tmp,y,lambda*_lambda2d1);
} else {
_regB->prox(x,tmp,lambda*_lambda2d1);
_regA->prox(tmp,y,lambda);
}
}
};
T inline eval(const D& x) const {
return _regA->eval(x) + _lambda2d1*_regB->eval(x);
};
virtual bool is_fenchel() const { return false; };
void inline fenchel(const D& input, T& val, T& scal) const { };
virtual bool is_subgrad() const { return _regA->is_subgrad() && _regB->is_subgrad(); };
virtual void sub_grad(const D& input, D& output) const {
_regA->sub_grad(input,output);
D tmp;
_regB->sub_grad(input,tmp);
output.add(tmp,_lambda2d1);
};
private:
RegA* _regA;
RegB* _regB;
T _lambda2d1;
};
template <typename T>
struct ElasticNet {
typedef ComposeProx< T, Vector<T>, Lasso<T>, Ridge<T>, true > type;
};
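/* Composing the two proximal operators in this order is exact for the
 * elastic net: with _lambda2d1 = gamma,
 *    prox_{lambda(||.||_1 + (gamma/2)||.||_2^2)}(x)
 *       = softThrshold(x, lambda) / (1 + lambda*gamma),
 * which is precisely what ComposeProx<..., Lasso, Ridge, true> computes. */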
template <typename T>
class FusedLasso: public Regularizer<T> {
public:
FusedLasso(const ParamReg<T>& param) : Regularizer<T>(param) {
_lambda2d1=param.lambda2d1;
_lambda3d1=param.lambda3d1;
};
virtual ~FusedLasso() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.resize(x.n());
Vector<T> copyx;
copyx.copy(x);
copyx.fusedProjectHomotopy(y,_lambda2d1*lambda,lambda,_lambda3d1*lambda,true);
};
T inline eval(const Vector<T>& x) const {
T sum = T();
const int maxn = this->_intercept ? x.n()-1 : x.n();
for (int i = 0; i<maxn-1; ++i)
sum += abs(x[i+1]-x[i]) + _lambda2d1*abs(x[i]) + 0.5*_lambda3d1*x[i]*x[i];
sum += _lambda2d1*abs(x[maxn-1])+0.5*_lambda3d1*x[maxn-1]*x[maxn-1];
return sum;
};
virtual bool is_fenchel() const { return false; };
void inline fenchel(const Vector<T>& input, T& val, T& scal) const { };
private:
T _lambda2d1;
T _lambda3d1;
};
template <typename T>
class GraphLasso : public Regularizer<T>, public SplittingFunction<T, SpMatrix<T> > {
public:
GraphLasso(const ParamReg<T>& param) : Regularizer<T>(param) {
const bool resetflow = param.resetflow;
const bool linf = param.linf;
const bool clever = param.clever;
const GraphStruct<T>& graph_st=*(param.graph_st);
_clever=clever;
_resetflow=resetflow;
_graph.create_graph(graph_st.Nv,graph_st.Ng,graph_st.weights,
graph_st.gv_ir,graph_st.gv_jc,graph_st.gg_ir,graph_st.gg_jc);
_graph.save_capacities();
_work.resize(graph_st.Nv+graph_st.Ng+2);
_weights.resize(graph_st.Ng);
for (int i = 0; i<graph_st.Ng; ++i) _weights[i] = graph_st.weights[i];
_old_lambda=-1.0;
_linf=linf;
};
virtual ~GraphLasso() { };
void inline reset() { _old_lambda = -1.0; };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
if (!_linf) {
cerr << "Not implemented" << endl;
exit(1);
}
y.copy(x);
_graph.restore_capacities();
_graph.set_weights(_weights.rawX(),lambda);
if (_old_lambda < 0 || _resetflow) {
_graph.reset_flow();
} else {
if (lambda != _old_lambda)
_graph.scale_flow(lambda/_old_lambda);
}
if (this->_pos) {
Vector<T> xc;
xc.copy(x);
xc.thrsPos();
_graph.proximal_operator(xc.rawX(),y.rawX(),_clever);
} else {
_graph.proximal_operator(x.rawX(),y.rawX(),_clever);
}
#ifdef VERB2
T duality_gap2 = y.nrm2sq()-y.dot(x)+lambda*this->eval(y);
cerr << "duality_gap2 " << duality_gap2 << endl;
#endif
_old_lambda=lambda;
};
T inline eval(const Vector<T>& x) const {
Graph<T>* gr = const_cast<Graph<T>* >(&_graph);
gr->restore_capacities();
return gr->norm(x.rawX(),_work.rawX(),_weights.rawX(),_linf);
};
virtual bool is_fenchel() const {
return _linf;
};
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
Graph<T>* gr = const_cast<Graph<T>* >(&_graph);
if (!_resetflow) {
gr->save_flow();
}
gr->reset_flow();
gr->restore_capacities();
Vector<T> output;
output.copy(input);
if (this->_pos) output.thrsPos();
T mm = gr->dual_norm_inf(output,_weights);
if (!_resetflow)
gr->restore_flow();
scal= mm > 1.0 ? T(1.0)/mm : 1.0;
val=0;
      if (this->_intercept && (abs<T>(input[input.n()-1]) > EPSILON)) val=INFINITY;
};
virtual void init(const Vector<T>& y) { };
inline int num_components() const { return _weights.n(); };
inline void prox_split(SpMatrix<T>& splitted_w, const T lambda) const {
Vector<T> tmp;
SpVector<T> col;
if (_linf) {
for (int i = 0; i<splitted_w.n(); ++i) {
splitted_w.refCol(i,col);
tmp.setData(col.rawX(),col.nzmax());
Vector<T> res;
res.copy(tmp);
vAbs<T>(res.n(),res.rawX(),res.rawX());
T thrs=project_tree_l1(res.rawX(),res.n(),lambda);
tmp.thrsabsmin(thrs);
}
} else {
for (int i = 0; i<splitted_w.n(); ++i) {
splitted_w.refCol(i,col);
tmp.setData(col.rawX(),col.nzmax());
const T nrm = tmp.nrm2();
if (nrm > lambda*_weights[i]) {
tmp.scal(T(1.0)-lambda*_weights[i]/nrm);
} else {
tmp.setZeros();
}
}
}
};
inline void init_split_variables(SpMatrix<T>& splitted_w) const {
Graph<T>* gr = const_cast<Graph<T>* >(&_graph);
gr->init_split_variables(splitted_w);
};
inline T eval_split(const SpMatrix<T>& input) const {
SpVector<T> col;
T sum = 0;
for (int i = 0; i<input.n(); ++i) {
input.refCol(i,col);
sum += _linf ? _weights[i]*col.fmaxval() : _weights[i]*col.nrm2();
}
return sum;
}
inline T eval_weighted(const Vector<T>& input,
const SpMatrix<T>& input_struct, const T* inner_weight) const {
SpVector<T> col;
T sum = 0;
Vector<T> tmp(input_struct.m());
for (int i = 0; i<input_struct.n(); ++i) {
input_struct.refCol(i,col);
tmp.setn(col.L());
for (int j = 0; j<col.L(); ++j)
tmp[j]=inner_weight[j]*input[col.r(j)];
sum += _linf ? _weights[i]*tmp.fmaxval() : _weights[i]*tmp.nrm2();
}
return sum;
}
private:
bool _clever;
Graph<T> _graph;
bool _resetflow;
Vector<T> _work;
Vector<T> _weights;
T _old_lambda;
bool _linf;
};
template <typename T>
struct GraphLassoRidge {
typedef ComposeProx<T, Vector<T>, GraphLasso<T>, Ridge<T>, true> type;
};
template <typename T>
class TreeLasso : public Regularizer<T> {
public:
TreeLasso(const ParamReg<T>& param) : Regularizer<T>(param) {
const TreeStruct<T>& tree_st=*(param.tree_st);
const bool linf = param.linf;
_tree.create_tree(tree_st.Nv,tree_st.own_variables,
tree_st.N_own_variables,tree_st.weights,
tree_st.groups_ir,tree_st.groups_jc,
tree_st.Ng,0);
_linf=linf;
};
virtual ~TreeLasso() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
Vector<T> yp;
if (this->_intercept) {
yp.setData(y.rawX(),y.n()-1);
} else {
yp.setData(y.rawX(),y.n());
}
_tree.proj(yp,_linf,lambda);
};
T inline eval(const Vector<T>& x) const {
return const_cast<Tree_Seq<T>* >(&_tree)->val_norm(x.rawX(),0,_linf);
};
void inline fenchel(const Vector<T>& y, T& val, T& scal) const {
if (_linf) {
Vector<T> yp;
if (this->_intercept) {
yp.setData(y.rawX(),y.n()-1);
} else {
yp.setData(y.rawX(),y.n());
}
Vector<T> yp2;
yp2.copy(yp);
if (this->_pos) yp2.thrsPos();
T mm = const_cast<Tree_Seq<T>* >(&_tree)->dual_norm_inf(yp2);
scal= mm > 1.0 ? T(1.0)/mm : 1.0;
val=0;
        if (this->_intercept && (abs<T>(y[y.n()-1]) > EPSILON)) val=INFINITY;
}
};
virtual bool is_fenchel() const {
return _linf;
};
virtual bool is_subgrad() const { return true; };
virtual void sub_grad(const Vector<T>& input, Vector<T>& output) const {
output.resize(input.n());
const_cast<Tree_Seq<T>*>(&_tree)->sub_grad(input,output,_linf);
if (this->_intercept) output[output.n()-1]=0;
}
private:
Tree_Seq<T> _tree;
bool _linf;
};
template <typename T>
class TreeLzero : public Regularizer<T> {
public:
TreeLzero(const ParamReg<T>& param) : Regularizer<T>(param) {
const TreeStruct<T>& tree_st=*(param.tree_st);
_tree.create_tree(tree_st.Nv,tree_st.own_variables,
tree_st.N_own_variables,tree_st.weights,
tree_st.groups_ir,tree_st.groups_jc,
tree_st.Ng,0);
};
virtual ~TreeLzero() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
Vector<T> yp;
if (this->_intercept) {
yp.setData(y.rawX(),y.n()-1);
} else {
yp.setData(y.rawX(),y.n());
}
_tree.proj_zero(yp,lambda);
};
T inline eval(const Vector<T>& x) const {
return const_cast<Tree_Seq<T>* >(&_tree)->val_zero(x.rawX(),0);
};
virtual bool is_fenchel() const { return false; };
void inline fenchel(const Vector<T>& y, T& val, T& scal) const { };
private:
Tree_Seq<T> _tree;
};
template <typename T, typename ProxMat>
class ProxMatToVec : public Regularizer<T> {
public:
ProxMatToVec(const ParamReg<T>& param) : Regularizer<T>(param) {
_size_group=param.size_group;
ParamReg<T> param2=param;
param2.intercept=false;
_proxy = new ProxMat(param2);
};
virtual ~ProxMatToVec() { delete(_proxy); };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.resize(x.n());
int size_vec=static_cast<int>(this->_intercept ? x.n()-1 : x.n());
Matrix<T> mX(x.rawX(),_size_group,size_vec/_size_group);
Matrix<T> mY(y.rawX(),_size_group,size_vec/_size_group);
_proxy->prox(mX,mY,lambda);
if (this->_intercept) y[y.n()-1]=x[x.n()-1];
}
T inline eval(const Vector<T>& x) const {
int size_vec=this->_intercept ? x.n()-1 : x.n();
Matrix<T> mX(x.rawX(),_size_group,size_vec/_size_group);
return _proxy->eval(mX);
}
virtual bool is_fenchel() const { return (_proxy->is_fenchel()); };
void inline fenchel(const Vector<T>& x, T& val, T& scal) const {
int size_vec=this->_intercept ? x.n()-1 : x.n();
Matrix<T> mX(x.rawX(),_size_group,size_vec/_size_group);
_proxy->fenchel(mX,val,scal);
};
private:
int _size_group;
ProxMat* _proxy;
};
template <typename T, typename Reg>
class GroupProx : public Regularizer<T> {
public:
GroupProx(const ParamReg<T> & param) : Regularizer<T>(param) {
ParamReg<T> param2=param;
param2.intercept=false;
_size_group=param.size_group;
if (param.groups) {
int num_groups=0;
for (int i = 0; i<param.ngroups; ++i) num_groups=MAX(num_groups,param.groups[i]);
_groups.resize(num_groups);
for (int i = 0; i<num_groups; ++i) _groups[i]=new list_int();
for (int i = 0; i<param.ngroups; ++i) _groups[param.groups[i]-1]->push_back(i);
}
_prox = new Reg(param2);
}
virtual ~GroupProx() {
delete(_prox);
for (int i = 0; i<static_cast<int>(_groups.size()); ++i) delete(_groups[i]);
};
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
const int maxn= this->_intercept ? x.n()-1 : x.n();
if (!_groups.empty()) {
for (int i = 0; i<static_cast<int>(_groups.size()); ++i) {
list_int* group=_groups[i];
Vector<T> tmp(group->size());
Vector<T> tmp2(group->size());
int count=0;
for (const_iterator_int it = group->begin(); it != group->end(); ++it) {
tmp[count++]=x[*it];
}
_prox->prox(tmp,tmp2,lambda);
count=0;
for (const_iterator_int it = group->begin(); it != group->end(); ++it) {
y[*it]=tmp2[count++];
}
}
} else {
Vector<T> tmp;
Vector<T> tmp2;
const int p = _size_group;
for (int i = 0; i+p-1<maxn; i+=p) {
tmp.setPointer(x.rawX()+i,p);
tmp2.setPointer(y.rawX()+i,p);
_prox->prox(tmp,tmp2,lambda);
}
}
}
T inline eval(const Vector<T>& x) const {
const int maxn= this->_intercept ? x.n()-1 : x.n();
T sum=0;
if (!_groups.empty()) {
for (int i = 0; i<static_cast<int>(_groups.size()); ++i) {
list_int* group=_groups[i];
Vector<T> tmp(group->size());
int count=0;
for (const_iterator_int it = group->begin(); it != group->end(); ++it) {
tmp[count++]=x[*it];
}
sum+=_prox->eval(tmp);
}
} else {
Vector<T> tmp;
const int p = _size_group;
for (int i = 0; i+p-1<maxn; i+=p) {
tmp.setPointer(x.rawX()+i,p);
sum+=_prox->eval(tmp);
}
}
return sum;
}
virtual bool is_fenchel() const { return _prox->is_fenchel(); };
void inline fenchel(const Vector<T>& x, T& val, T& scal) const {
const int maxn= this->_intercept ? x.n()-1 : x.n();
T val2;
T scal2;
scal=T(1.0);
val=0;
if (!_groups.empty()) {
for (int i = 0; i<static_cast<int>(_groups.size()); ++i) {
list_int* group=_groups[i];
Vector<T> tmp(group->size());
int count=0;
for (const_iterator_int it = group->begin(); it != group->end(); ++it) {
tmp[count++]=x[*it];
}
_prox->fenchel(tmp,val2,scal2);
val+=val2;
scal=MIN(scal,scal2);
}
} else {
const int p = _size_group;
Vector<T> tmp;
for (int i = 0; i+p-1<maxn; i+=p) {
tmp.setPointer(x.rawX()+i,p);
_prox->fenchel(tmp,val2,scal2);
val+=val2;
scal=MIN(scal,scal2);
}
}
};
protected:
int _size_group;
std::vector<list_int*> _groups;
Reg* _prox;
};
template <typename T>
struct GroupLassoL2 {
typedef GroupProx<T, normL2<T> > type;
};
template <typename T>
struct GroupLassoLINF {
typedef GroupProx<T, normLINF<T> > type;
};
template <typename T>
struct GroupLassoL2_L1 {
typedef ComposeProx<T, Vector<T>, typename GroupLassoL2<T>::type, Lasso<T>, false> type;
};
template <typename T>
struct GroupLassoLINF_L1 {
typedef ComposeProx<T, Vector<T>, typename GroupLassoLINF<T>::type, Lasso<T>, false> type;
};
template <typename T>
class MixedL1L2 : public Regularizer<T,Matrix<T> > {
public:
MixedL1L2(const ParamReg<T>& param) : Regularizer<T,Matrix<T> >(param) { };
virtual ~MixedL1L2() { };
void inline prox(const Matrix<T>& x, Matrix<T>& y, const T lambda) {
Vector<T> norm;
y.copy(x);
if (this->_pos) y.thrsPos();
y.norm_2_rows(norm);
y.setZeros();
const int m = x.m();
const int n = x.n();
for (int i = 0; i<m; ++i) {
if (norm[i] > lambda) {
T scal = (norm[i]-lambda)/norm[i];
for (int j = 0; j<n; ++j)
y[j*m+i] = x[j*m+i]*scal;
}
}
if (this->_pos) y.thrsPos();
if (this->_intercept)
for (int j = 0; j<n; ++j)
y[j*m+m-1]=x[j*m+m-1];
}
T inline eval(const Matrix<T>& x) const {
Vector<T> norm;
x.norm_2_rows(norm);
return this->_intercept ? norm.asum() - norm[norm.n() -1] : norm.asum();
}
virtual bool is_subgrad() const { return true; };
virtual void sub_grad(const Matrix<T>& input, Matrix<T>& output) const {
Vector<T> norm;
input.norm_2_rows(norm);
for (int i = 0; i<norm.n(); ++i) {
if (norm[i] < 1e-20) norm[i]=T(1.0);
}
norm.inv();
if (this->_intercept) norm[norm.n()-1]=0;
output.copy(input);
output.multDiagLeft(norm);
};
void inline fenchel(const Matrix<T>& input, T& val, T& scal) const {
Vector<T> norm;
if (this->_pos) {
Matrix<T> output;
output.copy(input);
output.thrsPos();
output.norm_2_rows(norm);
} else {
input.norm_2_rows(norm);
}
T mm = norm.fmaxval();
scal= mm > 1.0 ? T(1.0)/mm : 1.0;
val=0;
      if (this->_intercept && (abs<T>(norm[norm.n()-1]) > EPSILON)) val=INFINITY;
};
};
template <typename T>
class MixedL1LINF : public Regularizer<T,Matrix<T> > {
public:
MixedL1LINF(const ParamReg<T>& param) : Regularizer<T,Matrix<T> >(param) { };
virtual ~MixedL1LINF() { };
void inline prox(const Matrix<T>& x, Matrix<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
Vector<T> row(x.n());
Vector<T> row2(x.n());
const int maxn= this->_intercept ? x.m()-1 : x.m();
for (int i = 0; i< maxn; ++i) {
for (int j = 0; j<x.n(); ++j)
row[j]=y(i,j);
row.l1project(row2,lambda);
for (int j = 0; j<x.n(); ++j)
y(i,j) = row[j]-row2[j];
}
}
T inline eval(const Matrix<T>& x) const {
Vector<T> norm;
x.norm_inf_rows(norm);
return this->_intercept ? norm.asum() - norm[norm.n()-1] : norm.asum();
}
void inline fenchel(const Matrix<T>& input, T& val, T& scal) const {
Vector<T> norm;
if (this->_pos) {
Matrix<T> output;
output.copy(input);
output.thrsPos();
output.norm_l1_rows(norm);
} else {
input.norm_l1_rows(norm);
}
if (this->_intercept) norm[norm.n()-1]=0;
T mm = norm.fmaxval();
scal= mm > 1.0 ? T(1.0)/mm : T(1.0);
val=0;
if (this->_intercept && (abs<T>(norm[norm.n()-1]) > EPSILON)) val=INFINITY;
};
virtual bool is_subgrad() const { return true; };
virtual void sub_grad(const Matrix<T>& input, Matrix<T>& output) const {
output.resize(input.m(),input.n());
output.setZeros();
const int maxm= this->_intercept ? input.m()-1 : input.m();
Vector<T> row(input.n());
for (int i = 0; i<maxm; ++i) {
input.copyRow(i,row);
T max=row.fmaxval();
if (max > 1e-15) {
int num_max=0;
for (int j = 0; j<row.n(); ++j) {
if (abs<T>(max-abs<T>(row[j])) < 1e-15)
num_max++;
}
T add = T(1.0)/num_max;
for (int j = 0; j<row.n(); ++j) {
if (abs<T>(max-abs<T>(row[j])) < 1e-15)
row[j] = row[j] > 0 ? add : -add;
}
output.setRow(i,row);
}
}
};
};
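/* The row loop in MixedL1LINF::prox uses the Moreau decomposition:
prox_{lambda*Omega}(v) = v - Proj_{lambda*B}(v), where B is the unit ball of
the dual norm. The dual of l_inf is l1, hence row - l1project(row,lambda).
A sketch with a caller-supplied l1-ball projection (hypothetical callback,
assumes <vector>): */
template <typename T, typename ProjL1>
void linf_prox_sketch(const T* v, T* out, int n, T lambda, ProjL1 proj_l1) {
std::vector<T> p(v, v + n);
proj_l1(p.data(), n, lambda); // p = projection of v onto lambda*B_1
for (int i = 0; i < n; ++i) out[i] = v[i] - p[i];
}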
template <typename T>
class TraceNorm : public Regularizer<T,Matrix<T> > {
public:
TraceNorm(const ParamReg<T>& param) : Regularizer<T,Matrix<T> >(param) {
if (param.intercept) {
cerr << "Trace norm implementation is not compatible with intercept, intercept deactivated" << endl;
}
if (param.pos) {
cerr << "Trace norm implementation is not compatible with non-negativity constraints" << endl;
}
};
virtual ~TraceNorm() { };
void inline prox(const Matrix<T>& x, Matrix<T>& y, const T lambda) {
//Matrix<T> tmp;
//tmp.copy(x);
Matrix<T> U;
Matrix<T> V;
Vector<T> S;
x.svd(U,S,V);
S.softThrshold(lambda);
U.multDiagRight(S);
U.mult(V,y);
/* Vector<T> u0(x.m());
u0.setZeros();
Vector<T> u, v;
for (int i = 0; i<MIN(x.m(),x.n()); ++i) {
tmp.svdRankOne(u0,u,v);
T val=v.nrm2();
if (val < lambda) break;
y.rank1Update(u,v,(val-lambda)/val);
tmp.rank1Update(u,v,-T(1.0));
}*/
}
T inline eval(const Matrix<T>& x) const {
Vector<T> tmp;
x.singularValues(tmp);
return tmp.sum();
/* Matrix<T> XtX;
if (x.m() > x.n()) {
x.XtX(XtX);
} else {
x.XXt(XtX);
}
T sum=0;
Vector<T> u0(XtX.m());
u0.setAleat();
for (int i = 0; i<XtX.m(); ++i) {
T val=XtX.eigLargestMagnSym(u0,u0); // uses power method
XtX.rank1Update(u0,u0,-val);
sum+=sqrt(val);
if (val <= 1e-10) break;
}
return sum;
*/
}
void inline fenchel(const Matrix<T>& input, T& val, T& scal) const {
//Vector<T> u0(input.m());
//u0.setZeros();
//Vector<T> u, v;
//input.svdRankOne(u0,u,v);
//T mm = v.nrm2();
Vector<T> tmp;
input.singularValues(tmp);
T mm = tmp.fmaxval();
scal= mm > 1.0 ? T(1.0)/mm : T(1.0);
val=0;
};
};
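/* TraceNorm::prox is singular-value soft-thresholding: if x = U*S*V, then
prox_{lambda*||.||_*}(x) = U*max(S-lambda,0)*V, and eval returns the sum of
singular values (the nuclear norm). The commented-out variant peels off
leading singular pairs one at a time instead of forming a full SVD. */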
template <typename T>
class Rank : public Regularizer<T,Matrix<T> > {
public:
Rank(const ParamReg<T>& param) : Regularizer<T,Matrix<T> >(param) {
if (param.intercept) {
cerr << "Rank implementation is not compatible with intercept, intercept deactivated" << endl;
}
if (param.pos) {
cerr << "Rank implementation is not compatible with non-negativity constraints" << endl;
}
};
virtual ~Rank() { };
void inline prox(const Matrix<T>& x, Matrix<T>& y, const T lambda) {
Matrix<T> tmp;
tmp.copy(x);
y.resize(x.m(),x.n());
y.setZeros();
Vector<T> u0(x.m());
u0.setZeros();
Vector<T> u, v;
for (int i = 0; i<MIN(x.m(),x.n()); ++i) {
tmp.svdRankOne(u0,u,v);
T val=v.nrm2();
if (val*val < lambda) break;
y.rank1Update(u,v);
tmp.rank1Update(u,v,-T(1.0));
}
}
T inline eval(const Matrix<T>& x) const {
Matrix<T> XtX;
if (x.m() > x.n()) {
x.XtX(XtX);
} else {
x.XXt(XtX);
}
T sum=0;
Vector<T> u0(XtX.m());
u0.setAleat();
for (int i = 0; i<XtX.m(); ++i) {
T val=XtX.eigLargestMagnSym(u0,u0); // uses power method
XtX.rank1Update(u0,u0,-val);
sum++;
if (val <= 1e-10) break;
}
return sum;
}
virtual bool is_fenchel() const { return false; };
void inline fenchel(const Matrix<T>& input, T& val, T& scal) const { };
};
template <typename T>
inline void convert_paths_to_mat(const List<Path<long long>*>& paths,SpMatrix<T>& paths_mat, const int n) {
int nzmax=0;
for (ListIterator<Path<long long>*> it=paths.begin(); it != paths.end(); ++it)
nzmax+=it->nodes.size();
paths_mat.resize(n,paths.size(),nzmax);
INTM* pB =paths_mat.pB();
INTM* pE =paths_mat.pE();
INTM* r =paths_mat.r();
T* v =paths_mat.v();
int count_col=0;
int count=0;
pB[0]=0;
for (ListIterator<Path<long long>*> it_path=paths.begin();
it_path != paths.end(); ++it_path) {
for (const_iterator_int it = it_path->nodes.begin();
it != it_path->nodes.end(); ++it) {
r[count]= *it;
v[count++]= it_path->flow;
}
pB[++count_col]=count;
}
for (int i = 0; i<paths_mat.n(); ++i) sort(r,v,pB[i],pE[i]-1);
};
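/* The matrix is built in CSC form: column i holds the (sorted) node indices
of path i with the path flow as value. For instance, two paths {0,2} and {1}
with flows f0 and f1 over n=3 nodes yield pB={0,2,3}, r={0,2,1} and
v={f0,f0,f1} before the per-column sort. */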
template <typename T>
class GraphPathL0 : public Regularizer<T> {
public:
GraphPathL0(const ParamReg<T>& param) : Regularizer<T>(param) {
const GraphPathStruct<T>& graph=*(param.graph_path_st);
_graph.init_graph(graph);
}
virtual ~GraphPathL0() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
_graph.proximal_l0(y.rawX(),lambda);
};
T inline eval(const Vector<T>& x) const {
return const_cast<GraphPath<T>* >(&_graph)->eval_l0(x.rawX());
};
T inline eval_paths(const Vector<T>& x, SpMatrix<T>& paths_mat) const {
List<Path<long long>*> paths;
T val=const_cast<GraphPath<T>* >(&_graph)->eval_l0(x.rawX(),&paths);
convert_paths_to_mat<T>(paths,paths_mat,_graph.n());
for (ListIterator<Path<long long>*> it_path=paths.begin();
it_path != paths.end(); ++it_path) delete(*it_path);
return val;
};
virtual bool is_fenchel() const { return false; };
void inline fenchel(const Vector<T>& input, T& val, T& scal) const { };
private:
GraphPath<T> _graph;
};
template <typename T>
class GraphPathConv : public Regularizer<T> {
public:
GraphPathConv(const ParamReg<T>& param) : Regularizer<T>(param) {
const GraphPathStruct<T>& graph=*(param.graph_path_st);
_graph.init_graph(graph);
}
virtual ~GraphPathConv() { };
void inline prox(const Vector<T>& x, Vector<T>& y, const T lambda) {
y.copy(x);
if (this->_pos) y.thrsPos();
_graph.proximal_conv(y.rawX(),lambda);
};
T inline eval(const Vector<T>& x) const {
return const_cast<GraphPath<T>* >(&_graph)->eval_conv(x.rawX());
};
T inline eval_dual_norm(const Vector<T>& x) const {
return const_cast<GraphPath<T>* >(&_graph)->eval_dual_norm(x.rawX(),NULL);
};
T inline eval_paths(const Vector<T>& x, SpMatrix<T>& paths_mat) const {
List<Path<long long>*> paths;
T val=const_cast<GraphPath<T>* >(&_graph)->eval_conv(x.rawX(),&paths);
convert_paths_to_mat<T>(paths,paths_mat,_graph.n());
for (ListIterator<Path<long long>*> it_path=paths.begin();
it_path != paths.end(); ++it_path) delete(*it_path);
return val;
};
T inline eval_dual_norm_paths(const Vector<T>& x, SpMatrix<T>& paths_mat) const {
Path<long long> path;
T val=const_cast<GraphPath<T>* >(&_graph)->eval_dual_norm(x.rawX(),&path.nodes);
List<Path<long long>*> paths;
paths.push_back(&path);
path.flow_int=1;
path.flow=double(1.0);
convert_paths_to_mat<T>(paths,paths_mat,_graph.n());
return val;
};
virtual bool is_fenchel() const { return true; };
void inline fenchel(const Vector<T>& input, T& val, T& scal) const {
T mm;
if (this->_pos) {
Vector<T> output;
output.copy(input);
output.thrsPos();
mm = const_cast<GraphPath<T>* >(&_graph)->eval_dual_norm(output.rawX(),NULL);
} else {
mm = const_cast<GraphPath<T>* >(&_graph)->eval_dual_norm(input.rawX(),NULL);
}
scal= mm > 1.0 ? T(1.0)/mm : T(1.0);
val=0;
if (this->_intercept && (abs<T>(input[input.n()-1]) > EPSILON)) val=INFINITY;
};
private:
GraphPath<T> _graph;
};
template <typename T,typename Reg>
class RegMat : public Regularizer<T,Matrix<T> > {
public:
RegMat(const ParamReg<T>& param) : Regularizer<T,Matrix<T> >(param) {
_transpose=param.transpose;
const int N = param.num_cols;
_regs=new Reg*[N];
_N=N;
for (int i = 0; i<N; ++i)
_regs[i]=new Reg(param);
};
virtual ~RegMat() {
for (int i = 0; i<_N; ++i) {
delete(_regs[i]);
_regs[i]=NULL;
}
delete[](_regs);
};
void inline reset() {
for (int i = 0; i<_N; ++i) _regs[i]->reset();
};
void inline prox(const Matrix<T>& x, Matrix<T>& y, const T lambda) {
y.copy(x);
int i;
if (_transpose) {
#pragma omp parallel for private(i)
for (i = 0; i<_N; ++i) {
Vector<T> colx, coly;
x.copyRow(i,colx);
_regs[i]->prox(colx,coly,lambda);
y.setRow(i,coly);
}
} else {
#pragma omp parallel for private(i)
for (i = 0; i<_N; ++i) {
Vector<T> colx, coly;
x.refCol(i,colx);
y.refCol(i,coly);
_regs[i]->prox(colx,coly,lambda);
}
}
};
virtual bool is_subgrad() const {
bool ok=true;
for (int i = 0; i<_N; ++i)
ok=ok && _regs[i]->is_subgrad();
return ok;
};
void inline sub_grad(const Matrix<T>& x, Matrix<T>& y) const {
y.resize(x.m(),x.n());
Vector<T> colx, coly, cold;
if (_transpose) {
for (int i = 0; i<_N; ++i) {
x.copyRow(i,colx);
_regs[i]->sub_grad(colx,coly);
y.setRow(i,coly);
}
} else {
for (int i = 0; i<_N; ++i) {
x.refCol(i,colx);
y.refCol(i,coly);
_regs[i]->sub_grad(colx,coly);
}
}
};
T inline eval(const Matrix<T>& x) const {
T sum = 0;
int i;
#pragma omp parallel for private(i)
for (i = 0; i<_N; ++i) {
Vector<T> col;
if (_transpose) {
x.copyRow(i,col);
} else {
x.refCol(i,col);
}
#pragma omp critical
sum += _regs[i]->eval(col);
}
return sum;
};
void inline fenchel(const Matrix<T>& input, T& val, T& scal) const {
Vector<T> col;
val = 0;
scal = 1.0;
for (int i = 0; i<_N; ++i) {
if (_transpose) {
input.copyRow(i,col);
} else {
input.refCol(i,col);
}
T val2 = 0;
T scal2 = 1.0;
_regs[i]->fenchel(col,val2,scal2);
scal=MIN(scal,scal2);
val += val2;
}
};
virtual bool is_fenchel() const {
bool ok=true;
for (int i = 0; i<_N; ++i)
ok = ok && _regs[i]->is_fenchel();
return ok;
};
protected:
int _N;
Reg** _regs;
bool _transpose;
};
template <typename T>
struct MixedL1L2_L1 {
typedef ComposeProx<T, Matrix<T>, MixedL1L2<T>, RegMat<T, Lasso<T> >, false> type;
};
template <typename T>
struct MixedL1LINF_L1 {
typedef ComposeProx<T, Matrix<T>, MixedL1LINF<T>, RegMat<T, Lasso<T> >, false> type;
};
template <typename T>
class SpecGraphMat : public Regularizer<T,Matrix<T> > {
public:
SpecGraphMat(const ParamReg<T>& param) : Regularizer<T,Matrix<T> >(param) { };
virtual ~SpecGraphMat() { delete(_graphlasso); };
virtual void dummy() = 0;
void inline reset() { _graphlasso->reset(); };
void inline prox(const Matrix<T>& x, Matrix<T>& y, const T lambda) {
Vector<T> xv, yv;
x.toVect(xv);
y.resize(x.m(),x.n());
y.toVect(yv);
_graphlasso->prox(xv,yv,lambda);
}
T inline eval(const Matrix<T>& X) const {
Vector<T> xv;
X.toVect(xv);
return _graphlasso->eval(xv);
}
void inline fenchel(const Matrix<T>& input, T& val, T& scal) const {
Vector<T> inv;
input.toVect(inv);
_graphlasso->fenchel(inv,val,scal);
};
virtual bool is_fenchel() const {
return _graphlasso->is_fenchel();
};
protected:
GraphLasso<T>* _graphlasso;
};
template <typename T>
class MixedL1LINFCR : public SpecGraphMat<T> {
public:
MixedL1LINFCR(const int m, const ParamReg<T>& param) : SpecGraphMat<T>(param) {
const int n = param.num_cols;
const T l2dl1 = param.lambda2d1;
GraphStruct<T> graph_st;
graph_st.Nv=m*n;
graph_st.Ng=m+n;
T* weights = new T[graph_st.Ng];
for (int i = 0; i<n; ++i) weights[i]=T(1.0);
for (int i = 0; i<m; ++i) weights[i+n]=l2dl1;
graph_st.weights=weights;
mwSize* gv_jc = new mwSize[graph_st.Ng+1];
mwSize* gv_ir = new mwSize[m*n*2];
for (int i = 0; i<n; ++i) {
gv_jc[i]=i*m;
for (int j = 0; j<m; ++j)
gv_ir[i*m+j]=i*m+j;
}
for (int i = 0; i<m; ++i) {
gv_jc[i+n]=i*n+n*m;
for (int j = 0; j<n; ++j)
gv_ir[i*n+n*m+j]=j*m+i;
}
gv_jc[m+n]=2*m*n;
graph_st.gv_jc=gv_jc;
graph_st.gv_ir=gv_ir;
mwSize* gg_jc = new mwSize[graph_st.Ng+1];
mwSize* gg_ir = new mwSize[1];
for (int i = 0; i< graph_st.Ng+1; ++i) gg_jc[i]=0;
graph_st.gg_jc=gg_jc;
graph_st.gg_ir=gg_ir;
ParamReg<T> param_lasso = param;
param_lasso.graph_st = &graph_st;
this->_graphlasso = new GraphLasso<T>(param_lasso);
delete[](weights);
delete[](gv_jc);
delete[](gv_ir);
delete[](gg_jc);
delete[](gg_ir);
};
virtual ~MixedL1LINFCR() { };
virtual void dummy() { };
};
template <typename T>
class TreeMult : public SpecGraphMat<T> {
public:
TreeMult(const ParamReg<T>& param) : SpecGraphMat<T>(param) {
const TreeStruct<T>& tree_st=*(param.tree_st);
const int N = param.num_cols;
const T l1dl2 = param.lambda2d1;
GraphStruct<T> graph_st;
int Nv=tree_st.Nv;
if (param.intercept) ++Nv;
int Ng=tree_st.Ng;
graph_st.Nv=Nv*N;
graph_st.Ng=Ng*(N+1);
T* weights=new T[graph_st.Ng];
for (int i = 0; i<N+1; ++i)
for (int j = 0; j<Ng; ++j)
weights[i*Ng+j]=tree_st.weights[j];
for (int j = 0; j<Ng; ++j)
weights[N*Ng+j]*=l1dl2;
graph_st.weights=weights;
int nzmax_tree=0;
for (int i = 0; i<Ng; ++i)
nzmax_tree += tree_st.N_own_variables[i];
int nzmax_v=nzmax_tree*N;
mwSize* gv_jc = new mwSize[graph_st.Ng+1];
mwSize* gv_ir = new mwSize[nzmax_v];
int count=0;
for (int i = 0; i<N; ++i) {
for (int j = 0; j<Ng; ++j) {
gv_jc[i*Ng+j]=count;
for (int k = 0; k<tree_st.N_own_variables[j]; ++k) {
gv_ir[gv_jc[i*Ng+j] + k] =Nv*i+tree_st.own_variables[j]+k;
++count;
}
}
}
for (int i = 0; i<Ng+1; ++i) {
gv_jc[N*Ng+i]=count;
}
graph_st.gv_jc=gv_jc;
graph_st.gv_ir=gv_ir;
mwSize* gg_jc = new mwSize[graph_st.Ng+1];
int nzmax_tree2=tree_st.groups_jc[Ng];
int nzmax2=nzmax_tree2*(N+1)+Ng*N;
mwSize* gg_ir = new mwSize[nzmax2];
count=0;
for (int i = 0; i<N; ++i) {
for (int j = 0; j<Ng; ++j) {
gg_jc[i*Ng+j] = count;
for (int k = tree_st.groups_jc[j]; k<static_cast<int>(tree_st.groups_jc[j+1]); ++k) {
gg_ir[count++] = i*Ng+tree_st.groups_ir[k];
}
}
}
for (int i = 0; i<Ng; ++i) {
gg_jc[N*Ng+i] = count;
for (int j = tree_st.groups_jc[i]; j<static_cast<int>(tree_st.groups_jc[i+1]); ++j) {
gg_ir[count++] = N*Ng+tree_st.groups_ir[j];
}
for (int j = 0; j<N; ++j) {
gg_ir[count++] = j*Ng+i;
}
}
gg_jc[(N+1)*Ng]=nzmax2;
graph_st.gg_jc=gg_jc;
graph_st.gg_ir=gg_ir;
// param.graph_st=&graph_st;
ParamReg<T> param_lasso = param;
param_lasso.graph_st=&graph_st;
this->_graphlasso = new GraphLasso<T>(param_lasso);
delete[](weights);
delete[](gv_ir);
delete[](gv_jc);
delete[](gg_ir);
delete[](gg_jc);
};
virtual void dummy() { };
virtual ~TreeMult() { };
};
template <typename T>
class GraphMult : public SpecGraphMat<T> {
public:
GraphMult(const ParamReg<T>& param) : SpecGraphMat<T>(param) {
const GraphStruct<T>& graph_st=*(param.graph_st);
const int N = param.num_cols;
const T l1dl2 = param.lambda2d1;
GraphStruct<T> g_st;
int Nv=graph_st.Nv;
int Ng=graph_st.Ng;
g_st.Nv=Nv*N;
g_st.Ng=Ng*(N+1);
T* weights=new T[g_st.Ng];
for (int i = 0; i<N+1; ++i)
for (int j = 0; j<Ng; ++j)
weights[i*Ng+j]=graph_st.weights[j];
for (int j = 0; j<Ng; ++j)
weights[N*Ng+j]*=l1dl2;
g_st.weights=weights;
int nzmax_graph=graph_st.gv_jc[Ng]; // total variable entries, read from gv_jc (not gg_jc)
int nzmax_v=nzmax_graph*N;
mwSize* gv_jc = new mwSize[g_st.Ng+1];
mwSize* gv_ir = new mwSize[nzmax_v];
int count=0;
for (int i = 0; i<N; ++i) {
for (int j = 0; j<Ng; ++j) {
gv_jc[i*Ng+j]=count;
for (mwSize k = graph_st.gv_jc[j]; k<graph_st.gv_jc[j+1]; ++k) {
gv_ir[count++] =Nv*i+graph_st.gv_ir[k];
}
}
}
for (int i = 0; i<Ng+1; ++i) {
gv_jc[N*Ng+i]=count;
}
g_st.gv_jc=gv_jc;
g_st.gv_ir=gv_ir;
mwSize* gg_jc = new mwSize[g_st.Ng+1];
int nzmax_tree2=graph_st.gg_jc[Ng];
int nzmax2=nzmax_tree2*(N+1)+Ng*N;
mwSize* gg_ir = new mwSize[nzmax2];
count=0;
for (int i = 0; i<N; ++i) {
for (int j = 0; j<Ng; ++j) {
gg_jc[i*Ng+j] = count;
for (mwSize k = graph_st.gg_jc[j]; k<graph_st.gg_jc[j+1]; ++k) {
gg_ir[count++] = i*Ng+graph_st.gg_ir[k];
}
}
}
for (int i = 0; i<Ng; ++i) {
gg_jc[N*Ng+i] = count;
for (int j = graph_st.gg_jc[i]; j<static_cast<int>(graph_st.gg_jc[i+1]); ++j) {
gg_ir[count++] = N*Ng+graph_st.gg_ir[j];
}
for (int j = 0; j<N; ++j) {
gg_ir[count++] = j*Ng+i;
}
}
gg_jc[(N+1)*Ng]=nzmax2;
g_st.gg_jc=gg_jc;
g_st.gg_ir=gg_ir;
ParamReg<T> param_lasso = param;
param_lasso.graph_st = &g_st;
this->_graphlasso = new GraphLasso<T>(param_lasso);
delete[](weights);
delete[](gv_ir);
delete[](gv_jc);
delete[](gg_ir);
delete[](gg_jc);
};
virtual void dummy() { };
virtual ~GraphMult() { };
};
template <typename T, typename D, typename E>
T duality_gap(Loss<T,D,E>& loss, Regularizer<T,D>& regularizer, const D& x,
const T lambda, T& best_dual, const bool verbose = false) {
if (!regularizer.is_fenchel() || !loss.is_fenchel()) {
cerr << "Error: no duality gap available" << endl;
exit(1);
}
T primal= loss.eval(x)+lambda*regularizer.eval(x);
bool intercept=regularizer.is_intercept();
D grad1, grad2;
loss.var_fenchel(x,grad1,grad2,intercept);
T dual;
grad2.scal(-T(1.0)/lambda);
T val=0;
T scal=1.0;
regularizer.fenchel(grad2,val,scal);
dual = -lambda*val;
grad1.scal(scal);
dual -= loss.fenchel(grad1);
dual = MAX(dual,best_dual);
T delta= primal == 0 ? 0 : (primal-dual)/abs<T>(primal);
if (verbose) {
cout << "Relative duality gap: " << delta << endl;
flush(cout);
}
best_dual=dual;
return delta;
}
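/* Usage sketch (hypothetical data; SqLoss and Lasso are the types used
elsewhere in this file). The solvers below call duality_gap every it0
iterations and stop once the relative gap falls under param.tol:
SqLoss<T> loss(D);
Lasso<T> reg(param_reg);
T best_dual = -INFINITY;
T gap = duality_gap(loss, reg, x, lambda, best_dual, true); */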
template <typename T, typename D, typename E>
T duality_gap(Loss<T,D,E>& loss, Regularizer<T,D>& regularizer, const D& x,
const T lambda, const bool verbose = false) {
T best_dual=-INFINITY;
return duality_gap(loss,regularizer,x,lambda,best_dual,verbose);
}
template <typename T>
void dualityGraph(const Matrix<T>& X, const Matrix<T>& D, const Matrix<T>& alpha0,
Vector<T>& res, const ParamFISTA<T>& param,
const GraphStruct<T>* graph_st) {
Regularizer<T>* regularizer=new GraphLasso<T>(*graph_st,
param.intercept,param.resetflow,param.pos,param.clever);
Loss<T>* loss;
switch (param.loss) {
case SQUARE: loss=new SqLoss<T>(D); break;
case POISSON: loss=new PoissonLoss<T>(D,param.delta); break;
case LOG: loss = new LogLoss<T>(D); break;
case LOGWEIGHT: loss = new LogLoss<T,true>(D); break;
default: cerr << "Not implemented"; exit(1);
}
Vector<T> Xi;
X.refCol(0,Xi);
loss->init(Xi);
Vector<T> alpha0i;
alpha0.refCol(0,alpha0i);
regularizer->reset();
res[0]=loss->eval(alpha0i)+param.lambda*regularizer->eval(alpha0i);
res[1]=duality_gap(*loss,*regularizer,alpha0i,param.lambda);
delete(loss);
delete(regularizer);
}
template <typename T>
void writeLog(const int iter, const T time, const T primal, const T dual,
char* name) {
std::ofstream f;
f.precision(12);
f.flags(std::ios_base::scientific);
f.open(name, ofstream::app);
f << iter << " " << primal << " " << dual << " " << time << std::endl;
f.close();
};
template <typename T, typename D, typename E>
void subGradientDescent_Generic(Loss<T,D,E>& loss, Regularizer<T,D>& regularizer, const D& x0, D& x,
Vector<T>& optim_info,
const ParamFISTA<T>& param) {
D grad;
D sub_grad;
const T lambda=param.lambda;
const int it0 = MAX(1,param.it0);
const bool duality = loss.is_fenchel() && regularizer.is_fenchel();
optim_info.set(-1);
T best_dual=-INFINITY;
T rel_duality_gap=-INFINITY;
Timer time;
time.start();
int it;
for (it = 1; it<=param.max_it; ++it) {
/// print loss
if (param.verbose && ((it % it0) == 0)) {
time.stop();
T los=loss.eval(x) + lambda*regularizer.eval(x);
optim_info[0]=los;
T sec=time.getElapsed();
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << " ";
if (param.log)
writeLog(it,sec,los,best_dual,param.logName);
if (param.verbose)
cout << endl;
flush(cout);
time.start();
}
/// compute gradient
loss.grad(x,grad);
regularizer.sub_grad(x,sub_grad);
T step = param.sqrt_step ? param.a/(param.b+sqrt(static_cast<T>(it))) : param.a/(param.b+(static_cast<T>(it)));
x.add(grad,-step);
x.add(sub_grad,-lambda*step);
if (duality && ((it % it0) == 0)) {
time.stop();
rel_duality_gap=duality_gap(loss,regularizer,x,lambda,best_dual,param.verbose);
optim_info[1]=best_dual;
optim_info[2]=rel_duality_gap;
if (rel_duality_gap < param.tol) break;
time.start();
}
}
if ((it % it0) != 0 || !param.verbose) {
T los=loss.eval(x) + lambda*regularizer.eval(x);
optim_info[0]=los;
if (duality) {
rel_duality_gap=duality_gap(loss,regularizer,x,lambda,best_dual,param.verbose);
optim_info[1]=best_dual;
optim_info[2]=rel_duality_gap;
}
}
optim_info[3]=it;
}
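/* The update above uses the classical diminishing step size
step_t = a/(b + sqrt(t)) (or a/(b + t) when sqrt_step is false), the
standard schedule for subgradient methods, which have no line search. */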
template <typename T, typename D, typename E>
void ISTA_Generic(Loss<T,D,E>& loss, Regularizer<T,D>& regularizer, const D& x0, D& x, Vector<T>& optim_info,
const ParamFISTA<T>& param) {
const int it0 = MAX(1,param.it0);
const T lambda=param.lambda;
T L=param.L0;
x.copy(x0);
D grad, tmp, prox, old;
/// linesearch_mode =
/// 0: regular monotonic scheme
/// 1: regular monotonic scheme but restart at L0
/// 2: Barzilai-Borwein
/// 3: back_tracking in both directions
D sbb, xbb;
const T alphamax=T(10e30)/L;
const T alphamin=T(10e-30)/L;
const bool duality = loss.is_fenchel() && regularizer.is_fenchel();
const bool dc = regularizer.is_concave();
optim_info.set(-1);
Timer time;
time.start();
T rel_duality_gap=-INFINITY;
int it;
T best_dual=-INFINITY;
for (it = 1; it<=param.max_it; ++it) {
/// print loss
if (param.verbose && ((it % it0) == 0)) {
time.stop();
T los=loss.eval(x) + lambda*regularizer.eval(x);
optim_info[0]=los;
T sec=time.getElapsed();
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << ", L: " << L;
flush(cout);
if (param.log)
writeLog(it,sec,los,best_dual,param.logName);
time.start();
}
/// compute gradient
loss.grad(x,grad);
if (dc) regularizer.linearize(x);
if (param.linesearch_mode==2 && it > 1) {
sbb.sub(grad);
xbb.sub(x);
T alpha=sbb.dot(xbb)/sbb.nrm2sq();
alpha=MIN(MAX(alpha,alphamin),alphamax);
L=1/alpha;
}
if (param.linesearch_mode==1) L=param.L0;
int iter=1;
while (iter < param.max_iter_backtracking) {
prox.copy(x);
prox.add(grad,-T(1.0)/L);
regularizer.prox(prox,tmp,lambda/L);
if ((param.linesearch_mode==2 && it > 1) || param.fixed_step || loss.test_backtracking(x,grad,tmp,L)) {
break;
}
L *= param.gamma;
if (param.verbose && ((it % it0) == 0))
cout << " " << L;
++iter;
}
if (param.linesearch_mode==3 && iter==1 && !param.fixed_step) {
while (iter < param.max_iter_backtracking) {
L /= param.gamma;
prox.copy(x);
prox.add(grad,-T(1.0)/L);
regularizer.prox(prox,tmp,lambda/L);
if (!loss.test_backtracking(x,grad,tmp,L)) {
L *= param.gamma;
prox.copy(x);
prox.add(grad,-T(1.0)/L);
regularizer.prox(prox,tmp,lambda/L);
break;
}
if (param.verbose && ((it % it0) == 0))
cout << " " << L;
++iter;
}
}
if (param.verbose && ((it % it0) == 0))
cout << endl;
if (param.linesearch_mode==2) {
sbb.copy(grad);
xbb.copy(x);
}
old.copy(x);
x.copy(tmp);
if (duality) {
if ((it % it0) == 0) {
time.stop();
rel_duality_gap=duality_gap(loss,regularizer,x,lambda,best_dual,param.verbose);
optim_info[1]=best_dual;
optim_info[2]=rel_duality_gap;
if (rel_duality_gap < param.tol) break;
time.start();
}
} else {
old.sub(x);
if (sqrt(old.nrm2sq()/MAX(EPSILON,x.nrm2sq())) < param.tol) break;
}
}
T los=loss.eval(x) + lambda*regularizer.eval(x);
optim_info[0]=los;
T sec=time.getElapsed();
if (param.verbose) {
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << ", L: " << L << endl;
flush(cout);
}
if (duality) {
rel_duality_gap=duality_gap(loss,regularizer,x,lambda,best_dual,param.verbose);
optim_info[1]=best_dual;
optim_info[2]=rel_duality_gap;
}
optim_info[3]=it;
}
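/* The linesearch_mode==2 branch implements the Barzilai-Borwein "BB2" step:
with dx = x_{k-1}-x_k and dg = g_{k-1}-g_k, it sets 1/L = <dg,dx>/<dg,dg>,
clamped to [alphamin, alphamax]. Standalone sketch on plain arrays
(illustrative only): */
template <typename T>
T bb2_step_sketch(const T* x_old, const T* x_new,
const T* g_old, const T* g_new, int n, T amin, T amax) {
T num = 0, den = 0;
for (int i = 0; i < n; ++i) {
const T dg = g_old[i] - g_new[i];
num += dg * (x_old[i] - x_new[i]);
den += dg * dg;
}
const T alpha = den > 0 ? num/den : amax;
return alpha < amin ? amin : (alpha > amax ? amax : alpha);
}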
template <typename T, typename D, typename E>
void FISTA_Generic(Loss<T,D,E>& loss, Regularizer<T,D>& regularizer, const D& x0, D& x, Vector<T>& optim_info,
const ParamFISTA<T>& param) {
const int it0 = MAX(1,param.it0);
const T lambda=param.lambda;
T L=param.L0;
T t = 1.0;
T old_t;
D y, grad, prox, tmp;
y.copy(x0);
x.copy(x0);
D sbb, xbb;
const T alphamax=T(10e30)/L;
const T alphamin=T(10e-30)/L;
const bool duality = loss.is_fenchel() && regularizer.is_fenchel();
T rel_duality_gap=-INFINITY;
optim_info.set(-1);
Timer time;
time.start();
int it;
T best_dual=-INFINITY;
for (it = 1; it<=param.max_it; ++it) {
/// print loss
if (param.verbose && ((it % it0) == 0)) {
time.stop();
T los=loss.eval(x) + lambda*regularizer.eval(x);
optim_info[0]=los;
T sec=time.getElapsed();
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << ", L: " << L;
flush(cout);
if (param.log)
writeLog(it,sec,los,best_dual,param.logName);
time.start();
}
/// compute gradient
loss.grad(y,grad);
if (param.linesearch_mode==2) {
if (it > 1) {
sbb.sub(grad);
xbb.sub(y);
T alpha=sbb.dot(xbb)/sbb.nrm2sq();
alpha=MIN(MAX(alpha,alphamin),alphamax);
L=1/alpha;
}
sbb.copy(grad);
xbb.copy(y);
}
int iter=1;
while (iter < param.max_iter_backtracking) {
prox.copy(y);
prox.add(grad,-T(1.0)/L);
regularizer.prox(prox,tmp,lambda/L);
if ((param.linesearch_mode==2 && it > 1) || param.fixed_step || loss.test_backtracking(y,grad,tmp,L)) break;
L *= param.gamma;
if (param.verbose && ((it % it0) == 0))
cout << " " << L;
++iter;
}
if (param.verbose && ((it % it0) == 0))
cout << endl;
prox.copy(x);
prox.sub(tmp);
x.copy(tmp);
old_t=t;
t=(1.0+sqrt(1+4*t*t))/2;
y.copy(x);
y.add(prox,(1-old_t)/t);
if (duality) {
if ((it % it0) == 0) {
time.stop();
rel_duality_gap=duality_gap(loss,regularizer,x,lambda,best_dual,param.verbose);
optim_info[1]=best_dual;
optim_info[2]=rel_duality_gap;
if (rel_duality_gap < param.tol) break;
time.start();
}
} else {
if (sqrt(prox.nrm2sq()/MAX(EPSILON,x.nrm2sq())) < param.tol) break;
}
}
T los=loss.eval(x) + lambda*regularizer.eval(x);
optim_info[0]=los;
T sec=time.getElapsed();
if (param.verbose) {
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << ", L: " << L << endl;
flush(cout);
}
if (duality) {
rel_duality_gap=duality_gap(loss,regularizer,x,lambda,best_dual,param.verbose);
optim_info[1]=best_dual;
optim_info[2]=rel_duality_gap;
}
optim_info[3]=it;
};
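/* The extrapolation in FISTA_Generic is the standard Nesterov sequence:
t_{k+1} = (1 + sqrt(1 + 4*t_k^2))/2,
y_{k+1} = x_{k+1} + ((t_k - 1)/t_{k+1})*(x_{k+1} - x_k).
In the code, prox holds x_k - x_{k+1} after the subtraction, so
y.add(prox, (1-old_t)/t) realizes exactly this expression. */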
template <typename T>
T LagrangianADMM(const SplittingFunction<T, Matrix<T> >& loss, const SplittingFunction<T, SpMatrix<T> >& reg,
const T lambda, const T gamma, const Vector<T>& w, const Matrix<T>& splitted_loss, const SpMatrix<T>& splitted_reg,
const Matrix<T>& multi_loss, const SpMatrix<T>& multi_reg, T& los, const T* weights = NULL) {
const int n_reg=reg.num_components();
//T loss_val = loss.eval(w) + lambda*reg.eval(w);
T lagrangian = loss.eval_split(splitted_loss) + lambda*reg.eval_split(splitted_reg);
Matrix<T> tmp;
tmp.copy(splitted_loss);
tmp.addVecToCols(w,-T(1.0));
T add =0.5*gamma*tmp.normFsq();
lagrangian += add;
los+=add;
if (n_reg > 0) {
SpMatrix<T> stmp;
stmp.copy(splitted_reg);
stmp.addVecToCols(w,-T(1.0));
add=0.5*gamma*stmp.normFsq();
lagrangian += add;
los+=add;
lagrangian -= multi_reg.dot_direct(stmp);
}
lagrangian -= multi_loss.dot(tmp);
return lagrangian;
};
template <typename T>
void update_multipliers_ADMM(Vector<T>& w,
const Matrix<T>& splitted_w_loss,
const Matrix<T>& multipliers_w_loss,
const SpMatrix<T>& splitted_w_reg,
const SpMatrix<T>& multipliers_w_reg,
const T gamma) {
Vector<T> mean(w.n());
splitted_w_loss.sum_cols(mean);
w.copy(mean);
multipliers_w_loss.sum_cols(mean);
w.add(mean,-T(1.0)/gamma);
Vector<T> number_occurences(w.n());
number_occurences.set(splitted_w_loss.n());
const int n_reg=splitted_w_reg.n();
if (n_reg > 0) {
SpVector<T> col;
mean.setZeros();
for (int i = 0; i<n_reg; ++i) {
splitted_w_reg.refCol(i,col);
mean.add(col);
for (int j = 0; j<col.L(); ++j)
number_occurences[col.r(j)]++;
}
w.add(mean);
mean.setZeros();
for (int i = 0; i<n_reg; ++i) {
multipliers_w_reg.refCol(i,col);
mean.add(col);
}
w.add(mean,-T(1.0)/gamma);
};
w.div(number_occurences);
};
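/* The w-update above is the closed-form minimizer of the quadratic coupling
terms of the augmented Lagrangian: coordinate-wise,
w_j = ( sum_splits z_j - (1/gamma) * sum_splits nu_j ) / (#copies of j),
with number_occurences counting how many split copies touch coordinate j. */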
template <typename T>
void update_multipliers_weighted_ADMM(Vector<T>& w,
const Matrix<T>& splitted_w_loss,
const Matrix<T>& multipliers_w_loss,
const SpMatrix<T>& splitted_w_reg,
const SpMatrix<T>& multipliers_w_reg,
const T gamma, const T* inner_weights) {
Vector<T> mean(w.n());
splitted_w_loss.sum_cols(mean);
w.copy(mean);
multipliers_w_loss.sum_cols(mean);
w.add(mean,-T(1.0)/gamma);
Vector<T> number_occurences(w.n());
number_occurences.set(splitted_w_loss.n());
const int n_reg=splitted_w_reg.n();
if (n_reg > 0) {
SpVector<T> col;
mean.setZeros();
for (int i = 0; i<n_reg; ++i) {
splitted_w_reg.refCol(i,col);
for (int j = 0; j<col.L(); ++j) {
mean[col.r(j)]+=inner_weights[j]*col.v(j);
number_occurences[col.r(j)]+=inner_weights[j]*inner_weights[j];
}
}
w.add(mean);
mean.setZeros();
for (int i = 0; i<n_reg; ++i) {
multipliers_w_reg.refCol(i,col);
for (int j = 0; j<col.L(); ++j)
mean[col.r(j)]+=inner_weights[j]*col.v(j);
}
w.add(mean,-T(1.0)/gamma);
};
w.div(number_occurences);
};
template <typename T>
void ADMM(const SplittingFunction<T, Matrix<T> >& loss, const SplittingFunction<T, SpMatrix<T> >& reg,
const Vector<T>& w0, Vector<T>& w, Vector<T>& optim_info,
const ParamFISTA<T>& param) {
const T gamma = param.c;
const int n_reg=reg.num_components();
const int it0 = MAX(1,param.it0);
const T lambda=param.lambda;
w.copy(w0);
Matrix<T> splitted_w_loss;
SpMatrix<T> splitted_w_reg;
Matrix<T> multipliers_w_loss;
SpMatrix<T> multipliers_w_reg;
loss.init_split_variables(multipliers_w_loss);
reg.init_split_variables(multipliers_w_reg);
splitted_w_loss.copy(multipliers_w_loss);
splitted_w_loss.addVecToCols(w);
if (n_reg > 0) {
splitted_w_reg.copy(multipliers_w_reg);
splitted_w_reg.addVecToCols(w);
}
Timer time;
time.start();
int it=0;
T los = INFINITY;
T old_los=INFINITY;
for (it = 0; it<param.max_it; ++it) {
if (((it % it0) == 0)) {
time.stop();
if (param.is_inner_weights) {
los= loss.eval(w)+lambda*reg.eval_weighted(w,splitted_w_reg,
param.inner_weights);
} else {
los= loss.eval(w)+lambda*reg.eval(w);
}
optim_info[0]=los;
T sec=time.getElapsed();
optim_info[2]=sec;
if (param.verbose) {
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << endl;
flush(cout);
if (param.log)
writeLog(it,sec,los,T(0),param.logName);
}
time.start();
}
if (param.is_inner_weights) {
/// update w
update_multipliers_weighted_ADMM(w,splitted_w_loss,multipliers_w_loss,splitted_w_reg,multipliers_w_reg,gamma,param.inner_weights);
/// update the splitting variables
splitted_w_loss.copy(multipliers_w_loss);
splitted_w_loss.scal((1.0)/gamma);
splitted_w_loss.addVecToCols(w);
loss.prox_split(splitted_w_loss,T(1.0)/gamma);
if (n_reg > 0) {
splitted_w_reg.copy(multipliers_w_reg);
splitted_w_reg.scal((1.0)/gamma);
splitted_w_reg.addVecToColsWeighted(w,param.inner_weights);
reg.prox_split(splitted_w_reg,lambda/gamma);
}
/// update multipliers
multipliers_w_loss.addVecToCols(w,gamma);
multipliers_w_loss.add(splitted_w_loss,-gamma);
if (n_reg > 0) {
multipliers_w_reg.addVecToColsWeighted(w,param.inner_weights,
gamma);
multipliers_w_reg.add_direct(splitted_w_reg,-gamma);
}
} else {
/// update w
update_multipliers_ADMM(w,splitted_w_loss,multipliers_w_loss,splitted_w_reg,multipliers_w_reg,gamma);
/// update the splitting variables
splitted_w_loss.copy(multipliers_w_loss);
splitted_w_loss.scal((1.0)/gamma);
splitted_w_loss.addVecToCols(w);
loss.prox_split(splitted_w_loss,T(1.0)/gamma);
if (n_reg > 0) {
splitted_w_reg.copy(multipliers_w_reg);
splitted_w_reg.scal((1.0)/gamma);
splitted_w_reg.addVecToCols(w);
reg.prox_split(splitted_w_reg,lambda/gamma);
}
/// update multipliers
multipliers_w_loss.addVecToCols(w,gamma);
multipliers_w_loss.add(splitted_w_loss,-gamma);
if (n_reg > 0) {
multipliers_w_reg.addVecToCols(w,gamma);
multipliers_w_reg.add_direct(splitted_w_reg,-gamma);
}
}
/// stopping criterion
if ((it % it0) == 0) {
if (it > 0 && (old_los-los)/old_los < param.tol) break;
old_los=los;
}
}
if (param.is_inner_weights) {
los= loss.eval(w)+lambda*reg.eval_weighted(w,splitted_w_reg,
param.inner_weights);
} else {
los= loss.eval(w)+lambda*reg.eval(w);
}
optim_info[0]=los;
optim_info[3]=it;
};
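/* Each ADMM pass above performs the usual three steps:
1. w-update: closed-form average over the split copies (helpers above);
2. z-update: proximal step on each splitting variable, z = prox(w + nu/gamma);
3. dual update: nu += gamma*(w - z) for every split.
The loop monitors the unsplit objective every it0 iterations and stops on
its relative decrease. */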
template <typename T>
void update_multipliers_LinADMM(Vector<T>& w,
const SpMatrix<T>& splitted_w_reg,
const SpMatrix<T>& multipliers_w_reg,
const T gamma, const T delta) {
Vector<T> mean(w.n());
Vector<T> number_occurences(w.n());
number_occurences.set(delta);
const int n_reg=splitted_w_reg.n();
if (n_reg > 0) {
SpVector<T> col;
mean.setZeros();
for (int i = 0; i<n_reg; ++i) {
splitted_w_reg.refCol(i,col);
mean.add(col);
for (int j = 0; j<col.L(); ++j)
number_occurences[col.r(j)]+=gamma;
}
mean.scal(gamma);
for (int i = 0; i<n_reg; ++i) {
multipliers_w_reg.refCol(i,col);
mean.add(col);
}
w.add(mean);
};
w.div(number_occurences);
};
template <typename T>
void LinADMM(const SplittingFunction<T, Matrix<T> >& loss, const SplittingFunction<T, SpMatrix<T> >& reg,
const Vector<T>& w0, Vector<T>& w, Vector<T>& optim_info,
const ParamFISTA<T>& param) {
const T gamma = param.c;
const int n_reg=reg.num_components();
const int it0 = MAX(1,param.it0);
const T lambda=param.lambda;
w.copy(w0);
SpMatrix<T> primal_reg;
SpMatrix<T> dual_reg;
reg.init_split_variables(dual_reg);
if (n_reg > 0) {
primal_reg.copy(dual_reg);
primal_reg.addVecToCols(w);
}
Vector<T> prim_loss;
loss.init_prim_var(prim_loss);
Vector<T> dual_loss;
dual_loss.copy(prim_loss);
Timer time;
time.start();
int it=0;
T los = INFINITY;
T old_los=INFINITY;
for (it = 0; it<param.max_it; ++it) {
/*w.print("w");
prim_loss.print("z");
dual_loss.print("nu");
primal_reg.print("zg");
dual_reg.print("nug");*/
if (((it % it0) == 0)) {
time.stop();
los= loss.eval(w)+lambda*reg.eval(w);
optim_info[0]=los;
T sec=time.getElapsed();
optim_info[2]=sec;
if (param.verbose) {
cout << "Iter: " << it << ", loss: " << los << ", time: " << sec << endl;
flush(cout);
if (param.log)
writeLog(it,sec,los,T(0),param.logName);
}
time.start();
}
/// update primal_loss variables
loss.prox_prim_var(prim_loss,dual_loss,w,gamma);
/// update primal_reg variables
if (n_reg > 0) {
primal_reg.copy(dual_reg);
primal_reg.scal(-(1.0)/gamma);
primal_reg.addVecToCols(w);
reg.prox_split(primal_reg,lambda/gamma);
}
/// update w
loss.compute_new_prim(w,prim_loss,dual_loss,gamma,param.delta);
update_multipliers_LinADMM(w,primal_reg,dual_reg,gamma,param.delta);
/// update multipliers
if (n_reg > 0) {
dual_reg.addVecToCols(w,-gamma);
dual_reg.add_direct(primal_reg,gamma);
}
loss.add_mult_design_matrix(w,dual_loss,-gamma);
dual_loss.add(prim_loss,gamma);
/// stopping criterion
if ((it % it0) == 0) {
if (it > 0 && (old_los-los)/old_los < param.tol) break;
old_los=los;
}
}
los= loss.eval(w)+lambda*reg.eval(w);
optim_info[0]=los;
optim_info[3]=it;
};
template <typename T>
SplittingFunction<T, SpMatrix<T> >* setRegularizerADMM(const ParamFISTA<T>& param,
const GraphStruct<T>* graph_st = NULL,
const TreeStruct<T>* tree_st = NULL) {
SplittingFunction<T, SpMatrix<T> >* reg;
ParamReg<T> param_reg;
param_reg.pos=param.pos;
param_reg.intercept=param.intercept;
param_reg.tree_st=const_cast<TreeStruct<T>* >(tree_st);
param_reg.graph_st=const_cast<GraphStruct<T>* >(graph_st);
param_reg.resetflow=param.resetflow;
param_reg.clever=param.clever;
switch (param.regul) {
case GRAPH: param_reg.linf=true; reg=new GraphLasso<T>(param_reg); break;
case GRAPH_L2: param_reg.linf=false; reg=new GraphLasso<T>(param_reg); break;
case NONE: reg=new None<T>(param_reg); break;
default: cerr << "Not implemented"; exit(1);
}
return reg;
};
template <typename T>
Regularizer<T>* setRegularizerVectors(const ParamFISTA<T>& param,
const GraphStruct<T>* graph_st = NULL,
const TreeStruct<T>* tree_st = NULL,
const GraphPathStruct<T>* graph_path_st=NULL) {
ParamReg<T> param_reg;
param_reg.pos=param.pos;
param_reg.intercept=param.intercept;
param_reg.lambda=param.lambda;
param_reg.lambda2d1=param.lambda2/param.lambda;
param_reg.lambda3d1=param.lambda3/param.lambda;
param_reg.size_group=param.size_group;
param_reg.tree_st=const_cast<TreeStruct<T>* >(tree_st);
param_reg.graph_st=const_cast<GraphStruct<T>* >(graph_st);
param_reg.graph_path_st=const_cast<GraphPathStruct<T>* >(graph_path_st);
param_reg.resetflow=param.resetflow;
param_reg.clever=param.clever;
param_reg.ngroups=param.ngroups;
param_reg.groups=param.groups;
Regularizer<T>* reg;
switch (param.regul) {
case L0: reg=new Lzero<T>(param_reg); break;
case LOG_DC: param_reg.lambda2d1=param.a; reg=new LogDC<T>(param_reg); break;
case L1: reg=new Lasso<T>(param_reg); break;
case L1CONSTRAINT: reg=new LassoConstraint<T>(param_reg); break;
case L2: reg=new normL2<T>(param_reg); break;
case LINF: reg=new normLINF<T>(param_reg); break;
case RIDGE: reg=new Ridge<T>(param_reg); break;
case ELASTICNET: reg=new typename ElasticNet<T>::type(param_reg); break;
case FUSEDLASSO: reg=new FusedLasso<T>(param_reg); break;
case TREE_L0: reg=new TreeLzero<T>(param_reg); break;
case TREE_L2: param_reg.linf=false; reg=new TreeLasso<T>(param_reg); break;
case TREE_LINF: param_reg.linf=true; reg=new TreeLasso<T>(param_reg); break;
case GRAPH: param_reg.linf=true; reg=new GraphLasso<T>(param_reg); break;
case GRAPH_RIDGE: param_reg.linf=true; reg=new typename GraphLassoRidge<T>::type(param_reg); break;
case GRAPH_L2: param_reg.linf=false; reg=new GraphLasso<T>(param_reg); break;
case TRACE_NORM_VEC: reg=new ProxMatToVec<T, TraceNorm<T> >(param_reg); break;
case RANK_VEC: reg=new ProxMatToVec<T, Rank<T> >(param_reg); break;
case GROUPLASSO_L2: reg=new typename GroupLassoL2<T>::type(param_reg); break;
case GROUPLASSO_LINF: reg=new typename GroupLassoLINF<T>::type(param_reg); break;
case GROUPLASSO_L2_L1: reg=new typename GroupLassoL2_L1<T>::type(param_reg); break;
case GROUPLASSO_LINF_L1: reg=new typename GroupLassoLINF_L1<T>::type(param_reg); break;
case GRAPH_PATH_L0: reg = new GraphPathL0<T>(param_reg); break;
case GRAPH_PATH_CONV: reg = new GraphPathConv<T>(param_reg); break;
case NONE: reg=new None<T>(param_reg); break;
default: cerr << "Not implemented"; exit(1);
}
return reg;
};
template <typename T>
Regularizer<T, Matrix<T> >* setRegularizerMatrices(const ParamFISTA<T>& param,
const int m, const int n,
const GraphStruct<T>* graph_st = NULL,
const TreeStruct<T>* tree_st = NULL,
const GraphPathStruct<T>* graph_path_st=NULL) {
ParamReg<T> param_reg;
param_reg.transpose=param.transpose;
param_reg.pos=param.pos;
param_reg.intercept=param.intercept;
param_reg.lambda2d1=param.lambda2/param.lambda;
param_reg.lambda3d1=param.lambda3/param.lambda;
param_reg.size_group=param.size_group;
param_reg.num_cols=param.transpose ? m : n;
param_reg.tree_st=const_cast<TreeStruct<T>* >(tree_st);
param_reg.graph_st=const_cast<GraphStruct<T>* >(graph_st);
param_reg.resetflow=param.resetflow;
param_reg.clever=param.clever;
Regularizer<T, Matrix<T> >* reg;
switch (param.regul) {
case L0: reg=new RegMat<T, Lzero<T> >(param_reg); break;
case L1: reg=new RegMat<T, Lasso<T> >(param_reg); break;
case L1CONSTRAINT: reg=new RegMat<T, LassoConstraint<T> >(param_reg); break;
case L2: reg=new RegMat<T, normL2<T> >(param_reg); break;
case LINF: reg=new RegMat<T, normLINF<T> >(param_reg); break;
case RIDGE: reg=new RegMat<T, Ridge<T> >(param_reg); break;
case ELASTICNET: reg=new RegMat<T, typename ElasticNet<T>::type >(param_reg); break;
case FUSEDLASSO: reg=new RegMat<T, FusedLasso<T> >(param_reg); break;
case L1L2: reg=new MixedL1L2<T>(param_reg); break;
case L1LINF: reg=new MixedL1LINF<T>(param_reg); break;
case TRACE_NORM: reg=new TraceNorm<T>(param_reg); break;
case RANK: reg=new Rank<T>(param_reg); break;
case L1L2_L1: reg=new typename MixedL1L2_L1<T>::type(param_reg); break;
case L1LINF_L1: reg=new typename MixedL1LINF_L1<T>::type(param_reg); break;
case TREE_L0: reg=new RegMat<T, TreeLzero<T> >(param_reg); break;
case TREE_L2: param_reg.linf=false; reg=new RegMat<T, TreeLasso<T> >(param_reg); break;
case TREE_LINF: param_reg.linf=true; reg=new RegMat<T, TreeLasso<T> >(param_reg); break;
case GRAPH: reg=new RegMat<T, GraphLasso<T> >(param_reg); break;
case TREEMULT: reg = new TreeMult<T>(param_reg); break;
case GRAPHMULT: reg=new GraphMult<T>(param_reg); break;
case L1LINFCR: reg = new MixedL1LINFCR<T>(m,param_reg); break;
case GRAPH_PATH_L0: reg = new RegMat<T, GraphPathL0<T> >(param_reg); break;
case GRAPH_PATH_CONV: reg = new RegMat<T, GraphPathConv<T> >(param_reg); break;
case NONE: reg=new RegMat<T, None<T> >(param_reg); break;
default: cerr << "not implemented"; exit(1);
}
return reg;
}
template <typename T>
void print_info_solver(const ParamFISTA<T>& param) {
if (param.verbose) {
print_loss(param.loss);
print_regul(param.regul);
if (param_for_admm(param)) {
if (param.admm || param.lin_admm) {
if (param.lin_admm) {
cout << "Linearized ADMM algorithm" << endl;
} else {
cout << "ADMM algorithm" << endl;
}
}
} else {
if (param.ista) {
cout << "ISTA algorithm" << endl;
} else if (param.subgrad) {
cout << "Subgradient descent" << endl;
} else {
cout << "FISTA algorithm" << endl;
}
if ((param.regul == GRAPH || param.regul == TREEMULT ||
param.regul == GRAPHMULT || param.regul==L1LINFCR) &&
param.clever)
cout << "Projections with arc capacities" << endl;
if (param.intercept) cout << "with intercept" << endl;
if (param.pos) cout << "Non-negativity constraints" << endl;
if (param.log && param.logName) {
cout << "log activated " << endl;
cout << param.logName << endl;
cout << endl;
}
}
flush(cout);
}
};
template <typename T>
void solver_admm(const Matrix<T>& X, const Matrix<T>& alpha0,
Matrix<T>& alpha, Matrix<T>& optim_info, SplittingFunction<T, SpMatrix<T> >** regularizers,
SplittingFunction<T, Matrix<T> >** losses, const ParamFISTA<T>& param) {
const int M = X.n();
optim_info.resize(4,M);
int i1;
#pragma omp parallel for private(i1)
for (i1 = 0; i1< M; ++i1) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i1,Xi);
losses[numT]->init(Xi);
Vector<T> alpha0i;
alpha0.refCol(i1,alpha0i);
Vector<T> alphai;
alpha.refCol(i1,alphai);
regularizers[numT]->reset();
Vector<T> optim_infoi;
optim_info.refCol(i1,optim_infoi);
if (param.admm || param.lin_admm) {
if (param.lin_admm) {
LinADMM(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
} else {
ADMM(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
}
}
}
}
template <typename T>
void solver_aux1(const Matrix<T>& X, const Matrix<T>& alpha0,
Matrix<T>& alpha, Matrix<T>& optim_info, Regularizer<T, Vector<T> >** regularizers,
Loss<T, Vector<T> >** losses, const ParamFISTA<T>& param) {
const int M = X.n();
if (param.verbose) {
const bool duality = losses[0]->is_fenchel() && regularizers[0]->is_fenchel();
if (duality) cout << "Duality gap via Fenchel duality" << endl;
if (!param.ista && param.subgrad && !regularizers[0]->is_subgrad()) {
cerr << "Subgradient algorithm is not implemented for this combination of loss/regularization" << endl;
exit(1);
}
cout << "Timings reported do not include loss and fenchel evaluation" << endl;
flush(cout);
}
optim_info.resize(4,M);
int i1;
#pragma omp parallel for private(i1)
for (i1 = 0; i1< M; ++i1) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i1,Xi);
losses[numT]->init(Xi);
Vector<T> alpha0i;
alpha0.refCol(i1,alpha0i);
Vector<T> alphai;
alpha.refCol(i1,alphai);
regularizers[numT]->reset();
Vector<T> optim_infoi;
optim_info.refCol(i1,optim_infoi);
if (param.ista) {
ISTA_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
} else if (param.subgrad) {
subGradientDescent_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
} else {
FISTA_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
}
}
}
template <typename T>
void solver_aux2(const Matrix<T>& X, const Matrix<T>& alpha0,
Matrix<T>& alpha, Matrix<T>& optim_info, Regularizer<T, Matrix<T> >** regularizers,
Loss<T, Matrix<T> >** losses, const ParamFISTA<T>& param) {
const int M = X.n();
if (param.verbose) {
const bool duality = losses[0]->is_fenchel() && regularizers[0]->is_fenchel();
if (duality) cout << "Duality gap via Fenchel duality" << endl;
flush(cout);
}
optim_info.resize(4,M);
int i2;
#pragma omp parallel for private(i2)
for (i2 = 0; i2< M; ++i2) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> Xi;
X.refCol(i2,Xi);
losses[numT]->init(Xi);
const int N = alpha0.n()/X.n();
Matrix<T> alpha0i;
alpha0.refSubMat(i2*N,N,alpha0i);
Matrix<T> alphai;
alpha.refSubMat(i2*N,N,alphai);
regularizers[numT]->reset();
Vector<T> optim_infoi;
optim_info.refCol(i2,optim_infoi);
if (param.ista) {
ISTA_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
} else if (param.subgrad) {
subGradientDescent_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
} else {
FISTA_Generic(*(losses[numT]),*(regularizers[numT]),alpha0i,alphai,optim_infoi,param);
}
}
}
/// AbstractMatrixB is basically either SpMatrix or Matrix
template <typename T>
void solver(const Matrix<T>& X, const AbstractMatrixB<T>& D, const Matrix<T>& alpha0,
Matrix<T>& alpha, const ParamFISTA<T>& param1, Matrix<T>& optim_info,
const GraphStruct<T>* graph_st = NULL,
const TreeStruct<T>* tree_st = NULL,
const GraphPathStruct<T>* graph_path_st=NULL) {
print_info_solver(param1);
int num_threads=MIN(X.n(),param1.num_threads);
num_threads=init_omp(num_threads);
ParamFISTA<T> param=param1;
param.copied=true;
if (param.loss==POISSON) {
param.intercept=false;
param.pos=true;
}
if (param_for_admm(param)) {
if (num_threads > 1) param.verbose=false;
SplittingFunction<T>** losses = new SplittingFunction<T>*[num_threads];
SplittingFunction<T, SpMatrix<T> >** regularizers= new SplittingFunction<T, SpMatrix<T> >*[num_threads];
for (int i = 0; i<num_threads; ++i) {
regularizers[i]=setRegularizerADMM(param,graph_st,tree_st);
switch (param.loss) {
case SQUARE: losses[i]=new SqLoss<T>(D); break;
case HINGE: losses[i] = new HingeLoss<T>(D); break;
default: cerr << "Not implemented" << endl; exit(1);
}
}
solver_admm(X, alpha0, alpha, optim_info, regularizers,losses,param);
for (int i = 0; i<num_threads; ++i) {
delete(losses[i]);
delete(regularizers[i]);
}
delete[](losses);
delete[](regularizers);
} else {
Matrix<T> G;
if (param.loss==HINGE) {
cerr << "Loss only implemented for ADMM" << endl;
return;
}
if (param.compute_gram && (param.loss==SQUARE)) D.XtX(G);
if (!loss_for_matrices(param.loss) && !(param.transpose || regul_for_matrices(param.regul))) {
if (num_threads > 1) param.verbose=false;
Loss<T>** losses = new Loss<T>*[num_threads];
Regularizer<T>** regularizers= new Regularizer<T>*[num_threads];
for (int i = 0; i<num_threads; ++i) {
regularizers[i]=setRegularizerVectors(param,graph_st,tree_st,graph_path_st);
switch (param.loss) {
case SQUARE: if (param.compute_gram) {
losses[i]=new SqLoss<T>(D,G);
} else {
losses[i]=new SqLoss<T>(D);
}
break;
case POISSON: losses[i]=new PoissonLoss<T>(D,param.delta); break;
case SQUARE_MISSING: losses[i]=new SqLossMissing<T>(D); break;
case LOG: losses[i] = new LogLoss<T>(D); break;
case LOGWEIGHT: losses[i] = new LogLoss<T,true>(D); break;
default: cerr << "Not implemented"; exit(1);
}
}
solver_aux1(X, alpha0, alpha, optim_info, regularizers,losses,param);
for (int i = 0; i<num_threads; ++i) {
delete(losses[i]);
losses[i]=NULL;
delete(regularizers[i]);
regularizers[i]=NULL;
}
delete[](losses);
delete[](regularizers);
} else if (loss_for_matrices(param.loss) && param.loss != CUR) {
if (num_threads > 1) param.verbose=false;
Loss<T, Matrix<T> >** losses = new Loss<T, Matrix<T> >*[num_threads];
Regularizer<T, Matrix<T> >** regularizers= new Regularizer<T, Matrix<T> >*[num_threads];
const int N = alpha0.n()/X.n();
for (int i = 0; i<num_threads; ++i) {
regularizers[i]=setRegularizerMatrices(param,alpha0.m(),N,graph_st,tree_st,graph_path_st);
switch (param.loss) {
case MULTILOG: losses[i] = new MultiLogLoss<T>(D); break;
default: cerr << "Not implemented"; exit(1);
}
}
solver_aux2(X, alpha0, alpha, optim_info, regularizers,losses,param);
for (int i = 0; i<num_threads; ++i) {
delete(losses[i]);
losses[i]=NULL;
delete(regularizers[i]);
regularizers[i]=NULL;
}
delete[](losses);
delete[](regularizers);
} else {
/// (loss not for matrices and regul for matrices) or CUR
Loss<T, Matrix<T>, Matrix<T> >* loss;
Regularizer<T, Matrix<T> >* regularizer;
switch (param.loss) {
case SQUARE: if (param.compute_gram) {
loss=new SqLossMat<T>(D,G);
} else {
loss=new SqLossMat<T>(D);
}
break;
case POISSON: loss=new LossMat<T, PoissonLoss<T> >(X.n(),D,param.delta); break;
case SQUARE_MISSING: loss=new LossMat<T, SqLossMissing<T> >(X.n(),D); break;
case LOG: loss = new LossMat<T, LogLoss<T,false> >(X.n(),D); break;
case LOGWEIGHT: loss = new LossMat<T, LogLoss<T,true> >(X.n(),D); break;
case CUR: loss = new LossCur<T>(D); break;
default: cerr << "Not implemented"; exit(1);
}
regularizer=setRegularizerMatrices(param,alpha0.m(),alpha0.n(),graph_st,tree_st,graph_path_st);
if (param.verbose) {
const bool duality = loss->is_fenchel() && regularizer->is_fenchel();
if (duality) cout << "Duality gap via Fenchel duality" << endl;
}
loss->init(X);
optim_info.resize(4,1);
Vector<T> optim_infoi;
optim_info.refCol(0,optim_infoi);
if (param.ista) {
ISTA_Generic(*loss,*regularizer,alpha0,alpha,optim_infoi,param);
} else if (param.subgrad) {
subGradientDescent_Generic(*loss,*regularizer,alpha0,alpha,optim_infoi,param);
} else {
FISTA_Generic(*loss,*regularizer,alpha0,alpha,optim_infoi,param);
}
delete(regularizer);
delete(loss);
}
}
};
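/* Typical call sketch (hypothetical sizes; field names follow this file):
ParamFISTA<T> param;
param.loss = SQUARE; param.regul = L1;
param.lambda = T(0.1); param.max_it = 100; param.tol = T(1e-4);
Matrix<T> optim_info;
solver(X, D, alpha0, alpha, param, optim_info);
Each column of optim_info then holds the objective, the best dual value,
the relative duality gap (or elapsed time for ADMM), and the iteration
count for the corresponding column of X. */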
template <typename T>
void PROX(const Matrix<T>& alpha0,
Matrix<T>& alpha, const ParamFISTA<T>& param,
Vector<T>& val_loss,
const GraphStruct<T>* graph_st = NULL,
const TreeStruct<T>* tree_st = NULL,
const GraphPathStruct<T>* graph_path_st = NULL) {
if (param.verbose) {
print_regul(param.regul);
if ((param.regul == GRAPH || param.regul == TREEMULT ||
param.regul == GRAPHMULT || param.regul==L1LINFCR) &&
param.clever)
cout << "Projections with arc capacities" << endl;
if (param.intercept) cout << "with intercept" << endl;
flush(cout);
}
int num_threads=MIN(alpha.n(),param.num_threads);
num_threads=init_omp(num_threads);
const int M = alpha.n();
if (!graph_st && param.regul==GRAPH) {
cerr << "Graph structure should be provided" << endl;
return;
}
if (!regul_for_matrices(param.regul)) {
Regularizer<T>** regularizers= new Regularizer<T>*[num_threads];
for (int i = 0; i<num_threads; ++i)
regularizers[i]=setRegularizerVectors(param,graph_st,tree_st,graph_path_st);
int i;
if (param.eval)
val_loss.resize(M);
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> alpha0i;
alpha0.refCol(i,alpha0i);
Vector<T> alphai;
alpha.refCol(i,alphai);
regularizers[numT]->reset();
regularizers[numT]->prox(alpha0i,alphai,param.lambda);
if (param.eval)
val_loss[i]=regularizers[numT]->eval(alphai);
}
for (i = 0; i<num_threads; ++i) {
delete(regularizers[i]);
regularizers[i]=NULL;
}
delete[](regularizers);
} else {
/// regul for matrices
if (param.eval)
val_loss.resize(1);
Regularizer<T, Matrix<T> >* regularizer;
regularizer=setRegularizerMatrices(param,alpha0.m(),alpha0.n(),graph_st,tree_st,graph_path_st);
regularizer->prox(alpha0,alpha,param.lambda);
if (param.eval)
val_loss[0]=regularizer->eval(alpha);
delete(regularizer);
}
};
template <typename T>
void EvalGraphPath(const Matrix<T>& alpha0,
const ParamFISTA<T>& param,
Vector<T>& val_loss,
const GraphPathStruct<T>* graph_path_st,
SpMatrix<T>* paths = NULL) {
if (param.verbose) {
print_regul(param.regul);
if (param.intercept) cout << "with intercept" << endl;
if (param.eval_dual_norm) cout << "Evaluate the dual norm only" << endl;
flush(cout);
}
int num_threads=MIN(alpha0.n(),param.num_threads);
num_threads=init_omp(num_threads);
const int M = alpha0.n();
if (!regul_for_matrices(param.regul)) {
Regularizer<T>** regularizers= new Regularizer<T>*[num_threads];
for (int i = 0; i<num_threads; ++i)
regularizers[i]=setRegularizerVectors<T>(param,NULL,NULL,graph_path_st);
int i;
val_loss.resize(M);
#pragma omp parallel for private(i)
for (i = 0; i< M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
Vector<T> alphai;
alpha0.refCol(i,alphai);
regularizers[numT]->reset();
if (i==0 && paths) {
if (param.eval_dual_norm) {
val_loss[i]=regularizers[numT]->eval_dual_norm_paths(alphai,*paths);
} else {
val_loss[i]=regularizers[numT]->eval_paths(alphai,*paths);
}
} else {
if (param.eval_dual_norm) {
val_loss[i]=regularizers[numT]->eval_dual_norm(alphai);
} else {
val_loss[i]=regularizers[numT]->eval(alphai);
}
}
}
for (i = 0; i<num_threads; ++i) {
delete(regularizers[i]);
regularizers[i]=NULL;
}
delete[](regularizers);
} else {
cerr << "Not implemented" << endl;
return;
}
};
}
#endif
|
GB_unaryop__identity_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_bool
// op(A') function: GB_tran__identity_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
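// For this uint64_t/bool pairing, GB_CAST_OP(p,p) expands (roughly) to:
//
// bool aij = Ax [p] ;
// uint64_t x = (uint64_t) aij ;
// Cx [p] = x ;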
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint64_bool
(
uint64_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <string>
#include <vector>
#include <unordered_map>
#include <utility>
#ifdef INTEL_MKL_ML
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
// This file contains a number of utility classes and functions used by
// MKL-enabled kernels.
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
#ifdef INTEL_MKL_ML
class MklShape {
public:
MklShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklShape); // Cannot copy
~MklShape() {
if (sizes_) delete[] sizes_;
if (strides_) delete[] strides_;
if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
}
const bool IsMklTensor() const { return isMklTensor_; }
void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
void SetDimensions(const size_t dimension) { dimension_ = dimension; }
void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
void SetMklLayout(const void* primitive, size_t resourceType) {
CHECK_EQ(
dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
(dnnResourceType_t)resourceType),
E_SUCCESS);
}
void SetTfLayout(const size_t dimension, const size_t* sizes,
const size_t* strides) {
dimension_ = dimension;
if (dimension > 0) { // MKL doesn't support zero-dimension tensors
sizes_ = new size_t[dimension];
strides_ = new size_t[dimension];
for (int ii = 0; ii < dimension; ii++) {
sizes_[ii] = sizes[ii];
strides_[ii] = strides[ii];
}
CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
E_SUCCESS);
}
}
// Default case - MKL dim ordering is opposite of TF dim ordering
// MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
// TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
// For layers that rely on data_format semantics (conv, pooling etc.)
// or operate only on certain dimensions (relu, concat, split etc.),
// Mkl APIs might require us to reorder these dimensions. In such cases,
// kernels should explicitly set this map.
void SetTfDimOrder(const size_t dimension) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
}
}
void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
for (size_t ii = 0; ii < dimension; ii++) {
tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
}
}
void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
CHECK_EQ(dimension, 4);
CHECK(dimension == dimension_);
if (tf_to_mkl_dim_map_ == nullptr) {
tf_to_mkl_dim_map_ = new size_t[dimension];
}
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
}
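// Worked example (derived directly from the assignments above): for
// FORMAT_NHWC, GetTensorDimIndex<2> yields N=0, H=1, W=2, C=3, so the
// resulting map is tf_to_mkl_dim_map_ = {3, 1, 0, 2}; for FORMAT_NCHW it is
// {3, 2, 1, 0}.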
const dnnLayout_t GetMklLayout() const { return mklLayout_; }
const dnnLayout_t GetTfLayout() const { return tfLayout_; }
const dnnLayout_t GetCurLayout() const {
return isMklTensor_ ? mklLayout_ : tfLayout_;
}
size_t GetDimension() const { return dimension_; }
const size_t* GetSizes() const { return sizes_; }
int64 dim_size(int index) const { return sizes_[index]; }
int64 tf_dim_size(int index) const {
return sizes_[tf_to_mkl_dim_map_[index]];
}
const size_t* GetStrides() const { return strides_; }
const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Channel dimension.
bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Batch dimension.
bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Width dimension.
bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
// corresponds to MKL's Height dimension.
bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NCHW format.
bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
// Check if the TF-Mkl dimension ordering map specifies if the input
// tensor is in NHWC format.
bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
void* output) const {
dnnLayout_t curLayout;
if (isMklTensor_)
curLayout = mklLayout_;
else
curLayout = tfLayout_;
dnnPrimitive_t convert;
CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
E_SUCCESS);
CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
}
// The following methods are used for serializing and de-serializing the
// contents of the mklshape object.
// The data is serialized in this order
// isMklTensor_
// dimension_
// sizes_
// strides_
// mklLayout_
// tfLayout_
// tf_to_mkl_dim_map_
#define SIZE_OF_MKL_DNN_BUF \
(dnnLayoutSerializationBufferSize_F32()) // Size of buffer needed to
// serialize dnn_layout pointer
// Size of the buffer needed to hold the serialized object; computed as:
// sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_)
// + sizeof(strides_) + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
// + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
(2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)
// First we define some macros for offsets into the serial buffer where the
// different elements of MklShape are written to / read from.
#define IS_MKL_TENSOR_OFFSET 0
// Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
(IS_MKL_TENSOR_OFFSET + sizeof(size_t)) // Location of dimension_
// Location of sizes. Note dims is not used here; it is kept only
// to make the macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
(SIZES_OFFSET(dims) + dims * sizeof(size_t)) // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
(STRIDES_OFFSET(dims) + dims * sizeof(size_t)) // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
(MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF) // Location of tfLayout_
// Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
(TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)
// TODO(agramesh1) make sure to create a const to share with rewrite pass
// for min size of MKL metadata tensor.
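// Worked example (follows directly from the macros above): for dims = 4 the
// serial buffer holds 2 * sizeof(size_t) header words, 3 * 4 * sizeof(size_t)
// bytes for sizes_/strides_/tf_to_mkl_dim_map_, and two serialized dnn
// layouts, i.e. 14 * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF bytes in total.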
void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
// Make sure buffer holds at least isMklTensor_
isMklTensor_ =
*reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;
if (isMklTensor_) { // If it is an MKL Tensor then read the rest
dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small in DeSerialize";
sizes_ = new size_t[dimension_];
strides_ = new size_t[dimension_];
tf_to_mkl_dim_map_ = new size_t[dimension_];
for (int i = 0; i < dimension_; i++) {
sizes_[i] =
reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
strides_[i] = reinterpret_cast<const size_t*>(
buf + STRIDES_OFFSET(dimension_))[i];
tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
}
CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
<< "Bufsize too small to Serialize";
*reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
isMklTensor_ ? 1 : 0;
if (isMklTensor_) {
*(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
for (int i = 0; i < dimension_; i++) {
reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
sizes_[i];
reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
strides_[i];
reinterpret_cast<size_t*>(buf +
TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
tf_to_mkl_dim_map_[i];
}
CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
buf + MKL_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
CHECK_EQ(
dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
E_SUCCESS);
}
}
private:
bool isMklTensor_ =
false; // Flag to indicate if the tensor is an MKL tensor or not
dnnLayout_t mklLayout_ = nullptr; // Pointer to the MKL layout
dnnLayout_t tfLayout_ = nullptr; // Pointer to layout of the corresponding
// Tensorflow tensor, used when converting from MKL to a standard tensor
size_t dimension_ = 0;
size_t* sizes_ = nullptr; // Required by MKL for conversions
size_t* strides_ = nullptr; // Required by MKL for conversions
size_t* tf_to_mkl_dim_map_ =
nullptr; // TF dimension corresponding to this MKL dimension
};
#else
// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
class MklDnnShape {
private:
typedef struct {
/// Flag to indicate if the tensor is an MKL tensor or not
bool is_mkl_tensor_ = false;
/// Number of dimensions in Tensorflow format
size_t dimension_ = 0;
/// Required by MKLDNN for conversions
mkldnn_dims_t sizes_;
memory::format tf_data_format_ = memory::format::format_undef;
memory::data_type T_ = memory::data_type::data_undef;
// MKL layout
mkldnn_memory_desc_t mkl_md_;
/// TF dimension corresponding to this MKL dimension
mkldnn_dims_t map_;
} MklShapeData;
MklShapeData data_;
typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;
#define INVALID_DIM_SIZE -1
public:
MklDnnShape() {
for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
++i) {
data_.sizes_[i] = -1;
}
for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
data_.map_[i] = -1;
}
}
~MklDnnShape() {}
TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape); // Cannot copy
/// Helper function to compare memory::desc objects for MklDnn.
/// Maybe this should go into MklDnn directly.
inline bool CompareMklDnnLayouts(const memory::desc& md1,
const memory::desc& md2) const {
mkldnn_memory_desc_t mdd1 = md1.data;
mkldnn_memory_desc_t mdd2 = md2.data;
const char* d1 = reinterpret_cast<const char*>(&mdd1);
const char* d2 = reinterpret_cast<const char*>(&mdd2);
size_t md_size = sizeof(mdd1);
for (size_t i = 0; i < md_size; i++) {
if (*d1++ != *d2++) {
return false;
}
}
return true;
}
/// Equality function for MklDnnShape objects
/// @return true if both are equal; false otherwise.
inline bool operator==(const MklDnnShape& input_shape) const {
if (this->IsMklTensor() != input_shape.IsMklTensor()) {
return false;
}
// If input tensors are in Mkl layout, then we check for dimensions and
// sizes.
if (this->IsMklTensor()) {
return this->GetTfShape() == input_shape.GetTfShape() &&
CompareMklDnnLayouts(this->GetMklLayout(),
input_shape.GetMklLayout());
}
return true;
}
/// Equality operator for MklDnnShape and TFShape.
/// Returns: true if TF shapes for both are the same, false otherwise
inline bool operator==(const TensorShape& input_shape) const {
if (!this->IsMklTensor()) {
return false;
}
return this->GetTfShape() == input_shape;
}
inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
inline void SetMklTensor(bool is_mkl_tensor) {
data_.is_mkl_tensor_ = is_mkl_tensor;
}
inline void SetDimensions(const size_t dimension) {
data_.dimension_ = dimension;
}
inline size_t GetDimension(char dimension) const {
int index = GetMklDnnTensorDimIndex(dimension);
CHECK(index >= 0 && index < this->GetDimension())
<< "Invalid index from the dimension: " << index << ", " << dimension;
return this->DimSize(index);
}
inline int32 GetMklDnnTensorDimIndex(char dimension) const {
switch (dimension) {
case 'N':
return MklDnnDims::Dim_N;
case 'C':
return MklDnnDims::Dim_C;
case 'H':
return MklDnnDims::Dim_H;
case 'W':
return MklDnnDims::Dim_W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1; // Avoid compiler warning about missing return value
}
}
inline size_t GetDimension() const { return data_.dimension_; }
inline const int* GetSizes() const {
return reinterpret_cast<const int*>(&data_.sizes_[0]);
}
// Returns an mkldnn::memory::dims object that contains the sizes of this
// MklDnnShape object.
inline memory::dims GetSizesAsMklDnnDims() const {
memory::dims retVal;
if (data_.is_mkl_tensor_) {
size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
for (size_t i = 0; i < dimensions; i++) {
if (data_.sizes_[i] != INVALID_DIM_SIZE)
retVal.push_back(data_.sizes_[i]);
}
} else {
CHECK_EQ(data_.is_mkl_tensor_, true);
}
return retVal;
}
inline int64 DimSize(int index) const {
CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
return data_.sizes_[index];
}
/// Return TensorShape that describes the Tensorflow shape of the tensor
/// represented by this MklShape.
inline TensorShape GetTfShape() const {
CHECK_EQ(data_.is_mkl_tensor_, true);
std::vector<int32> shape(data_.dimension_, -1);
if (data_.tf_data_format_ != memory::format::blocked) {
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[TfDimIdx(idx)];
}
} else {
// If Tensorflow shape is in Blocked format, then we don't have dimension
// map for it. So we just create Tensorflow shape from sizes in the
// specified order.
for (size_t idx = 0; idx < data_.dimension_; ++idx) {
shape[idx] = data_.sizes_[idx];
}
}
TensorShape ts;
bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
CHECK_EQ(ret, true);
return ts;
}
inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
inline const memory::data_type GetElemType() { return data_.T_; }
inline void SetMklLayout(memory::primitive_desc* pd) {
CHECK_NOTNULL(pd);
data_.mkl_md_ = pd->desc().data;
}
inline void SetMklLayout(memory::desc* md) {
CHECK_NOTNULL(md);
data_.mkl_md_ = md->data;
}
inline const memory::desc GetMklLayout() const {
return memory::desc(data_.mkl_md_);
}
inline memory::format GetTfDataFormat() const {
return data_.tf_data_format_;
}
/// We don't create primitive_descriptor for TensorFlow layout now.
/// We use lazy evaluation and create it only when needed. Input format can
/// also be Blocked format.
inline void SetTfLayout(size_t dims, const memory::dims& sizes,
memory::format format) {
CHECK_EQ(dims, sizes.size());
data_.dimension_ = dims;
for (size_t ii = 0; ii < dims; ii++) {
data_.sizes_[ii] = sizes[ii];
}
data_.tf_data_format_ = format;
if (format != memory::format::blocked) {
SetTfDimOrder(dims, format);
}
}
inline const memory::desc GetTfLayout() const {
memory::dims dims;
for (size_t ii = 0; ii < data_.dimension_; ii++) {
dims.push_back(data_.sizes_[ii]);
}
// Create Blocked memory desc if input TF format was set like that.
if (data_.tf_data_format_ == memory::format::blocked) {
auto strides = CalculateTFStrides(dims);
return CreateBlockedMemDescHelper(dims, strides, data_.T_);
} else {
return memory::desc(dims, data_.T_, data_.tf_data_format_);
}
}
inline const memory::desc GetCurLayout() const {
return IsMklTensor() ? GetMklLayout() : GetTfLayout();
}
// nhasabni - I've removed SetTfDimOrder that was setting default order in
// case of MKL-ML. We don't need a case of default dimension order because
// when an operator that does not get data_format attribute gets all inputs
// in Tensorflow format, it will produce output in Tensorflow format.
inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
CHECK(dimension == data_.dimension_);
for (size_t ii = 0; ii < dimension; ii++) {
data_.map_[ii] = map[ii];
}
}
inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
// TODO(nhasabni): Why do we restrict this to 4D?
CHECK_EQ(dimension, 4);
CHECK(dimension == data_.dimension_);
data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
}
inline void SetTfDimOrder(const size_t dimension, memory::format format) {
TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
SetTfDimOrder(dimension, data_format);
}
inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
inline int64 TfDimSize(int index) const {
return data_.sizes_[TfDimIdx(index)];
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Channel dimension.
inline bool IsMklChannelDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_C;
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Batch dimension.
inline bool IsMklBatchDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_N;
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Width dimension.
inline bool IsMklWidthDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_W;
}
/// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
/// corresponds to MKL's Height dimension.
inline bool IsMklHeightDim(int d) const {
return TfDimIdx(d) == MklDnnDims::Dim_H;
}
/// Check if the TF-Mkl dimension ordering map specifies if the input
/// tensor is in NCHW format.
inline bool IsTensorInNCHWFormat() const {
TensorFormat data_format = FORMAT_NCHW;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
/// Check if the TF-Mkl dimension ordering map specifies if the input
/// tensor is in NHWC format.
inline bool IsTensorInNHWCFormat() const {
TensorFormat data_format = FORMAT_NHWC;
return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
}
/// The following methods are used for serializing and de-serializing the
/// contents of the mklshape object.
/// The data is serialized in this order
/// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;
/// Size of buffer to hold the serialized object, the size is computed by
/// following above mentioned order
inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }
void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small to SerializeMklDnnShape";
*reinterpret_cast<MklShapeData*>(buf) = data_;
}
void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
// Make sure buffer holds at least is_mkl_tensor_.
CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
<< "Buffer size is too small in DeSerializeMklDnnShape";
const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
if (is_mkl_tensor) { // If it is an MKL Tensor then read the rest
CHECK(buf_size >= GetSerializeBufferSize())
<< "Buffer size is too small in DeSerializeMklDnnShape";
data_ = *reinterpret_cast<const MklShapeData*>(buf);
}
}
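// Round-trip sketch (illustrative only; the buffer name is an assumption):
//   MklDnnShape shape;                        // ... populated by a kernel
//   std::vector<uint8> buf(shape.GetSerializeBufferSize());
//   shape.SerializeMklDnnShape(buf.data(), buf.size());
//   MklDnnShape restored;
//   restored.DeSerializeMklDnnShape(buf.data(), buf.size());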
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
#ifdef INTEL_MKL_ML
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
for (auto& s : shapes) {
if (!s.IsMklTensor()) {
return false;
}
}
return true;
}
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
const MklShape& mkl_shape) {
Tensor output_tensor;
TensorShape output_shape;
for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
// Outermost to innermost dimension
output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
}
// Allocate output tensor.
context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);
dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());
if (mkl_tensor.NumElements() != 0) {
mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
}
return output_tensor;
}
#else
using mkldnn::stream;
template <typename T> class MklDnnData;
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
const MklDnnShape& mkl_shape) {
Tensor output_tensor;
try {
if (!mkl_shape.IsMklTensor())
return mkl_tensor; // return input since it is already TF tensor
TensorShape output_shape = mkl_shape.GetTfShape();
// Allocate output tensor.
context->allocate_temp(DataTypeToEnum<T>::v(),
output_shape, &output_tensor);
auto cpu_engine = engine(engine::cpu, 0);
MklDnnData<T> input(&cpu_engine);
// Get Mkl layout of input tensor.
auto input_mkl_md = mkl_shape.GetMklLayout();
auto output_tf_md = mkl_shape.GetTfLayout();
auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
input.SetUsrMem(input_mkl_md, &mkl_tensor);
// reorder
if (input.IsReorderNeeded(output_tf_pd)) {
std::vector<primitive> net;
CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
true);
stream(stream::kind::eager).submit(net).wait();
} else {
// If not, just forward input tensor to output tensor.
CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
}
} catch (mkldnn::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
LOG(FATAL) << "Operation received an exception: " << error_msg;
}
return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
mklshape->DeSerializeMklShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.data(),
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.size() *
sizeof(uint8));
}
#else
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
mklshape->DeSerializeMklDnnShape(
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.data(),
ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()))
.flat<uint8>()
.size() *
sizeof(uint8));
}
#endif
// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
return ctext->input(GetTensorDataIndex(n, ctext->num_inputs()));
}
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
OpInputList* input_tensors) {
CHECK_NOTNULL(input_tensors);
ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklShapeList* mkl_shapes) {
OpInputList input_mkl_tensors;
GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);
for (int i = 0; i < input_mkl_tensors.size(); i++) {
(*mkl_shapes)[i].DeSerializeMklShape(
input_mkl_tensors[i].flat<uint8>().data(),
input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
}
}
#else
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
MklDnnShapeList* mkl_shapes) {
OpInputList input_mkl_tensors;
GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);
for (int i = 0; i < input_mkl_tensors.size(); i++) {
(*mkl_shapes)[i].DeSerializeMklDnnShape(
input_mkl_tensors[i].flat<uint8>().data(),
input_mkl_tensors[i].flat<uint8>().size() * sizeof(uint8));
}
}
#endif
#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
// Sanity check.
CHECK_NOTNULL(context);
CHECK_LT(input_idx, context->num_inputs());
MklDnnShape input_mkl_shape;
GetMklShape(context, input_idx, &input_mkl_shape);
if (input_mkl_shape.IsMklTensor()) {
return input_mkl_shape.GetTfShape();
} else {
const Tensor& t = MklGetInput(context, input_idx);
return t.shape();
}
}
#endif
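// A minimal kernel-side sketch tying the helpers above together (the kernel
// name and input index are hypothetical):
//   void MyMklOp::Compute(OpKernelContext* context) {
//     const Tensor& input = MklGetInput(context, 0);  // payload tensor
//     MklDnnShape mkl_shape;
//     GetMklShape(context, 0, &mkl_shape);            // metadata tensor
//     TensorShape tf_shape = GetTfShape(context, 0);  // layout-independent
//     ...
//   }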
#ifdef INTEL_MKL_ML
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
const MklShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
const MklDnnShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
#ifdef INTEL_MKL_ML
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
Tensor** output,
const TensorShape& tf_shape,
const MklShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
OP_REQUIRES_OK(
ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
tf_shape, output));
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
Tensor** output,
const TensorShape& tf_shape,
const MklDnnShape& mkl_shape) {
Tensor* second_tensor = nullptr;
TensorShape second_shape;
second_shape.AddDim(mkl_shape.GetSerializeBufferSize());
OP_REQUIRES_OK(
ctext, ctext->allocate_output(GetTensorDataIndex(n, ctext->num_outputs()),
tf_shape, output));
OP_REQUIRES_OK(ctext, ctext->allocate_output(
GetTensorMetaDataIndex(n, ctext->num_outputs()),
second_shape, &second_tensor));
mkl_shape.SerializeMklDnnShape(
second_tensor->flat<uint8>().data(),
second_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
#ifndef INTEL_MKL_ML
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
const memory::primitive_desc& pd, void** buf_out) {
TensorShape tf_shape;
tf_shape.AddDim(pd.get_size() / sizeof(T) + 1);
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
*buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
dnnLayout_t lt_buff, void** buf_out) {
TensorShape tf_shape;
tf_shape.AddDim(
dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
sizeof(float) +
1);
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
tf_shape, tensor_out));
*buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
TensorShape tf_shape) {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
}
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
const size_t* sizes) {
// MKL requires strides in NCHW
if (data_format == FORMAT_NHWC) {
strides[0] = sizes[2];
strides[1] = sizes[0] * sizes[2];
strides[2] = 1;
strides[3] = sizes[0] * sizes[1] * sizes[2];
} else {
strides[0] = 1;
strides[1] = sizes[0];
strides[2] = sizes[0] * sizes[1];
strides[3] = sizes[0] * sizes[1] * sizes[2];
}
}
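// Worked example (values assumed; sizes are indexed in MklDims order
// {W, H, C, N}): for NHWC with sizes = {W=5, H=4, C=3, N=2} the code above
// yields strides = {C, W*C, 1, H*W*C} = {3, 15, 1, 60}, i.e. C is the
// innermost (unit-stride) dimension.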
#ifdef INTEL_MKL_ML
inline void MklSizesToTFSizes(OpKernelContext* context,
TensorFormat data_format_,
const MklShape& mkl_shape,
TensorShape* tf_shape) {
size_t tf_dim = mkl_shape.GetDimension();
const size_t* tf_sizes = mkl_shape.GetSizes();
OP_REQUIRES(context, tf_dim == 4,
errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));
std::vector<int32> sizes;
sizes.push_back(tf_sizes[3]);
if (data_format_ == FORMAT_NHWC) {
sizes.push_back(tf_sizes[1]);
sizes.push_back(tf_sizes[0]);
sizes.push_back(tf_sizes[2]);
} else {
sizes.push_back(tf_sizes[2]);
sizes.push_back(tf_sizes[1]);
sizes.push_back(tf_sizes[0]);
}
OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif
inline int32 GetMklTensorDimIndex(char dimension) {
switch (dimension) {
case 'N':
return MklDims::N;
case 'C':
return MklDims::C;
case 'H':
return MklDims::H;
case 'W':
return MklDims::W;
default:
LOG(FATAL) << "Invalid dimension: " << dimension;
return -1; // Avoid compiler warning about missing return value
}
}
#ifdef INTEL_MKL_ML
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
int index = GetMklTensorDimIndex(dimension);
CHECK(index >= 0 && index < mkl_shape.GetDimension())
<< "Invalid index from the dimension: " << index << ", " << dimension;
return mkl_shape.dim_size(index);
}
#endif
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
const Tensor& meta = context->input(idx_meta_in);
Tensor output(data.dtype());
Tensor meta_output(meta.dtype());
// TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
CHECK(output.CopyFrom(data, data.shape()));
CHECK(meta_output.CopyFrom(meta, meta.shape()));
context->set_output(idx_data_out, output);
context->set_output(idx_meta_out, meta_output);
}
#ifdef INTEL_MKL_ML
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
MklShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
Tensor output(data.dtype());
// TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
CHECK(output.CopyFrom(data, shape));
context->set_output(idx_data_out, output);
}
#else
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
int idx_out,
const TensorShape& shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
const Tensor& data = context->input(idx_data_in);
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
Tensor output(data.dtype());
// TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
CHECK(output.CopyFrom(data, shape));
context->set_output(idx_data_out, output);
}
#endif
#ifdef INTEL_MKL_ML
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
MklShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
#else
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
MklDnnShape dnn_shape_output;
dnn_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
#endif
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
int idx_out) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
#ifndef INTEL_MKL_ML
// Set a dummy MKLDNN shape (called when the output is in TF format)
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
MklDnnShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
int idx_in, int idx_out,
const MklDnnShape& mkl_shape) {
int num_inputs = context->num_inputs();
int num_outputs = context->num_outputs();
int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
AllocateOutputSetMklShape(context, idx_out, mkl_shape);
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
} else {
context->set_output(idx_data_out, context->input(idx_data_in));
}
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
uint32 idx_data_in,
uint32_t idx_data_out) {
uint32 idx_meta_in =
GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
uint32 idx_meta_out =
GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
if (IsRefType(context->input_dtype(idx_data_in))) {
context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
} else {
context->set_output(idx_meta_out, context->input(idx_meta_in));
}
}
#ifdef INTEL_MKL_ML
// Set a dummy MKL shape (called when the output is in TF format)
inline void SetDummyMklShapeOutput(OpKernelContext* context,
uint32 idx_data_out) {
MklShape mkl_shape_output;
mkl_shape_output.SetMklTensor(false);
AllocateOutputSetMklShape(context, idx_data_out, mkl_shape_output);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
const MklShape* input_shape_1) {
return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->dims() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->dims();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
const float* buf_in = input.flat<float>().data();
float* buf_out = (*output)->flat<float>().data();
int64 N = input.dim_size(0);
int64 H = input.dim_size(1);
int64 W = input.dim_size(2);
int64 C = input.dim_size(3);
int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
for (int64 n = 0; n < N; ++n) {
mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
buf_out + n * stride_n, H * W);
}
}
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
const float* buf_in = input.flat<float>().data();
float* buf_out = (*output)->flat<float>().data();
int64 N = (*output)->dim_size(0);
int64 H = (*output)->dim_size(1);
int64 W = (*output)->dim_size(2);
int64 C = (*output)->dim_size(3);
int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
for (int64 n = 0; n < N; ++n) {
mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
buf_out + n * stride_n, C);
}
}
#endif
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();
/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
template <>
memory::data_type MklDnnType<float>() {
return memory::data_type::f32;
}
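/// A hypothetical additional instantiation (sketch only; confirm that the
/// MKL-DNN build in use exposes memory::data_type::s32 before enabling):
///   template <>
///   memory::data_type MklDnnType<int32>() {
///     return memory::data_type::s32;
///   }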
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
/// Fails with an error if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
if (format == FORMAT_NHWC)
return memory::format::nhwc;
else if (format == FORMAT_NCHW)
return memory::format::nchw;
TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
// Return to get rid of compiler warning
return memory::format::format_undef;
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
/// Fails with an error if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
if (format == memory::format::nhwc)
return FORMAT_NHWC;
else if (format == memory::format::nchw)
return FORMAT_NCHW;
TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
// Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
// that we don't come here.
return FORMAT_NHWC;
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively. So it will preserve the order of dimensions. E.g., if
/// input tensor is in NHWC format, then dims will be in NHWC format
/// also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
memory::dims dims(shape.dims());
for (int d = 0; d < shape.dims(); ++d) {
dims[d] = shape.dim_size(d);
}
return dims;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// This function is more specific than the one above: it maps the input
/// TensorShape into MKL-DNN dims in NCHW format, so it may not preserve the
/// order of dimensions. E.g., if the input tensor is in NHWC format, then
/// dims will be in NCHW format, not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
TensorFormat format) {
// Check validity of format.
CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
memory::format::format_undef);
int n = shape.dim_size(GetTensorDimIndex(format, 'N'));
int c = shape.dim_size(GetTensorDimIndex(format, 'C'));
int h = shape.dim_size(GetTensorDimIndex(format, 'H'));
int w = shape.dim_size(GetTensorDimIndex(format, 'W'));
// MKL-DNN requires dimensions in NCHW format.
return memory::dims({n, c, h, w});
}
/// Overloaded version of function above. Input parameters are
/// self-explanatory.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
TensorFormat format) {
// Check validity of format.
CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
memory::format::format_undef);
int n = in_dims[GetTensorDimIndex(format, 'N')];
int c = in_dims[GetTensorDimIndex(format, 'C')];
int h = in_dims[GetTensorDimIndex(format, 'H')];
int w = in_dims[GetTensorDimIndex(format, 'W')];
// MKL-DNN requires dimensions in NCHW format.
return memory::dims({n, c, h, w});
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function simply maps an input shape in MKL-DNN memory::dims format
/// into Tensorflow's TensorShape object, preserving the dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
std::vector<int32> shape(dims.size(), -1);
for (int d = 0; d < dims.size(); d++) {
shape[d] = dims[d];
}
TensorShape ret;
CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
return ret;
}
/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// the dimension with size 1 is the outermost and the dimension with size 4
/// is the innermost. So strides for this tensor would be {4 * 3 * 2,
/// 4 * 3, 4, 1}, i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
CHECK_GT(dims_tf_order.size(), 0);
memory::dims strides(dims_tf_order.size());
int last_dim_idx = dims_tf_order.size() - 1;
strides[last_dim_idx] = 1;
for (int d = last_dim_idx - 1; d >= 0; d--) {
strides[d] = strides[d + 1] * dims_tf_order[d + 1];
}
return strides;
}
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
// MKL-DNN only supports zero padding.
return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype) {
CHECK_EQ(dim.size(), strides.size());
// We have to construct memory descriptor in a C style. This is not at all
// ideal but MKLDNN does not offer any API to construct descriptor in
// blocked format except a copy constructor that accepts
// mkldnn_memory_desc_t.
mkldnn_memory_desc_t md;
md.primitive_kind = mkldnn_memory;
md.ndims = dim.size();
md.format = mkldnn_blocked;
md.data_type = memory::convert_to_c(dtype);
for (size_t i = 0; i < dim.size(); i++) {
md.layout_desc.blocking.block_dims[i] = 1;
md.layout_desc.blocking.strides[1][i] = 1;
md.layout_desc.blocking.strides[0][i] = strides[i];
md.layout_desc.blocking.padding_dims[i] = dim[i];
md.layout_desc.blocking.offset_padding_to_data[i] = 0;
md.dims[i] = dim[i];
}
md.layout_desc.blocking.offset_padding = 0;
return memory::desc(md);
}
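// Usage sketch (dimensions are assumed values for illustration): build a
// blocked memory descriptor for a 3D tensor laid out in TF row-major order.
//   memory::dims dims = {2, 3, 4};
//   memory::dims strides = CalculateTFStrides(dims);  // {12, 4, 1}
//   memory::desc md =
//       CreateBlockedMemDescHelper(dims, strides, memory::data_type::f32);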
/*
* Class to represent all the resources corresponding to a tensor in TensorFlow
* that are required to execute an operation (such as Convolution).
*/
template <typename T>
class MklDnnData {
private:
/// MKL-DNN memory primitive for input user memory
memory* user_memory_;
/// MKL-DNN memory primitive in case input or output reorder is needed.
memory* reorder_memory_;
/// Operations memory descriptor
memory::desc* op_md_;
/// CPU engine on which operation will be executed
const engine* cpu_engine_;
public:
explicit MklDnnData(const engine* e)
: user_memory_(nullptr),
reorder_memory_(nullptr),
op_md_(nullptr),
cpu_engine_(e) {}
~MklDnnData() {
cpu_engine_ = nullptr; // We don't own this.
delete (user_memory_);
delete (reorder_memory_);
delete (op_md_);
}
inline void* GetTensorBuffer(const Tensor* tensor) const {
CHECK_NOTNULL(tensor);
return const_cast<void*>(
static_cast<const void*>(tensor->flat<T>().data()));
}
/// Set user memory primitive using specified dimensions, memory format and
/// data_buffer. Function automatically uses element data type by using
/// input type T used for creating call object.
///
/// In a nutshell, this function allows the user to describe the input tensor
/// of an operation. E.g., the filter of Conv2D has shape {1, 2, 3, 4} and
/// memory format HWIO, and the buffer that contains the actual values is
/// pointed to by data_buffer.
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
void* data_buffer = nullptr) {
auto md = memory::desc(dim, MklDnnType<T>(), fm);
SetUsrMem(md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, memory::format fm,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, fm, GetTensorBuffer(tensor));
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
/// function such as CalculateTFStrides to compute strides
/// for given dimensions.
/// @return: memory::desc object corresponding to blocked memory format
/// for given dimensions and strides.
static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
const memory::dims& strides) {
return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
}
/// A version of SetUsrMem call that allows user to create memory in blocked
/// format. So in addition to accepting dimensions, it also accepts strides.
/// This allows the user to create memory for a tensor in a format that is
/// not natively supported by MKLDNN. E.g., MKLDNN has no native format for a
/// 6-dimensional tensor, but by using the blocked format a user can still
/// create memory for a 6D tensor.
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
void* data_buffer = nullptr) {
CHECK_EQ(dim.size(), strides.size());
auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
SetUsrMem(blocked_md, data_buffer);
}
inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(dim, strides, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts memory
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic than the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
auto pd = memory::primitive_desc(md, *cpu_engine_);
SetUsrMem(pd, data_buffer);
}
/// A version of SetUsrMem with memory descriptor and tensor
inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(md, GetTensorBuffer(tensor));
}
/// A version of function to set user memory primitive that accepts primitive
/// descriptor directly, instead of accepting dimensions and format. This
/// function is more generic than the one above, but the function above is
/// sufficient in most cases.
inline void SetUsrMem(const memory::primitive_desc& pd,
void* data_buffer = nullptr) {
CHECK_NOTNULL(cpu_engine_);
// TODO(nhasabni): can we remove dynamic memory allocation?
if (data_buffer) {
user_memory_ = new memory(pd, data_buffer);
} else {
user_memory_ = new memory(pd);
}
}
/// A version of SetUsrMem with primitive descriptor and tensor
inline void SetUsrMem(const memory::primitive_desc& pd,
const Tensor* tensor) {
CHECK_NOTNULL(tensor);
SetUsrMem(pd, GetTensorBuffer(tensor));
}
/// Get function for user memory primitive.
inline const memory* GetUsrMem() const { return user_memory_; }
/// Get function for primitive descriptor of user memory primitive.
inline const memory::primitive_desc GetUsrMemPrimDesc() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_primitive_desc();
}
/// Get function for descriptor of user memory.
inline memory::desc GetUsrMemDesc() {
// This is ugly: why doesn't MKL-DNN provide a const desc() method?
const memory::primitive_desc pd = GetUsrMemPrimDesc();
return const_cast<memory::primitive_desc*>(&pd)->desc();
}
/// Get function for data buffer of user memory primitive.
inline void* GetUsrMemDataHandle() const {
CHECK_NOTNULL(user_memory_);
return user_memory_->get_data_handle();
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(void* data_buffer) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(data_buffer);
user_memory_->set_data_handle(data_buffer);
}
/// Set function for data buffer of user memory primitive.
inline void SetUsrMemDataHandle(const Tensor* tensor) {
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(tensor);
user_memory_->set_data_handle(GetTensorBuffer(tensor));
}
/// Get the memory primitive for input and output of an op. If inputs
/// to an op require reorders, then this function returns memory primitive
/// for reorder. Otherwise, it will return memory primitive for user memory.
///
/// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
/// execute Conv2D, we need memory primitives for I and F. But if a reorder is
/// required for I and F (say I_r is reorder primitive for I; F_r is reorder
/// primitive for F), then we need I_r and F_r to perform Conv2D.
inline const memory& GetOpMem() const {
return reorder_memory_ ? *reorder_memory_ : *user_memory_;
}
/// Set memory descriptor of an operation in terms of dimensions and memory
/// format. E.g., For Conv2D, the dimensions would be same as user dimensions
/// but memory::format would be mkldnn::any because we want MKL-DNN to choose
/// best layout/format for given input dimensions.
inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
// TODO(nhasabni): can we remove dynamic memory allocation?
op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
}
/// Get function for memory descriptor for an operation
inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
/// Predicate that checks if we need to reorder user's memory into memory
/// pointed by op_pd.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
CHECK_NOTNULL(user_memory_);
return op_pd != user_memory_->get_primitive_desc();
}
/// Predicate that checks if we need to reorder user's memory into memory
/// based on the provided format.
///
/// @input: target_format - memory format of the given input of an
/// operation
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool IsReorderNeeded(const memory::format& target_format) const {
CHECK_NOTNULL(user_memory_);
return target_format !=
user_memory_->get_primitive_desc().desc().data.format;
}
/// Function to create a reorder from memory pointed by from to memory pointed
/// by to. Returns created primitive.
inline primitive CreateReorder(const memory* from, const memory* to) const {
CHECK_NOTNULL(from);
CHECK_NOTNULL(to);
return reorder(*from, *to);
}
/// Function to handle input reordering
///
/// Check if we need to reorder this input of an operation.
/// Return true and allocate reorder memory primitive if reorder is needed.
/// Otherwise, return false and do not allocate reorder memory primitive.
///
/// To check if reorder is needed, this function compares memory primitive
/// descriptor of an operation (op_pd) for the given input with the
/// user-specified memory primitive descriptor.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
/// Overloaded version of above function that accepts memory buffer
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @reorder_data_handle - memory buffer where output of reorder needs to be
///                        stored. The primitive does not check whether the
///                        buffer is of sufficient size to write into.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
void* reorder_data_handle,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_data_handle);
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd, reorder_data_handle);
net->push_back(CreateReorder(user_memory_, reorder_memory_));
return true;
}
return false;
}
/// Another overloaded version of CheckReorderToOpMem that accepts Tensor
/// where output of reorder needs to be stored.
///
/// @input: op_pd - memory primitive descriptor of the given input of an
/// operation
/// @reorder_tensor - Tensor whose buffer is to be used to store output of
///                    reorder. The primitive does not check whether the
///                    buffer is of sufficient size to write into.
/// @input: net - net to which to add reorder primitive in case it is needed.
/// @return: true in case reorder of input is needed; false, otherwise.
inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
Tensor* reorder_tensor,
std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(reorder_tensor);
return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
}
/// Function to handle output reorder
///
/// This function performs functionality very similar to the input-reordering
/// function above. The only difference is that this function does not add
/// reorder primitive to the net. The reason for this is: the reorder
/// primitive for output needs to be added to the list only after operation
/// has executed. But we need to prepare a temporary buffer in case output
/// reorder is needed. And this temporary buffer will hold the output of
/// an operation before it is fed to reorder primitive.
///
/// @input: op_pd - memory primitive descriptor for the given output of an operation
/// @return: true in case reorder of output is needed; false, otherwise.
inline bool PrepareReorderToUserMemIfReq(
const memory::primitive_desc& op_pd) {
CHECK_NOTNULL(user_memory_);
if (IsReorderNeeded(op_pd)) {
// TODO(nhasabni): can we remove dynamic memory allocation?
reorder_memory_ = new memory(op_pd);
return true;
}
return false;
}
/// Function to actually insert reorder primitive in the net
///
/// This function completes remaining part of output reordering. It inserts
/// a reordering primitive from the temporary buffer that holds the output
/// to the user-specified output buffer.
///
/// @input: net - net to which to add reorder primitive
inline void InsertReorderToUserMem(std::vector<primitive>* net) {
CHECK_NOTNULL(net);
CHECK_NOTNULL(user_memory_);
CHECK_NOTNULL(reorder_memory_);
net->push_back(CreateReorder(reorder_memory_, user_memory_));
}
};
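/// Illustrative usage sketch (not part of this header): a typical MKL-DNN op
/// first reorders its inputs into the primitive's preferred layout, runs the
/// primitive, then reorders the output back into the user's layout. The
/// helper objects and the `op_input_pd`/`op_output_pd` names below are
/// hypothetical; only the member functions shown above are assumed.
///
///   std::vector<primitive> net;
///   input_helper.CheckReorderToOpMem(op_input_pd, &net);
///   bool out_reordered =
///       output_helper.PrepareReorderToUserMemIfReq(op_output_pd);
///   net.push_back(op_primitive);
///   if (out_reordered) output_helper.InsertReorderToUserMem(&net);
///   stream(stream::kind::eager).submit(net).wait();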
/// Base class for operations with reuse of primitives
///
class MklPrimitive {
public:
virtual ~MklPrimitive() {}
// Dummy data. Its size, hard-coded as 256 here, does
// not matter since MKL should never operate on this buffer.
unsigned char DummyData[256];
};
const mkldnn::memory::dims NONE_DIMS = {};
template <typename T>
class MklPrimitiveFactory {
public:
MklPrimitiveFactory() {}
~MklPrimitiveFactory() {}
MklPrimitive* GetOp(const std::string& key) {
auto stream_iter = MklPrimitiveFactory<T>::GetHashMap().find(key);
if (stream_iter == MklPrimitiveFactory<T>::GetHashMap().end()) {
return nullptr;
} else {
return stream_iter->second;
}
}
void SetOp(const std::string& key, MklPrimitive* op) {
auto stream_iter = MklPrimitiveFactory<T>::GetHashMap().find(key);
CHECK(stream_iter == MklPrimitiveFactory<T>::GetHashMap().end());
MklPrimitiveFactory<T>::GetHashMap()[key] = op;
}
private:
static inline std::unordered_map<std::string, MklPrimitive*> &GetHashMap() {
static thread_local std::unordered_map<std::string, MklPrimitive*> map_;
return map_;
}
};
// utility class for creating keys of MKL primitive pool.
class FactoryKeyCreator {
public:
FactoryKeyCreator() {
key_.reserve(kMaxKeyLength);
}
~FactoryKeyCreator() {}
void AddAsKey(const string& str) { Append(str); }
void AddAsKey(const mkldnn::memory::dims &dims) {
for (unsigned int i = 0; i < dims.size(); i++) {
AddAsKey<int>(dims[i]);
}
}
template <typename T>
void AddAsKey(const T data) {
auto buffer = reinterpret_cast<const char *>(&data);
Append(StringPiece(buffer, sizeof(T)));
}
std::string GetKey() {
return key_;
}
private:
string key_;
const char delimiter = 'x';
const int kMaxKeyLength = 256;
void Append(StringPiece s) {
key_.append(s.ToString());
key_.append(1, delimiter);
}
};
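// Illustrative usage sketch (hypothetical values): compose a pool key from an
// op name, the input dims, and a scalar parameter, so that structurally
// identical primitives map to the same cached entry.
//
//   FactoryKeyCreator key_creator;
//   key_creator.AddAsKey(string("conv2d_fwd"));
//   key_creator.AddAsKey(src_dims);      // mkldnn::memory::dims
//   key_creator.AddAsKey<float>(alpha);  // raw bytes of the scalar
//   string key = key_creator.GetKey();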
#endif // INTEL_MKL_DNN
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
workshare.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define CHUNKSIZE 10
#define N 100
// Dynamic scheduling of work among a group of threads,
// each given a CHUNKSIZE of work repeatedly
// until the job is done
int main (int argc, char *argv[])
{
int nthreads, tid, i, chunk;
double a[N], b[N];
for (i=0; i < N; i++)
a[i] = (double)i;
chunk = CHUNKSIZE;
#pragma omp parallel shared(a,b,nthreads,chunk) private(i,tid)
{
tid = omp_get_thread_num();
if (tid == 0) {
nthreads = omp_get_num_threads();
printf("Number of threads = %d\n", nthreads);
}
printf("Thread %d starting...\n",tid);
#pragma omp for schedule(dynamic,chunk)
for (i=0; i<N; i++) {
b[i] = a[i]*a[i];
printf("Thread %d: c[%d]= %f\n",tid,i,b[i]);
}
}
return 0;
}
|
GB_binop__lxor_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_int8)
// A*D function (colscale): GB (_AxD__lxor_int8)
// D*A function (rowscale): GB (_DxB__lxor_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_int8)
// C=scalar+B GB (_bind1st__lxor_int8)
// C=scalar+B' GB (_bind1st_tran__lxor_int8)
// C=A+scalar GB (_bind2nd__lxor_int8)
// C=A'+scalar GB (_bind2nd_tran__lxor_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
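// Worked example of the logical-xor semantics on int8 inputs: aij = 5,
// bij = 0 gives cij = ((5 != 0) != (0 != 0)) = (true != false) = 1, while
// aij = 5, bij = -2 gives (true != true) = 0; any nonzero value acts as true.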
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_INT8 || GxB_NO_LXOR_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lxor_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lxor_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) != (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lxor_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) != (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Lorentz_system_calculation.c | #include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <math.h>
#include <gmp.h>
#include <stdlib.h>
//#define N 200000
//
////int N = 2;
//#define Tn 5 //actually N+1
//#define K 10 // order of precision for the use with GMP
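// Taylor-series integration of the Lorenz system in arbitrary precision
// (GMP). Each time step computes Taylor coefficients x[i], y[i], z[i] of the
// solution: the nonlinear terms x*z and x*y become Cauchy products
// sum_k x[i-k]*z[k] and sum_k x[i-k]*y[k], accumulated in parallel with
// OpenMP, and the state (X,Y,Z) is then advanced by evaluating the
// truncated series at `timestep`.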
int main(int argc, char *argv[]){
int N=400;
int Tn=1200;
double time_s = omp_get_wtime();
int Numt=omp_get_max_threads();
printf("Threads sum = %d\n",Numt);
mpf_t tempv[Numt];
mpf_t sum[2*Numt];
mpf_t X;//=malloc(Tn*sizeof(mpf_t));
mpf_t Y;//=malloc(Tn*sizeof(mpf_t));
mpf_t Z;//=malloc(Tn*sizeof(mpf_t));
mpf_t temp;
mpf_t s1;
mpf_t s2;
mpf_t divs;
mpf_t R;
mpf_t Sigma;
mpf_t b;
int tid=0;
int k=0;
int j=0;
mpf_t x_red;
mpf_t y_red;
mpf_t z_red;
mpf_t tpow;
mpf_t timestep;
mpf_t c;
mpf_t count;
mpf_t x[N+1];
mpf_t y[N+1];
mpf_t z[N+1];
mpf_init(R); mpf_init(Sigma); mpf_init(timestep); mpf_init(b);
mpf_init(tpow); mpf_init(x_red); mpf_init(X); mpf_init(y_red); mpf_init(Y);
mpf_init(z_red); mpf_init(Z); mpf_init(s1); mpf_init(s2); mpf_init(temp);
mpf_init(c);
mpf_init(count);
for (int i = 0; i <(2*Numt); ++i){
mpf_init(sum[i]);
}
for (int i = 0; i <Numt; ++i){
mpf_init(tempv[i]);
}
mpf_init(divs);
for (int i = 0; i <= N; ++i) // x, y, z hold N+1 Taylor coefficients
{
mpf_init(x[i]);
mpf_init(y[i]);
mpf_init(z[i]);
}
mpf_set_d(R,28);
mpf_set_d(Sigma,10);
mpf_set_d(b, 8.0/3.0); // 8/3 in integer arithmetic would truncate to 2
mpf_set_d(timestep,0.01);
// set initial conditions where at time, t=0;
mpf_set_d(X, - 15.8);
mpf_set_d(Y, - 17.48);
mpf_set_d(Z, 35.64);
printf("initialised completed %d\n",1);
for (int t = 1; t < Tn; t++) // start iterations from t=1 to t=Tn; we know t0
{
mpf_set(x[0], X);
mpf_set(y[0], Y);
mpf_set(z[0],Z);
for (int i = 0; i<N; i++)
{
// reset the partial sums for this Cauchy-product step
mpf_set_d(s1,0.0);
mpf_set_d(s2,0.0);
for (j=0; j<Numt; j++){
mpf_set_d(sum[2*j],0.0);
mpf_set_d(sum[2*j+1],0.0);
}
#pragma omp parallel private(k,tid)
{
tid = omp_get_thread_num();
// printf("hi %d\n",tid);
#pragma omp for
for (k=0; k<=i; k++)
{
mpf_mul(tempv[tid],x[i-k],z[k]);
mpf_add(sum[2*tid],sum[2*tid],tempv[tid]);
mpf_mul(tempv[tid],x[i-k],y[k]);
mpf_add(sum[2*tid+1],sum[2*tid+1],tempv[tid]);
}
}
for (j=0; j<Numt; j++)// manual reduction
{
mpf_add(s1,s1,sum[2*j]);
mpf_add(s2,s2,sum[2*j+1]);
}
// compute the next Taylor coefficients x[i+1], y[i+1], z[i+1]
mpf_set_d(c,1.0);
mpf_set_d(count,i+1.0);
mpf_div(divs ,c,count);
mpf_sub(temp,y[i],x[i]);
mpf_mul(temp,temp,divs);
mpf_mul(x[i+1],temp,Sigma);
mpf_mul(temp,R,x[i]);
mpf_sub(temp,temp,y[i]);
mpf_sub(temp,temp,s1);
mpf_mul(y[i+1],temp,divs);
mpf_mul(temp,b,z[i]);
mpf_sub(temp,s2,temp);
mpf_mul(z[i+1],temp,divs);
}
// advance the state: evaluate the truncated Taylor series at `timestep`,
// starting at i=1 since x[0]=X is already part of the state
mpf_set_d(x_red,0.0);
mpf_set_d(y_red,0.0);
mpf_set_d(z_red,0.0);
for(int i=1;i<=N;i++){
mpf_pow_ui(tpow,timestep,i);
mpf_mul(temp,x[i],tpow);
mpf_add(x_red,x_red,temp);
mpf_mul(temp,y[i],tpow);
mpf_add(y_red,y_red,temp);
mpf_mul(temp,z[i],tpow);
mpf_add(z_red,z_red,temp);
}
mpf_add(X,X,x_red);
mpf_add(Y,Y,y_red);
mpf_add(Z,Z,z_red);
}
// gmp_printf("X at t = %d is %.5Ff\n",Tn,X);
// gmp_printf("Y t = %d is %.5Ff\n",Tn,Y);
// gmp_printf("Z t = %d is %.5Ff\n",Tn,Z);
time_s = omp_get_wtime()-time_s;
printf("Time elapsed is %fsecs\n", time_s);
return 0;
}
|
raytracing.c | #include <stdio.h>
#include <stdlib.h>
#include "math-toolkit.h"
#include "primitives.h"
#include "raytracing.h"
#include "idx_stack.h"
#define MAX_REFLECTION_BOUNCES 3
#define MAX_DISTANCE 1000000000000.0
#define MIN_DISTANCE 0.00001
#define SAMPLES 4
#define SQUARE(x) ((x) * (x))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* @param t1 distance along the ray to the intersection point
 * @return 1 means hit, otherwise 0
 */
static int raySphereIntersection(const point3 ray_e,
const point3 ray_d,
const sphere *sph,
intersection *ip, double *t1)
{
point3 l;
subtract_vector(sph->center, ray_e, l);
double s = dot_product(l, ray_d);
double l2 = dot_product(l, l);
double r2 = sph->radius * sph->radius;
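    /* Geometry of the test: s is the projection of l (origin-to-center)
     * onto the ray; m2, computed below, is the squared distance from the
     * center to the ray, and q spans from that foot point to the surface,
     * so the near/far hit distances are s -/+ q. */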
if (s < 0 && l2 > r2)
return 0;
    double m2 = l2 - s * s;
    if (m2 > r2)
        return 0;
    double q = sqrt(r2 - m2);
*t1 = (l2 > r2) ? (s - q) : (s + q);
/* p = e + t1 * d */
multiply_vector(ray_d, *t1, ip->point);
add_vector(ray_e, ip->point, ip->point);
subtract_vector(ip->point, sph->center, ip->normal);
normalize(ip->normal);
if (dot_product(ip->normal, ray_d) > 0.0)
multiply_vector(ip->normal, -1, ip->normal);
return 1;
}
/* @return 1 means hit, otherwise 0; */
static int rayRectangularIntersection(const point3 ray_e,
const point3 ray_d,
rectangular *rec,
intersection *ip, double *t1)
{
point3 e01, e03, p;
subtract_vector(rec->vertices[1], rec->vertices[0], e01);
subtract_vector(rec->vertices[3], rec->vertices[0], e03);
cross_product(ray_d, e03, p);
double det = dot_product(e01, p);
    /* Reject rays orthogonal to the normal vector,
     * i.e. rays parallel to the plane.
     */
if (det < 1e-4)
return 0;
double inv_det = 1.0 / det;
point3 s;
subtract_vector(ray_e, rec->vertices[0], s);
double alpha = inv_det * dot_product(s, p);
if ((alpha > 1.0) || (alpha < 0.0))
return 0;
point3 q;
cross_product(s, e01, q);
double beta = inv_det * dot_product(ray_d, q);
if ((beta > 1.0) || (beta < 0.0))
return 0;
*t1 = inv_det * dot_product(e03, q);
if (alpha + beta > 1.0f) {
/* for the second triangle */
point3 e23, e21;
subtract_vector(rec->vertices[3], rec->vertices[2], e23);
subtract_vector(rec->vertices[1], rec->vertices[2], e21);
cross_product(ray_d, e21, p);
det = dot_product(e23, p);
if (det < 1e-4)
return 0;
inv_det = 1.0 / det;
subtract_vector(ray_e, rec->vertices[2], s);
alpha = inv_det * dot_product(s, p);
if (alpha < 0.0)
return 0;
cross_product(s, e23, q);
beta = inv_det * dot_product(ray_d, q);
if ((beta < 0.0) || (beta + alpha > 1.0))
return 0;
*t1 = inv_det * dot_product(e21, q);
}
if (*t1 < 1e-4)
return 0;
COPY_POINT3(ip->normal, rec->normal);
if (dot_product(ip->normal, ray_d)>0.0)
multiply_vector(ip->normal, -1, ip->normal);
multiply_vector(ray_d, *t1, ip->point);
add_vector(ray_e, ip->point, ip->point);
return 1;
}
static void localColor(color local_color,
const color light_color, double diffuse,
double specular, const object_fill *fill)
{
color ambi = { 0.1, 0.1, 0.1 };
color diff, spec, lightCo, surface;
/* Local Color = ambient * surface +
* light * ( kd * surface * diffuse + ks * specular)
*/
COPY_COLOR(diff, fill->fill_color);
multiply_vector(diff, fill->Kd, diff);
multiply_vector(diff, diffuse, diff);
COPY_COLOR(lightCo, light_color);
multiply_vectors(diff, lightCo, diff);
COPY_COLOR(spec, light_color);
multiply_vector(spec, fill->Ks, spec);
multiply_vector(spec, specular, spec);
COPY_COLOR(surface, fill->fill_color);
multiply_vectors(ambi,surface, ambi);
add_vector(diff, ambi, diff);
add_vector(diff, spec, diff);
add_vector(local_color, diff, local_color);
}
/* @param d direction of the ray into intersection
* @param l direction of intersection to light
* @param n surface normal
*/
static void compute_specular_diffuse(double *diffuse,
double *specular,
const point3 d, const point3 l,
const point3 n, double phong_pow)
{
point3 d_copy, l_copy, middle, r;
/* Calculate vector to eye V */
COPY_POINT3(d_copy, d);
multiply_vector(d_copy, -1, d_copy);
normalize(d_copy);
/* Calculate vector to light L */
COPY_POINT3(l_copy, l);
multiply_vector(l_copy, -1, l_copy);
normalize(l_copy);
/* Calculate reflection direction R */
double tmp = dot_product(n, l_copy);
multiply_vector(n, tmp, middle);
multiply_vector(middle, 2, middle);
subtract_vector(middle, l_copy, r);
normalize(r);
/* diffuse = max(0, dot_product(n, -l)) */
*diffuse = MAX(0, dot_product(n, l_copy));
/* specular = (dot_product(r, -d))^p */
*specular = pow(MAX(0, dot_product(r, d_copy)), phong_pow);
}
/* @param r direction of reflected ray
* @param d direction of primary ray into intersection
* @param n surface normal at intersection
*/
static void reflection(point3 r, const point3 d, const point3 n)
{
/* r = d - 2(d . n)n */
multiply_vector(n, -2.0 * dot_product(d, n), r);
add_vector(r, d, r);
}
/* reference: https://www.opengl.org/sdk/docs/man/html/refract.xhtml */
static void refraction(point3 t, const point3 I, const point3 N,
double n1, double n2)
{
double eta = n1 / n2;
double dot_NI = dot_product(N,I);
double k = 1.0 - eta * eta * (1.0 - dot_NI * dot_NI);
if (k < 0.0 || n2 <= 0.0)
t[0] = t[1] = t[2] = 0.0;
else {
point3 tmp;
multiply_vector(I, eta, t);
multiply_vector(N, eta * dot_NI + sqrt(k), tmp);
subtract_vector(t, tmp, t);
}
}
/* @param r direction of the incoming ray, unit vector
 * @param l direction of the refraction ray, unit vector
 * @param normal unit vector
 * @param n1 refraction index
 * @param n2 refraction index
 *
 * reference: http://graphics.stanford.edu/courses/cs148-10-summer/docs/2006--degreve--reflection_refraction.pdf
 */
static double fresnel(const point3 r, const point3 l,
const point3 normal, double n1, double n2)
{
/* TIR */
if (length(l) < 0.99)
return 1.0;
double cos_theta_i = -dot_product(r, normal);
double cos_theta_t = -dot_product(l, normal);
double r_vertical_root = (n1 * cos_theta_i - n2 * cos_theta_t) /
(n1 * cos_theta_i + n2 * cos_theta_t);
double r_parallel_root = (n2 * cos_theta_i - n1 * cos_theta_t) /
(n2 * cos_theta_i + n1 * cos_theta_t);
return (r_vertical_root * r_vertical_root +
r_parallel_root * r_parallel_root) / 2.0;
}
/* @param t distance */
static intersection ray_hit_object(const point3 e, const point3 d,
double t0, double t1,
const rectangular_node rectangulars,
rectangular_node *hit_rectangular,
const sphere_node spheres,
sphere_node *hit_sphere)
{
/* set these to not hit */
*hit_rectangular = NULL;
*hit_sphere = NULL;
point3 biased_e;
multiply_vector(d, t0, biased_e);
add_vector(biased_e, e, biased_e);
double nearest = t1;
intersection result, tmpresult;
for (rectangular_node rec = rectangulars; rec; rec = rec->next) {
if (rayRectangularIntersection(biased_e, d, &(rec->element),
&tmpresult, &t1) && (t1 < nearest)) {
/* hit is closest so far */
*hit_rectangular = rec;
nearest = t1;
result = tmpresult;
}
}
/* check the spheres */
for (sphere_node sphere = spheres; sphere; sphere = sphere->next) {
if (raySphereIntersection(biased_e, d, &(sphere->element),
&tmpresult, &t1) && (t1 < nearest)) {
*hit_sphere = sphere;
*hit_rectangular = NULL;
nearest = t1;
result = tmpresult;
}
}
return result;
}
/* @param d direction of ray
* @param w basic vectors
*/
static void rayConstruction(point3 d, const point3 u, const point3 v,
const point3 w, unsigned int i, unsigned int j,
const viewpoint *view, unsigned int width,
unsigned int height)
{
double xmin = -0.0175;
double ymin = -0.0175;
double xmax = 0.0175;
double ymax = 0.0175;
double focal = 0.05;
point3 u_tmp, v_tmp, w_tmp, s;
double w_s = focal;
double u_s = xmin + ((xmax - xmin) * (float) i / (width - 1));
double v_s = ymax + ((ymin - ymax) * (float) j / (height - 1));
/* s = e + u_s * u + v_s * v + w_s * w */
multiply_vector(u, u_s, u_tmp);
multiply_vector(v, v_s, v_tmp);
multiply_vector(w, w_s, w_tmp);
add_vector(view->vrp, u_tmp, s);
add_vector(s, v_tmp, s);
add_vector(s, w_tmp, s);
/* p(t) = e + td = e + t(s - e) */
subtract_vector(s, view->vrp, d);
normalize(d);
}
static void calculateBasisVectors(point3 u, point3 v, point3 w,
const viewpoint *view)
{
/* w */
COPY_POINT3(w, view->vpn);
normalize(w);
/* u = (t x w) / (|t x w|) */
cross_product(w, view->vup, u);
normalize(u);
/* v = w x u */
cross_product(u, w, v);
normalize(v);
}
/* @brief protect color value overflow */
static void protect_color_overflow(color c)
{
for (int i = 0; i < 3; i++)
if (c[i] > 1.0) c[i] = 1.0;
}
static unsigned int ray_color(const point3 e, double t,
const point3 d,
idx_stack *stk,
const rectangular_node rectangulars,
const sphere_node spheres,
const light_node lights,
color object_color, int bounces_left)
{
rectangular_node hit_rec = NULL, light_hit_rec = NULL;
sphere_node hit_sphere = NULL, light_hit_sphere = NULL;
double diffuse, specular;
point3 l, _l, r, rr;
object_fill fill;
color reflection_part;
color refraction_part;
/* might be a reflection ray, so check how many times we've bounced */
if (bounces_left == 0) {
SET_COLOR(object_color, 0.0, 0.0, 0.0);
return 0;
}
/* check for intersection with a sphere or a rectangular */
intersection ip= ray_hit_object(e, d, t, MAX_DISTANCE, rectangulars,
&hit_rec, spheres, &hit_sphere);
if (!hit_rec && !hit_sphere)
return 0;
/* pick the fill of the object that was hit */
fill = hit_rec ?
hit_rec->element.rectangular_fill :
hit_sphere->element.sphere_fill;
void *hit_obj = hit_rec ? (void *) hit_rec : (void *) hit_sphere;
/* assume it is a shadow */
SET_COLOR(object_color, 0.0, 0.0, 0.0);
for (light_node light = lights; light; light = light->next) {
/* calculate the intersection vector pointing at the light */
subtract_vector(ip.point, light->element.position, l);
multiply_vector(l, -1, _l);
normalize(_l);
/* check for intersection with an object. use ignore_me
* because we don't care about this normal
*/
ray_hit_object(ip.point, _l, MIN_DISTANCE, length(l),
rectangulars, &light_hit_rec,
spheres, &light_hit_sphere);
        /* the light is not blocked by another object (the point is lit) */
if (light_hit_rec || light_hit_sphere)
continue;
compute_specular_diffuse(&diffuse, &specular, d, l,
ip.normal, fill.phong_power);
localColor(object_color, light->element.light_color,
diffuse, specular, &fill);
}
reflection(r, d, ip.normal);
double idx = idx_stack_top(stk).idx, idx_pass = fill.index_of_refraction;
if (idx_stack_top(stk).obj == hit_obj) {
idx_stack_pop(stk);
idx_pass = idx_stack_top(stk).idx;
} else {
idx_stack_element e = { .obj = hit_obj,
.idx = fill.index_of_refraction
};
idx_stack_push(stk, e);
}
refraction(rr, d, ip.normal, idx, idx_pass);
double R = (fill.T > 0.1) ?
fresnel(d, rr, ip.normal, idx, idx_pass) :
1.0;
/* totalColor = localColor +
mix((1-fill.Kd) * fill.R * reflection, T * refraction, R)
*/
if (fill.R > 0) {
/* if we hit something, add the color */
int old_top = stk->top;
if (ray_color(ip.point, MIN_DISTANCE, r, stk, rectangulars, spheres,
lights, reflection_part,
bounces_left - 1)) {
multiply_vector(reflection_part, R * (1.0 - fill.Kd) * fill.R,
reflection_part);
add_vector(object_color, reflection_part,
object_color);
}
stk->top = old_top;
}
/* calculate refraction ray */
if ((length(rr) > 0.0) && (fill.T > 0.0) &&
(fill.index_of_refraction > 0.0)) {
normalize(rr);
if (ray_color(ip.point, MIN_DISTANCE, rr, stk,rectangulars, spheres,
lights, refraction_part,
bounces_left - 1)) {
multiply_vector(refraction_part, (1 - R) * fill.T,
refraction_part);
add_vector(object_color, refraction_part,
object_color);
}
}
protect_color_overflow(object_color);
return 1;
}
/* @param background_color this is not ambient light */
void raytracing(uint8_t *pixels, color background_color,
rectangular_node rectangulars, sphere_node spheres,
light_node lights, const viewpoint *view,
int width, int height)
{
point3 u, v, w, d;
color object_color = { 0.0, 0.0, 0.0 };
/* calculate u, v, w */
calculateBasisVectors(u, v, w, view);
idx_stack stk;
int factor = sqrt(SAMPLES);
#pragma omp parallel for num_threads(256) \
private(stk), private(d), \
private(object_color)
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
double r = 0, g = 0, b = 0;
/* MSAA */
for (int s = 0; s < SAMPLES; s++) {
idx_stack_init(&stk);
rayConstruction(d, u, v, w,
i * factor + s / factor,
j * factor + s % factor,
view,
width * factor, height * factor);
if (ray_color(view->vrp, 0.0, d, &stk, rectangulars, spheres,
lights, object_color,
MAX_REFLECTION_BOUNCES)) {
r += object_color[0];
g += object_color[1];
b += object_color[2];
} else {
r += background_color[0];
g += background_color[1];
b += background_color[2];
                }
            }
            /* average the samples and write each pixel once */
            pixels[((i + (j * width)) * 3) + 0] = r * 255 / SAMPLES;
            pixels[((i + (j * width)) * 3) + 1] = g * 255 / SAMPLES;
            pixels[((i + (j * width)) * 3) + 2] = b * 255 / SAMPLES;
}
}
}
|
GB_unaryop__minv_uint64_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_bool
// op(A') function: GB_tran__minv_uint64_bool
// C type: uint64_t
// A type: bool
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
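// For this operator the GB_CAST_OP chain expands, in effect, to:
//   bool aij = Ax [pA] ;
//   uint64_t x = (uint64_t) aij ;
//   Cx [pC] = GB_IMINV_UNSIGNED (x, 64) ;
// i.e. the bool is typecast to uint64_t before the integer multiplicative
// inverse is applied (GB_IMINV_UNSIGNED defines the x == 0 case itself).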
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__minv_uint64_bool
(
uint64_t *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__minv_uint64_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
common.h | #ifndef __COMMOM_H__
#define __COMMOM_H__
#include <stdint.h>
#include <string.h>
#include "gap_common.h"
#include "gap_cluster.h"
#include "gap_dmamchan.h"
performance_t perf;
#define l2malloc malloc
#define l2free free
#define l1malloc L1_Malloc
#define l1free L1_Free
#define ANSI_COLOR_RED "\x1b[1m\x1b[31m"
#define ANSI_COLOR_GREEN "\x1b[1m\x1b[32m"
#define ANSI_COLOR_RESET "\x1b[0m"
#define SHARED_ICACHE (0x0U)
#define PRIVATE_ICACHE (0x1U)
#define MULTIPORT_ICACHE (0x2U)
#define L2_MEM_BASE_ADDR (0x1C000000)
volatile int PowerBenchActive __attribute__( ( aligned ( 8 ) ) );
int cycles[3] = {0,0,0};
int reg[3][20];
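/* Sums the first byte of every element (stride of elementSize bytes): a cheap
 * sanity checksum for comparing results against a reference, not a full
 * byte-wise digest. */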
static inline uint32_t checksum_vector(void *v, uint32_t nbElements, uint32_t elementSize)
{
    uint32_t cnt = 0U;
uint32_t i;
for(i=0; i<nbElements; i++)
cnt += ((uint8_t *) v) [i*elementSize];
return cnt;
}
static inline void memcpy_async(void *dst, void *src, uint32_t size, dma_req_t *req)
{
if (L2_MEM_BASE_ADDR <= (uint32_t) dst) {
// Copy from shared L1 to L2
DMAMCHAN_Memcpy_1D((uint32_t)dst, (uint32_t)src, size, GAP_DMA_TCDM2L2, req);
} else {
// copy from L2 to shared L1
DMAMCHAN_Memcpy_1D((uint32_t)src, (uint32_t)dst, size, GAP_DMA_L22TCDM, req);
}
}
static inline void memcpy_async2D(void *dst, void *src, uint32_t size, uint32_t stride, uint32_t count, dma_req_t *req)
{
if (L2_MEM_BASE_ADDR <= (uint32_t) dst) {
// Copy from shared L1 to L2
DMAMCHAN_Memcpy_2D((uint32_t) dst, (uint32_t)src, size, stride, count, GAP_DMA_TCDM2L2, req);
}
else {
// copy from L2 to shared L1
DMAMCHAN_Memcpy_2D((uint32_t) src, (uint32_t) dst, size, stride, count, GAP_DMA_L22TCDM, req);
}
}
static inline void memcpy_wait(dma_req_t *req)
{
DMAMCHAN_WaitRequestEnd(req);
}
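#if 0
/* Illustrative double-buffering sketch (not used by the benchmarks): start
 * the DMA for chunk i+1 while the core works on chunk i, using only the
 * helpers above. process(), l1_buf, and the chunk geometry are hypothetical. */
static inline void double_buffer_example(uint8_t *l2_src, uint8_t *l1_buf[2],
                                         uint32_t chunk, uint32_t nchunks)
{
    dma_req_t req[2];
    memcpy_async(l1_buf[0], l2_src, chunk, &req[0]);
    for (uint32_t i = 0; i < nchunks; i++) {
        if (i + 1 < nchunks)
            memcpy_async(l1_buf[(i + 1) & 1], l2_src + (i + 1) * chunk,
                         chunk, &req[(i + 1) & 1]);
        memcpy_wait(&req[i & 1]);
        /* process(l1_buf[i & 1], chunk); */
    }
}
#endif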
static inline void perf_save_reg(int iter) {
for (int i = 4; i < 20; i++)
reg[iter][i] = *(volatile int*) (CLUSTER_SCBC_BASE + (i) * 4);
}
static inline void perf_reg_print(int iter) {
printf("Time: %d\n", cycles[iter]);
for(int i = 4; i < 8; ++i)
printf("%d,", (int)reg[iter][i]);
printf(" PRI\n");
for(int i = 8; i < 12; ++i)
printf("%d,", (int)reg[iter][i]);
printf(" SP\n");
for(int i = 12; i < 16; ++i)
printf("%d,", (int)reg[iter][i]);
printf(" HIER1\n");
for(int i = 16; i < 20; ++i)
printf("%d,", (int)reg[iter][i]);
printf(" MP-HIER2\n");
}
static inline void
profile_start(int iter)
{
// 0x10201410, Clear counter
CLUSTER_SCBC->ICACHE_CNTS_CLEAR = 0xC1A0FFFFU;
// 0x10201414, Enable counter Start
CLUSTER_SCBC->ICACHE_CNTS_ENABLE = 0xCA550FFFU;
#pragma omp master
{
CLUSTER_TIMERL->COMPARE = 0xFFFFFFFF;
/* Reset Enable Counter */
CLUSTER_TIMERL->CTRL =
(1 << TIMERL_CFG_REG_LOW_ENABLE_Pos)
| (1 << TIMERL_CFG_REG_LOW_RESET_Pos)
| (1 << TIMERL_CFG_REG_LOW_IRQE_Pos)
| (0 << TIMERL_CFG_REG_LOW_IEM_Pos)
| (1 << TIMERL_CFG_REG_LOW_CMP_CLR_Pos)
| (0 << TIMERL_CFG_REG_LOW_ONE_SHOT_Pos)
| (0 << TIMERL_CFG_REG_LOW_PRESCALERE_Pos)
| (0 << TIMERL_CFG_REG_LOW_PRESCALER_Pos)
| (0 << TIMERL_CFG_REG_LOW_CLKS_Pos)
| (0 << TIMERL_CFG_REG_LOW_64BIT_Pos);
/* Reset value */
CLUSTER_TIMERL->VALUE = 0;
if(iter == 2) {
PowerBenchActive = 0XABBAABBA;
}
}
}
static inline void
profile_stop(int iter)
{
#pragma omp master
{
if(iter == 2) {
PowerBenchActive = 0xABBACACA;
}
cycles[iter] = CLUSTER_TIMERL->VALUE;
CLUSTER_TIMERL->CTRL = 0;
perf_save_reg(iter);
}
}
static inline void
profile_show()
{
perf_reg_print(0);
perf_reg_print(1);
perf_reg_print(2);
}
#endif
|
GB_bitmap_add_template.c | //------------------------------------------------------------------------------
// GB_bitmap_add_template: C = A+B, C<M>=A+B, and C<!M>=A+B, C bitmap
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is bitmap. The mask M can have any sparsity structure, and is efficient
// to apply (all methods are asymptotically optimal). All cases (no M, M, !M)
// are handled.
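// Throughout this template, Cb [p] encodes the status of C(i,j): 0 means the
// entry is not present, 1 means it is present, and in the sparse-mask method
// below the value 2 temporarily marks positions where M(i,j)=1 has been
// scattered into C, so they are skipped and cleared afterwards.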
{
// TODO: the input C can be modified in-place, if it is also bitmap
int64_t cnvals = 0 ;
if (M == NULL)
{
//----------------------------------------------------------------------
// M is not present
//----------------------------------------------------------------------
// ------------------------------------------
// C = A + B
// ------------------------------------------
// bitmap . sparse bitmap
// bitmap . bitmap sparse
// bitmap . bitmap bitmap
ASSERT (A_is_bitmap || B_is_bitmap) ;
ASSERT (!A_is_full) ;
ASSERT (!B_is_full) ;
if (A_is_bitmap && B_is_bitmap)
{
//------------------------------------------------------------------
// Method21: C, A, and B are all bitmap
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
int8_t c = 0 ;
if (Ab [p] && Bb [p])
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, p) ;
GB_GETB (bij, Bx, p) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
c = 1 ;
}
else if (Bb [p])
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
c = 1 ;
}
else if (Ab [p])
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
c = 1 ;
}
Cb [p] = c ;
task_cnvals += c ;
}
cnvals += task_cnvals ;
}
}
else if (A_is_bitmap)
{
//------------------------------------------------------------------
// Method22: C and A are bitmap; B is sparse or hypersparse
//------------------------------------------------------------------
int64_t p ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
// C (i,j) = A (i,j)
int8_t a = Ab [p] ;
if (a) GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
Cb [p] = a ;
}
cnvals = A->nvals ;
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
if (Cb [p])
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, p) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, pB) ;
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
else
{
//------------------------------------------------------------------
// Method23: C and B are bitmap; A is sparse or hypersparse
//------------------------------------------------------------------
int64_t p ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static)
for (p = 0 ; p < cnz ; p++)
{
// C (i,j) = B (i,j)
int8_t b = Bb [p] ;
if (b) GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
Cb [p] = b ;
}
cnvals = B->nvals ;
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
if (Cb [p])
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, p) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, pA) ;
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
}
else if (M_is_sparse_or_hyper)
{
//----------------------------------------------------------------------
// C is bitmap, M is sparse or hyper and complemented
//----------------------------------------------------------------------
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// bitmap sparse sparse bitmap
// bitmap sparse sparse full
// bitmap sparse bitmap sparse
// bitmap sparse bitmap bitmap
// bitmap sparse bitmap full
// bitmap sparse full sparse
// bitmap sparse full bitmap
// bitmap sparse full full
// M is sparse and complemented. If M is sparse and not
// complemented, then C is constructed as sparse, not bitmap.
ASSERT (Mask_comp) ;
    // C(i,j) = A(i,j) + B(i,j) can only be computed where M(i,j) is
    // not present in the sparse pattern of M, or where it is present
    // but equal to zero.
//----------------------------------------------------------------------
// scatter M into the C bitmap
//----------------------------------------------------------------------
GB_SLICE_MATRIX (M, 8, chunk) ;
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < M_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Mslice [taskid] ;
int64_t klast = klast_Mslice [taskid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of M(:,k) for this task
int64_t j = GBH (Mh, k) ;
int64_t pM_start, pM_end ;
GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst,
klast, pstart_Mslice, Mp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over M(:,j), the kth vector of M
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
// mark C(i,j) if M(i,j) is true
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
int64_t p = pC_start + i ;
Cb [p] = 2 ;
}
}
}
}
// C(i,j) has been marked, in Cb, with the value 2 where M(i,j)=1.
// These positions will not be computed in C(i,j). C(i,j) can only
// be modified where Cb [p] is zero.
//----------------------------------------------------------------------
// compute C<!M>=A+B using the mask scattered in C
//----------------------------------------------------------------------
bool M_cleared = false ;
if ((A_is_bitmap || A_is_full) && (B_is_bitmap || B_is_full))
{
//------------------------------------------------------------------
// Method24(!M,sparse): C is bitmap, both A and B are bitmap or full
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
int8_t c = Cb [p] ;
if (c == 0)
{
// M(i,j) is zero, so C(i,j) can be computed
int8_t a = GBB (Ab, p) ;
int8_t b = GBB (Bb, p) ;
if (a && b)
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, p) ;
GB_GETB (bij, Bx, p) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
c = 1 ;
}
else if (b)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
c = 1 ;
}
else if (a)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
c = 1 ;
}
Cb [p] = c ;
task_cnvals += c ;
}
else
{
// M(i,j) == 1, so C(i,j) is not computed
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
M_cleared = true ; // M has also been cleared from C
}
else if (A_is_bitmap || A_is_full)
{
//------------------------------------------------------------------
// Method25(!M,sparse): C bitmap, A bitmap or full, B sparse/hyper
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
if (Cb [p] == 0)
{
// C (i,j) = A (i,j)
int8_t a = GBB (Ab, p) ;
if (a) GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
Cb [p] = a ;
task_cnvals += a ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, p) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
else if (c == 0)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, pB) ;
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
else
{
//------------------------------------------------------------------
// Method26: C bitmap, A sparse or hypersparse, B bitmap or full
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
if (Cb [p] == 0)
{
// C (i,j) = B (i,j)
int8_t b = GBB (Bb, p) ;
if (b) GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
Cb [p] = b ;
task_cnvals += b ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, p) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
else if (c == 0)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, pA) ;
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
cnvals += task_cnvals ;
}
}
//---------------------------------------------------------------------
// clear M from C
//---------------------------------------------------------------------
if (!M_cleared)
{
// This step is required if either A or B are sparse/hyper (if
// one is sparse/hyper, the other must be bitmap). It requires
// an extra pass over the mask M, so this might be slower than
// postponing the application of the mask, and doing it later.
#pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
for (taskid = 0 ; taskid < M_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Mslice [taskid] ;
int64_t klast = klast_Mslice [taskid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of M(:,k) for this task
int64_t j = GBH (Mh, k) ;
int64_t pM_start, pM_end ;
GB_get_pA (&pM_start, &pM_end, taskid, k, kfirst,
klast, pstart_Mslice, Mp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over M(:,j), the kth vector of M
for (int64_t pM = pM_start ; pM < pM_end ; pM++)
{
// mark C(i,j) if M(i,j) is true
bool mij = GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t i = Mi [pM] ;
int64_t p = pC_start + i ;
Cb [p] = 0 ;
}
}
}
}
}
}
else
{
//----------------------------------------------------------------------
// C is bitmap; M is bitmap or full
//----------------------------------------------------------------------
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// bitmap bitmap sparse bitmap
// bitmap bitmap sparse full
// bitmap bitmap bitmap sparse
// bitmap bitmap bitmap bitmap
// bitmap bitmap bitmap full
// bitmap bitmap full sparse
// bitmap bitmap full bitmap
// bitmap bitmap full full
// ------------------------------------------
// C <M> = A + B
// ------------------------------------------
// bitmap full sparse bitmap
// bitmap full sparse full
// bitmap full bitmap sparse
// bitmap full bitmap bitmap
// bitmap full bitmap full
// bitmap full full sparse
// bitmap full full bitmap
// bitmap full full full
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// bitmap bitmap sparse sparse
// bitmap bitmap sparse bitmap
// bitmap bitmap sparse full
// bitmap bitmap bitmap sparse
// bitmap bitmap bitmap bitmap
// bitmap bitmap bitmap full
// bitmap bitmap full sparse
// bitmap bitmap full bitmap
// bitmap bitmap full full
// ------------------------------------------
// C <!M> = A + B
// ------------------------------------------
// bitmap full sparse sparse
// bitmap full sparse bitmap
// bitmap full sparse full
// bitmap full bitmap sparse
// bitmap full bitmap bitmap
// bitmap full bitmap full
// bitmap full full sparse
// bitmap full full bitmap
// bitmap full full full
ASSERT (M_is_bitmap || M_is_full) ;
ASSERT (A_is_bitmap || A_is_full || B_is_bitmap || B_is_full) ;
#undef GB_GET_MIJ
#define GB_GET_MIJ(p) \
bool mij = GBB (Mb, p) && GB_mcast (Mx, p, msize) ; \
if (Mask_comp) mij = !mij ;
if ((A_is_bitmap || A_is_full) && (B_is_bitmap || B_is_full))
{
//------------------------------------------------------------------
// Method27: C is bitmap; M, A, and B are bitmap or full
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
// M(i,j) is true, so C(i,j) can be computed
int8_t a = GBB (Ab, p) ;
int8_t b = GBB (Bb, p) ;
int8_t c = 0 ;
if (a && b)
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, p) ;
GB_GETB (bij, Bx, p) ;
GB_BINOP (GB_CX (p), aij, bij, p % vlen, p / vlen) ;
c = 1 ;
}
else if (b)
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
c = 1 ;
}
else if (a)
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
c = 1 ;
}
Cb [p] = c ;
task_cnvals += c ;
}
else
{
                        // the effective mask mij is false, so C(i,j) is not computed
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
}
else if (A_is_bitmap || A_is_full)
{
//------------------------------------------------------------------
// Method28: C bitmap; M and A bitmap or full; B sparse or hyper
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
// C (i,j) = A (i,j)
int8_t a = GBB (Ab, p) ;
if (a) GB_COPY_A_TO_C (GB_CX (p), Ax, p) ;
Cb [p] = a ;
task_cnvals += a ;
}
else
{
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (B, 8, chunk) ;
#pragma omp parallel for num_threads(B_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < B_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Bslice [taskid] ;
int64_t klast = klast_Bslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of B(:,k) for this task
int64_t j = GBH (Bh, k) ;
int64_t pB_start, pB_end ;
GB_get_pA (&pB_start, &pB_end, taskid, k, kfirst,
klast, pstart_Bslice, Bp, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over B(:,j), the kth vector of B
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
int64_t i = Bi [pB] ;
int64_t p = pC_start + i ;
GB_GET_MIJ (p) ;
if (mij)
{
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, p) ;
GB_GETB (bij, Bx, pB) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
else
{
// C (i,j) = B (i,j)
GB_COPY_B_TO_C (GB_CX (p), Bx, pB) ;
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
}
cnvals += task_cnvals ;
}
}
else
{
//------------------------------------------------------------------
// Method29: C bitmap; M and B bitmap or full; A sparse or hyper
//------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(C_nthreads) schedule(static) \
reduction(+:cnvals)
for (tid = 0 ; tid < C_nthreads ; tid++)
{
int64_t pstart, pend, task_cnvals = 0 ;
GB_PARTITION (pstart, pend, cnz, tid, C_nthreads) ;
for (int64_t p = pstart ; p < pend ; p++)
{
GB_GET_MIJ (p) ;
if (mij)
{
// C (i,j) = B (i,j)
int8_t b = GBB (Bb, p) ;
if (b) GB_COPY_B_TO_C (GB_CX (p), Bx, p) ;
Cb [p] = b ;
task_cnvals += b ;
}
else
{
Cb [p] = 0 ;
}
}
cnvals += task_cnvals ;
}
GB_SLICE_MATRIX (A, 8, chunk) ;
#pragma omp parallel for num_threads(A_nthreads) \
schedule(dynamic,1) reduction(+:cnvals)
for (taskid = 0 ; taskid < A_ntasks ; taskid++)
{
int64_t kfirst = kfirst_Aslice [taskid] ;
int64_t klast = klast_Aslice [taskid] ;
int64_t task_cnvals = 0 ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
// find the part of A(:,k) for this task
int64_t j = GBH (Ah, k) ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, taskid, k, kfirst,
klast, pstart_Aslice, Ap, vlen) ;
int64_t pC_start = j * vlen ;
// traverse over A(:,j), the kth vector of A
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t p = pC_start + i ;
GB_GET_MIJ (p) ;
if (mij)
{
int8_t c = Cb [p] ;
if (c == 1)
{
// C (i,j) = A (i,j) + B (i,j)
GB_GETA (aij, Ax, pA) ;
GB_GETB (bij, Bx, p) ;
GB_BINOP (GB_CX (p), aij, bij, i, j) ;
}
else
{
// C (i,j) = A (i,j)
GB_COPY_A_TO_C (GB_CX (p), Ax, pA) ;
Cb [p] = 1 ;
task_cnvals++ ;
}
}
}
}
cnvals += task_cnvals ;
}
}
}
C->nvals = cnvals ;
}
|
ripemd_fmt_plug.c | /* ripemd cracker patch for JtR. Hacked together during April of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_ripemd_160;
extern struct fmt_main fmt_ripemd_128;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ripemd_160);
john_register_one(&fmt_ripemd_128);
#else
#include <string.h>
#include "arch.h"
#include "sph_ripemd.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
// 128 160
// 1 - 234k 234k
// 64 - 7547k 6310k
// 128 - 9849k 7987k
// 256 - 11835k 9205k
// 512 - 13288k 10027k
// 1k - 14142k 10553k
// 2k - 14607k 11980k ** this level chosen
// 4k - 14828k 10871k
// 8k - 14639k 10794k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 64
#else
#define OMP_SCALE 2048
#endif // __MIC__
#endif // OMP_SCALE
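// OMP_SCALE multiplies max_keys_per_crypt in init() below, giving each
// crypt_all() call enough candidates per thread to amortize the OpenMP
// fork/join overhead.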
#endif // _OPENMP
#include "memdbg.h"
#define FORMAT_TAG "$ripemd$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE160 20
#define BINARY_SIZE128 16
#define SALT_SIZE 0
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
static struct fmt_tests ripemd_160_tests[] = {
{"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
{"$ripemd$9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
{"56e11fdd5479b30020fc010551536af074e1b82f", "thisisalongstring"},
{"$ripemd$56e11fdd5479b30020fc010551536af074e1b82f", "thisisalongstring"},
{"a1a94e392ce7d861a4fdcaa291e453c082807f50", "string with space"},
{"$ripemd$a1a94e392ce7d861a4fdcaa291e453c082807f50", "string with space"},
{"98f3860a474d986964df9c1fd3621e68eaf76a25", "UPPERCASE"},
{"$ripemd$98f3860a474d986964df9c1fd3621e68eaf76a25", "UPPERCASE"},
{"d3d0379126c1e5e0ba70ad6e5e53ff6aeab9f4fa", "123456789"},
{"$ripemd$d3d0379126c1e5e0ba70ad6e5e53ff6aeab9f4fa", "123456789"},
{NULL}
};
static struct fmt_tests ripemd_128_tests[] = {
{"cdf26213a150dc3ecb610f18f6b38b46", ""},
{"$ripemd$cdf26213a150dc3ecb610f18f6b38b46", ""},
{"060d8817be332f6e6a9a09a209ea453e", "thisisalongstring"},
{"$ripemd$060d8817be332f6e6a9a09a209ea453e", "thisisalongstring"},
{"ed402bdf044344c34935ac93a2d90a13", "string with space"},
{"$ripemd$ed402bdf044344c34935ac93a2d90a13", "string with space"},
{"5e71f949a0d5c69f3c1aeaf245ba527a", "UPPERCASE"},
{"$ripemd$5e71f949a0d5c69f3c1aeaf245ba527a", "UPPERCASE"},
{"1886db8acdcbfeab1e7ee3780400536f", "123456789"},
{"$ripemd$1886db8acdcbfeab1e7ee3780400536f", "123456789"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE160 / sizeof(uint32_t)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
if (!saved_key) {
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
char *p;
p = ciphertext;
if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
p += TAG_LENGTH;
if (strlen(p) != len)
return 0;
while(*p)
if (atoi16[ARCH_INDEX(*p++)]==0x7f)
return 0;
return 1;
}
static int valid160(char *ciphertext, struct fmt_main *self)
{
return valid(ciphertext, self, 40);
}
static int valid128(char *ciphertext, struct fmt_main *self)
{
return valid(ciphertext, self, 32);
}
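/* Illustrative sketch (not part of the original patch): valid160()
 * accepts the same digest with or without the "$ripemd$" tag. */
#if 0
static void example_valid_forms(struct fmt_main *self)
{
	/* both calls return 1: 40 hex digits, bare and tagged */
	valid160("9c1185a5c5e9fc54612808977ee8f548b2258d31", self);
	valid160("$ripemd$9c1185a5c5e9fc54612808977ee8f548b2258d31", self);
}
#endif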
static void *get_binary_160(char *ciphertext)
{
static union {
unsigned char c[20];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
p = strrchr(ciphertext, '$') + 1;
else
p = ciphertext;
for (i = 0; i < 20; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void *get_binary_128(char *ciphertext)
{
static union {
unsigned char c[16];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
p = strrchr(ciphertext, '$') + 1;
else
p = ciphertext;
for (i = 0; i < 16; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int crypt_160(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_ripemd160_context ctx;
sph_ripemd160_init(&ctx);
sph_ripemd160(&ctx, saved_key[index], strlen(saved_key[index]));
sph_ripemd160_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int crypt_128(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
sph_ripemd128_context ctx;
sph_ripemd128_init(&ctx);
sph_ripemd128(&ctx, saved_key[index], strlen(saved_key[index]));
sph_ripemd128_close(&ctx, (unsigned char*)crypt_out[index]);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one128(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE128);
}
static int cmp_one160(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE160);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void ripemd_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[TAG_LENGTH + 2 * BINARY_SIZE160 + 1];
if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
ciphertext += TAG_LENGTH;
strcpy(out, FORMAT_TAG);
strcpy(&out[TAG_LENGTH], ciphertext);
strlwr(&out[TAG_LENGTH]);
return out;
}
struct fmt_main fmt_ripemd_160 = {
{
"ripemd-160",
"RIPEMD 160",
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE160,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
ripemd_160_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid160,
split,
get_binary_160,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
ripemd_set_key,
get_key,
fmt_default_clear_keys,
crypt_160,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one160,
cmp_exact
}
};
struct fmt_main fmt_ripemd_128 = {
{
"ripemd-128",
"RIPEMD 128",
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE128,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ FORMAT_TAG },
ripemd_128_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid128,
split,
get_binary_128,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
ripemd_set_key,
get_key,
fmt_default_clear_keys,
crypt_128,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one128,
cmp_exact
}
};
#endif /* plugin stanza */
|
kpoint.c | /* kpoint.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <stdio.h>
#include <stdlib.h>
#include "mathfunc.h"
#include "kpoint.h"
#include "debug.h"
static int search_space[][3] = {
{0, 0, 0},
{0, 0, 1},
{0, 1, -1},
{0, 1, 0},
{0, 1, 1},
{1, -1, -1},
{1, -1, 0},
{1, -1, 1},
{1, 0, -1},
{1, 0, 0},
{1, 0, 1},
{1, 1, -1},
{1, 1, 0},
{1, 1, 1},
{-1, -1, -1},
{-1, -1, 0},
{-1, -1, 1},
{-1, 0, -1},
{-1, 0, 0},
{-1, 0, 1},
{-1, 1, -1},
{-1, 1, 0},
{-1, 1, 1},
{0, -1, -1},
{0, -1, 0},
{0, -1, 1},
{0, 0, -1}
};
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal);
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3]);
static int get_ir_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT * rot_reciprocal);
static int
get_ir_reciprocal_mesh_openmp(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT* rot_reciprocal);
static int relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3]);
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3]);
static int get_ir_triplets_at_q(int map_triplets[],
int map_q[],
int grid_address[][3],
const int grid_point,
const int mesh[3],
const MatINT * rot_reciprocal);
static int get_BZ_triplets_at_q(int triplets[][3],
const int grid_point,
SPGCONST int bz_grid_address[][3],
const int bz_map[],
const int map_triplets[],
const int num_map_triplets,
const int mesh[3]);
static int get_third_q_of_triplets_at_q(int address[3][3],
const int q_index,
const int bz_map[],
const int mesh[3],
const int bzmesh[3],
const int bzmesh_double[3]);
static int get_grid_point(const int grid_double[3],
const int mesh[3]);
static void grid_point_to_grid_double(int grid_double[3],
const int grid_point,
const int mesh[3],
const int is_shift[3]);
static void get_grid_address(int address[3],
const int grid_double[3],
const int mesh[3]);
static void get_vector_modulo(int v[3], const int m[3]);
/* grid_address (e.g. 4x4x4 mesh, unless GRID_ORDER_XYZ is defined) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' corresponds to the index of an irreducible grid point. */
int kpt_get_irreducible_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const int is_time_reversal,
const MatINT *rotations)
{
int num_ir;
MatINT *rot_reciprocal;
rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
#ifdef _OPENMP
num_ir = get_ir_reciprocal_mesh_openmp(grid_address,
map,
mesh,
is_shift,
rot_reciprocal);
#else
num_ir = get_ir_reciprocal_mesh(grid_address,
map,
mesh,
is_shift,
rot_reciprocal);
#endif
mat_free_MatINT(rot_reciprocal);
return num_ir;
}
int kpt_get_stabilized_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const int is_time_reversal,
const MatINT * rotations,
const int num_q,
SPGCONST double qpoints[][3])
{
int num_ir;
MatINT *rot_reciprocal, *rot_reciprocal_q;
double tolerance;
rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
tolerance,
num_q,
qpoints);
#ifdef _OPENMP
num_ir = get_ir_reciprocal_mesh_openmp(grid_address,
map,
mesh,
is_shift,
rot_reciprocal_q);
#else
num_ir = get_ir_reciprocal_mesh(grid_address,
map,
mesh,
is_shift,
rot_reciprocal_q);
#endif
mat_free_MatINT(rot_reciprocal_q);
mat_free_MatINT(rot_reciprocal);
return num_ir;
}
void kpt_get_grid_points_by_rotations(int rot_grid_points[],
const int address_orig[3],
const MatINT * rot_reciprocal,
const int mesh[3],
const int is_shift[3])
{
int i;
int address_double_orig[3], address_double[3], mesh_double[3];
for (i = 0; i < 3; i++) {
mesh_double[i] = mesh[i] * 2;
address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
}
for (i = 0; i < rot_reciprocal->size; i++) {
mat_multiply_matrix_vector_i3(address_double,
rot_reciprocal->mat[i],
address_double_orig);
get_vector_modulo(address_double, mesh_double);
rot_grid_points[i] = get_grid_point(address_double, mesh);
}
}
void kpt_get_BZ_grid_points_by_rotations(int rot_grid_points[],
const int address_orig[3],
const MatINT * rot_reciprocal,
const int mesh[3],
const int is_shift[3],
const int bz_map[])
{
int i;
int address_double_orig[3], address_double[3], mesh_double[3], bzmesh_double[3];
for (i = 0; i < 3; i++) {
mesh_double[i] = mesh[i] * 2;
bzmesh_double[i] = mesh[i] * 4;
address_double_orig[i] = address_orig[i] * 2 + is_shift[i];
}
for (i = 0; i < rot_reciprocal->size; i++) {
mat_multiply_matrix_vector_i3(address_double,
rot_reciprocal->mat[i],
address_double_orig);
get_vector_modulo(address_double, bzmesh_double);
rot_grid_points[i] = bz_map[get_grid_point(address_double, mesh_double)];
}
}
int kpt_relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
return relocate_BZ_grid_address(bz_grid_address,
bz_map,
grid_address,
mesh,
rec_lattice,
is_shift);
}
int kpt_get_ir_triplets_at_q(int map_triplets[],
int map_q[],
int grid_address[][3],
const int grid_point,
const int mesh[3],
const int is_time_reversal,
const MatINT * rotations)
{
int num_ir;
MatINT *rot_reciprocal;
rot_reciprocal = get_point_group_reciprocal(rotations, is_time_reversal);
num_ir = get_ir_triplets_at_q(map_triplets,
map_q,
grid_address,
grid_point,
mesh,
rot_reciprocal);
mat_free_MatINT(rot_reciprocal);
return num_ir;
}
int kpt_get_BZ_triplets_at_q(int triplets[][3],
const int grid_point,
SPGCONST int bz_grid_address[][3],
const int bz_map[],
const int map_triplets[],
const int num_map_triplets,
const int mesh[3])
{
return get_BZ_triplets_at_q(triplets,
grid_point,
bz_grid_address,
bz_map,
map_triplets,
num_map_triplets,
mesh);
}
void kpt_get_neighboring_grid_points(int neighboring_grid_points[],
const int grid_point,
SPGCONST int relative_grid_address[][3],
const int num_relative_grid_address,
const int mesh[3],
SPGCONST int bz_grid_address[][3],
const int bz_map[])
{
int mesh_double[3], bzmesh[3], bzmesh_double[3],
address_double[3], bz_address_double[3];
int i, j, bz_gp;
for (i = 0; i < 3; i++) {
mesh_double[i] = mesh[i] * 2;
bzmesh[i] = mesh[i] * 2;
bzmesh_double[i] = bzmesh[i] * 2;
}
for (i = 0; i < num_relative_grid_address; i++) {
for (j = 0; j < 3; j++) {
address_double[j] = (bz_grid_address[grid_point][j] +
relative_grid_address[i][j]) * 2;
bz_address_double[j] = address_double[j];
}
get_vector_modulo(bz_address_double, bzmesh_double);
bz_gp = bz_map[get_grid_point(bz_address_double, bzmesh)];
if (bz_gp == -1) {
get_vector_modulo(address_double, mesh_double);
neighboring_grid_points[i] = get_grid_point(address_double, mesh);
} else {
neighboring_grid_points[i] = bz_gp;
}
}
}
static MatINT *get_point_group_reciprocal(const MatINT * rotations,
const int is_time_reversal)
{
int i, j, num_rot;
MatINT *rot_reciprocal, *rot_return;
int *unique_rot;
SPGCONST int inversion[3][3] = {
{-1, 0, 0 },
{ 0,-1, 0 },
{ 0, 0,-1 }
};
if (is_time_reversal) {
rot_reciprocal = mat_alloc_MatINT(rotations->size * 2);
} else {
rot_reciprocal = mat_alloc_MatINT(rotations->size);
}
unique_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size);
for (i = 0; i < rot_reciprocal->size; i++) {
unique_rot[i] = -1;
}
for (i = 0; i < rotations->size; i++) {
mat_transpose_matrix_i3(rot_reciprocal->mat[i], rotations->mat[i]);
if (is_time_reversal) {
mat_multiply_matrix_i3(rot_reciprocal->mat[rotations->size+i],
inversion,
rot_reciprocal->mat[i]);
}
}
num_rot = 0;
for (i = 0; i < rot_reciprocal->size; i++) {
for (j = 0; j < num_rot; j++) {
if (mat_check_identity_matrix_i3(rot_reciprocal->mat[unique_rot[j]],
rot_reciprocal->mat[i])) {
goto escape;
}
}
unique_rot[num_rot] = i;
num_rot++;
escape:
;
}
rot_return = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_return->mat[i], rot_reciprocal->mat[unique_rot[i]]);
}
free(unique_rot);
mat_free_MatINT(rot_reciprocal);
return rot_return;
}
static MatINT *get_point_group_reciprocal_with_q(const MatINT * rot_reciprocal,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3])
{
int i, j, k, l, is_all_ok, num_rot;
int *ir_rot;
double q_rot[3], diff[3];
MatINT * rot_reciprocal_q;
is_all_ok = 0;
num_rot = 0;
ir_rot = (int*)malloc(sizeof(int) * rot_reciprocal->size);
for (i = 0; i < rot_reciprocal->size; i++) {
ir_rot[i] = -1;
}
for (i = 0; i < rot_reciprocal->size; i++) {
for (j = 0; j < num_q; j++) {
is_all_ok = 0;
mat_multiply_matrix_vector_id3(q_rot,
rot_reciprocal->mat[i],
qpoints[j]);
for (k = 0; k < num_q; k++) {
for (l = 0; l < 3; l++) {
diff[l] = q_rot[l] - qpoints[k][l];
diff[l] -= mat_Nint(diff[l]);
}
if (mat_Dabs(diff[0]) < symprec &&
mat_Dabs(diff[1]) < symprec &&
mat_Dabs(diff[2]) < symprec) {
is_all_ok = 1;
break;
}
}
if (! is_all_ok) {
break;
}
}
if (is_all_ok) {
ir_rot[num_rot] = i;
num_rot++;
}
}
rot_reciprocal_q = mat_alloc_MatINT(num_rot);
for (i = 0; i < num_rot; i++) {
mat_copy_matrix_i3(rot_reciprocal_q->mat[i],
rot_reciprocal->mat[ir_rot[i]]);
}
free(ir_rot);
return rot_reciprocal_q;
}
static int get_ir_reciprocal_mesh(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT *rot_reciprocal)
{
/* In the following loop, mesh is doubled. */
/* Even and odd mesh numbers correspond to */
/* is_shift[i] = 0 and 1, respectively. */
/* is_shift = [0,0,0] gives Gamma center mesh. */
/* grid: reducible grid points */
/* map: the mapping from each point to ir-point. */
int i, j, k, l, grid_point, grid_point_rot, num_ir = 0;
int grid_double[3], grid_rot[3], mesh_double[3];
for (i = 0; i < 3; i++) {
mesh_double[i] = mesh[i] * 2;
}
/* "-1" means the element is not touched yet. */
for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
map[i] = -1;
}
#ifndef GRID_ORDER_XYZ
for (i = 0; i < mesh[2]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[0]; k++) {
grid_double[0] = k * 2 + is_shift[0];
grid_double[1] = j * 2 + is_shift[1];
grid_double[2] = i * 2 + is_shift[2];
#else
for (i = 0; i < mesh[0]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[2]; k++) {
grid_double[0] = i * 2 + is_shift[0];
grid_double[1] = j * 2 + is_shift[1];
grid_double[2] = k * 2 + is_shift[2];
#endif
grid_point = get_grid_point(grid_double, mesh);
get_grid_address(grid_address[grid_point], grid_double, mesh);
for (l = 0; l < rot_reciprocal->size; l++) {
mat_multiply_matrix_vector_i3(grid_rot,
rot_reciprocal->mat[l],
grid_double);
get_vector_modulo(grid_rot, mesh_double);
grid_point_rot = get_grid_point(grid_rot, mesh);
if (grid_point_rot > -1) { /* Invalid if even --> odd or odd --> even */
if (map[grid_point_rot] > -1) {
map[grid_point] = map[grid_point_rot];
break;
}
}
}
if (map[grid_point] == -1) {
map[grid_point] = grid_point;
num_ir++;
}
}
}
}
return num_ir;
}
static int
get_ir_reciprocal_mesh_openmp(int grid_address[][3],
int map[],
const int mesh[3],
const int is_shift[3],
const MatINT * rot_reciprocal)
{
int i, j, k, l, grid_point, grid_point_rot, num_ir;
int grid_double[3], grid_rot[3], mesh_double[3];
for (i = 0; i < 3; i++) {
mesh_double[i] = mesh[i] * 2;
}
#ifndef GRID_ORDER_XYZ
#pragma omp parallel for private(j, k, l, grid_point, grid_point_rot, grid_double, grid_rot)
for (i = 0; i < mesh[2]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[0]; k++) {
grid_double[0] = k * 2 + is_shift[0];
grid_double[1] = j * 2 + is_shift[1];
grid_double[2] = i * 2 + is_shift[2];
#else
#pragma omp parallel for private(j, k, l, grid_point, grid_point_rot, grid_double, grid_rot)
for (i = 0; i < mesh[0]; i++) {
for (j = 0; j < mesh[1]; j++) {
for (k = 0; k < mesh[2]; k++) {
grid_double[0] = i * 2 + is_shift[0];
grid_double[1] = j * 2 + is_shift[1];
grid_double[2] = k * 2 + is_shift[2];
#endif
grid_point = get_grid_point(grid_double, mesh);
map[grid_point] = grid_point;
get_grid_address(grid_address[grid_point], grid_double, mesh);
for (l = 0; l < rot_reciprocal->size; l++) {
mat_multiply_matrix_vector_i3(grid_rot,
rot_reciprocal->mat[l],
grid_double);
get_vector_modulo(grid_rot, mesh_double);
grid_point_rot = get_grid_point(grid_rot, mesh);
if (grid_point_rot > -1) { /* Invalid if even --> odd or odd --> even */
if (grid_point_rot < map[grid_point]) {
map[grid_point] = grid_point_rot;
}
}
}
}
}
}
num_ir = 0;
#pragma omp parallel for reduction(+:num_ir)
for (i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++) {
if (map[i] == i) {
num_ir++;
}
}
return num_ir;
}
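/* Note: get_ir_reciprocal_mesh() maps each point to the first equivalent
   point encountered, while the OpenMP variant above maps to the smallest
   equivalent grid-point index; both pick one representative per orbit. */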
/* Relocate grid addresses to first Brillouin zone */
/* bz_grid_address[prod(mesh + 1)][3] */
/* bz_map[prod(mesh * 2)] */
static int relocate_BZ_grid_address(int bz_grid_address[][3],
int bz_map[],
SPGCONST int grid_address[][3],
const int mesh[3],
SPGCONST double rec_lattice[3][3],
const int is_shift[3])
{
double tolerance, min_distance;
double vector[3], distance[27];
int bzmesh[3], bzmesh_double[3], address_double[3];
int i, j, k, min_index, boundary_num_gp, total_num_gp, bzgp, gp;
tolerance = get_tolerance_for_BZ_reduction(rec_lattice);
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
bzmesh_double[i] = bzmesh[i] * 2;
}
for (i = 0; i < bzmesh[0] * bzmesh[1] * bzmesh[2]; i++) {
bz_map[i] = -1;
}
boundary_num_gp = 0;
total_num_gp = mesh[0] * mesh[1] * mesh[2];
for (i = 0; i < total_num_gp; i++) {
for (j = 0; j < 27; j++) {
for (k = 0; k < 3; k++) {
address_double[k] =
(grid_address[i][k] + search_space[j][k] * mesh[k]) * 2 + is_shift[k];
}
mat_multiply_matrix_vector_di3(vector, rec_lattice, address_double);
distance[j] = mat_norm_squared_d3(vector);
}
min_distance = distance[0];
min_index = 0;
for (j = 1; j < 27; j++) {
if (distance[j] + tolerance < min_distance) {
min_distance = distance[j];
min_index = j;
}
}
for (j = 0; j < 27; j++) {
if (distance[j] < min_distance + tolerance) {
if (j == min_index) {
gp = i;
} else {
gp = boundary_num_gp + total_num_gp;
}
for (k = 0; k < 3; k++) {
bz_grid_address[gp][k] =
grid_address[i][k] + search_space[j][k] * mesh[k];
address_double[k] = bz_grid_address[gp][k] * 2 + is_shift[k];
if (address_double[k] < 0) {
address_double[k] += bzmesh_double[k];
}
}
bzgp = get_grid_point(address_double, bzmesh);
bz_map[bzgp] = gp;
if (j != min_index) {
boundary_num_gp++;
}
}
}
}
return boundary_num_gp + total_num_gp;
}
static double get_tolerance_for_BZ_reduction(SPGCONST double rec_lattice[3][3])
{
int i, j;
double tolerance;
double length[3];
for (i = 0; i < 3; i++) {
length[i] = 0;
for (j = 0; j < 3; j++) {
length[i] += rec_lattice[j][i] * rec_lattice[j][i];
}
}
tolerance = length[0];
for (i = 1; i < 3; i++) {
if (tolerance > length[i]) {
tolerance = length[i];
}
}
tolerance *= 0.01;
return tolerance;
}
static int get_ir_triplets_at_q(int map_triplets[],
int map_q[],
int grid_address[][3],
const int grid_point,
const int mesh[3],
const MatINT * rot_reciprocal)
{
int i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point;
int mesh_double[3], is_shift[3];
int grid_double0[3], grid_double1[3], grid_double2[3];
int *ir_grid_points, *third_q;
double tolerance;
double stabilizer_q[1][3];
MatINT *rot_reciprocal_q;
tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
num_grid = mesh[0] * mesh[1] * mesh[2];
for (i = 0; i < 3; i++) {
/* Use the Gamma-centered (unshifted) mesh only */
is_shift[i] = 0;
mesh_double[i] = mesh[i] * 2;
}
/* Search irreducible q-points (map_q) with a stabilizer */
grid_point_to_grid_double(grid_double0, grid_point, mesh, is_shift); /* q */
for (i = 0; i < 3; i++) {
stabilizer_q[0][i] =
(double)grid_double0[i] / mesh_double[i] - (grid_double0[i] > mesh[i]);
}
rot_reciprocal_q = get_point_group_reciprocal_with_q(rot_reciprocal,
tolerance,
1,
stabilizer_q);
#ifdef _OPENMP
num_ir_q = get_ir_reciprocal_mesh_openmp(grid_address,
map_q,
mesh,
is_shift,
rot_reciprocal_q);
#else
num_ir_q = get_ir_reciprocal_mesh(grid_address,
map_q,
mesh,
is_shift,
rot_reciprocal_q);
#endif
mat_free_MatINT(rot_reciprocal_q);
third_q = (int*) malloc(sizeof(int) * num_ir_q);
ir_grid_points = (int*) malloc(sizeof(int) * num_ir_q);
num_ir_q = 0;
for (i = 0; i < num_grid; i++) {
if (map_q[i] == i) {
ir_grid_points[num_ir_q] = i;
num_ir_q++;
}
map_triplets[i] = -1;
}
#pragma omp parallel for private(j, grid_double1, grid_double2)
for (i = 0; i < num_ir_q; i++) {
grid_point_to_grid_double(grid_double1, ir_grid_points[i], mesh, is_shift); /* q' */
for (j = 0; j < 3; j++) { /* q'' */
grid_double2[j] = - grid_double0[j] - grid_double1[j];
}
get_vector_modulo(grid_double2, mesh_double);
third_q[i] = get_grid_point(grid_double2, mesh);
}
num_ir_triplets = 0;
for (i = 0; i < num_ir_q; i++) {
ir_grid_point = ir_grid_points[i];
q_2 = third_q[i];
if (map_triplets[map_q[q_2]] > -1) {
map_triplets[ir_grid_point] = map_q[q_2];
} else {
map_triplets[ir_grid_point] = ir_grid_point;
num_ir_triplets++;
}
}
#pragma omp parallel for
for (i = 0; i < num_grid; i++) {
map_triplets[i] = map_triplets[map_q[i]];
}
free(third_q);
third_q = NULL;
free(ir_grid_points);
ir_grid_points = NULL;
return num_ir_triplets;
}
static int get_BZ_triplets_at_q(int triplets[][3],
const int grid_point,
SPGCONST int bz_grid_address[][3],
const int bz_map[],
const int map_triplets[],
const int num_map_triplets,
const int mesh[3])
{
int i, j, k, num_ir;
int address[3][3], address_double[3], bzmesh[3], bzmesh_double[3];
int *ir_grid_points;
for (i = 0; i < 3; i++) {
bzmesh[i] = mesh[i] * 2;
bzmesh_double[i] = bzmesh[i] * 2;
}
num_ir = 0;
ir_grid_points = (int*) malloc(sizeof(int) * num_map_triplets);
for (i = 0; i < num_map_triplets; i++) {
if (map_triplets[i] == i) {
ir_grid_points[num_ir] = i;
num_ir++;
}
}
#pragma omp parallel for private(j, k, address, address_double)
for (i = 0; i < num_ir; i++) {
for (j = 0; j < 3; j++) {
address[0][j] = bz_grid_address[grid_point][j];
address[1][j] = bz_grid_address[ir_grid_points[i]][j];
address[2][j] = - address[0][j] - address[1][j];
}
for (j = 2; j > -1; j--) {
if (get_third_q_of_triplets_at_q(address,
j,
bz_map,
mesh,
bzmesh,
bzmesh_double) == 0) {
break;
}
}
for (j = 0; j < 3; j++) {
for (k = 0; k < 3; k++) {
address_double[k] = address[j][k] * 2;
if (address_double[k] < 0) {
address_double[k] += bzmesh_double[k];
}
}
triplets[i][j] = bz_map[get_grid_point(address_double, bzmesh)];
}
}
free(ir_grid_points);
return num_ir;
}
static int get_third_q_of_triplets_at_q(int address[3][3],
const int q_index,
const int bz_map[],
const int mesh[3],
const int bzmesh[3],
const int bzmesh_double[3])
{
int i, j, smallest_g, smallest_index, sum_g, delta_g[3];
int bzgp[27], address_double[3];
get_vector_modulo(address[q_index], mesh);
for (i = 0; i < 3; i++) {
delta_g[i] = 0;
for (j = 0; j < 3; j++) {
delta_g[i] += address[j][i];
}
delta_g[i] /= mesh[i];
}
for (i = 0; i < 27; i++) {
for (j = 0; j < 3; j++) {
address_double[j] = (address[q_index][j] +
search_space[i][j] * mesh[j]) * 2;
}
for (j = 0; j < 3; j++) {
if (address_double[j] < 0) {
address_double[j] += bzmesh_double[j];
}
}
bzgp[i] = bz_map[get_grid_point(address_double, bzmesh)];
}
for (i = 0; i < 27; i++) {
if (bzgp[i] != -1) {
goto escape;
}
}
warning_print("******* Warning *******\n");
warning_print(" No third-q was found.\n");
warning_print("******* Warning *******\n");
escape:
smallest_g = 4;
smallest_index = 0;
for (i = 0; i < 27; i++) {
if (bzgp[i] > -1) { /* q'' is in BZ */
sum_g = (abs(delta_g[0] + search_space[i][0]) +
abs(delta_g[1] + search_space[i][1]) +
abs(delta_g[2] + search_space[i][2]));
if (sum_g < smallest_g) {
smallest_index = i;
smallest_g = sum_g;
}
}
}
for (i = 0; i < 3; i++) {
address[q_index][i] += search_space[smallest_index][i] * mesh[i];
}
return smallest_g;
}
static int get_grid_point(const int grid_double[3],
const int mesh[3])
{
int i, grid[3];
for (i = 0; i < 3; i++) {
if (grid_double[i] % 2 == 0) {
grid[i] = grid_double[i] / 2;
} else {
grid[i] = (grid_double[i] - 1) / 2;
}
}
#ifndef GRID_ORDER_XYZ
return grid[2] * mesh[0] * mesh[1] + grid[1] * mesh[0] + grid[0];
#else
return grid[0] * mesh[1] * mesh[2] + grid[1] * mesh[2] + grid[2];
#endif
}
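/* Minimal usage sketch (illustrative, not part of the original code):
   with the default ordering, the unshifted point (1,1,0) on a 4x4x4 mesh
   has doubled address (2,2,0) and linear index 0*16 + 1*4 + 1 = 5. */
#if 0
static int example_get_grid_point(void)
{
  int grid_double[3] = {2, 2, 0};
  int mesh[3] = {4, 4, 4};
  return get_grid_point(grid_double, mesh); /* returns 5 */
}
#endif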
static void grid_point_to_grid_double(int grid_double[3],
const int grid_point,
const int mesh[3],
const int is_shift[3])
{
int i;
int grid[3];
#ifndef GRID_ORDER_XYZ
grid[2] = grid_point / (mesh[0] * mesh[1]);
grid[1] = (grid_point - grid[2] * mesh[0] * mesh[1]) / mesh[0];
grid[0] = grid_point % mesh[0];
#else
grid[0] = grid_point / (mesh[1] * mesh[2]);
grid[1] = (grid_point - grid[0] * mesh[1] * mesh[2]) / mesh[2];
grid[2] = grid_point % mesh[2];
#endif
for (i = 0; i < 3; i++) {
grid_double[i] = grid[i] * 2 + is_shift[i];
}
}
static void get_grid_address(int address[3],
const int grid_double[3],
const int mesh[3])
{
int i;
for (i = 0; i < 3; i++) {
if (grid_double[i] % 2 == 0) {
address[i] = grid_double[i] / 2;
} else {
address[i] = (grid_double[i] - 1) / 2;
}
#ifndef GRID_BOUNDARY_AS_NEGATIVE
address[i] = address[i] - mesh[i] * (address[i] > mesh[i] / 2);
#else
address[i] = address[i] - mesh[i] * (address[i] >= mesh[i] / 2);
#endif
}
}
static void get_vector_modulo(int v[3], const int m[3])
{
int i;
for (i = 0; i < 3; i++) {
v[i] = v[i] % m[i];
if (v[i] < 0)
v[i] += m[i];
}
}
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "opencl.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
float *random_matrix(int rows, int cols)
{
int i;
float *m = (float*)calloc(rows*cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[i*lda+k];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
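/* Note: the i-k-j loop order above streams the rows of B and C
   sequentially, which is more cache-friendly for row-major storage than
   the textbook i-j-k order. */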
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
register float A_PART = ALPHA*A[k*lda+i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
#pragma omp parallel for
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
register float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
if(!TA && !TB)
gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else if(TA && !TB)
gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else if(!TA && TB)
gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
else
gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}
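/* Minimal usage sketch (illustrative only): multiply two 2x2 row-major
   matrices with gemm_cpu and print the result. */
#if 0
static void example_gemm_cpu(void)
{
    float A[4] = {1, 2, 3, 4};
    float B[4] = {5, 6, 7, 8};
    float C[4] = {0, 0, 0, 0};
    /* C = 1*A*B + 0*C, lda = ldb = ldc = 2 */
    gemm_cpu(0, 0, 2, 2, 2, 1, A, 2, B, 2, 0, C, 2);
    printf("%f %f\n%f %f\n", C[0], C[1], C[2], C[3]); /* 19 22 / 43 50 */
}
#endif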
#ifdef GPU
#ifndef ARM
#include <clblast_c.h>
#endif
void gemm_kernel_init(void)
{
#ifndef ARM
/*
cl_int clErr;
clErr = clblasSetup();
if (clErr != CL_SUCCESS)
{
printf("gemm_kernel_init: Could not setup clBLAS. Errorcode: %d\n", clErr);
}
*/
#endif
}
void gemm_kernel_release(void)
{
#ifndef ARM
/*
clblasTeardown();
*/
#endif
}
cl_mem_ext random_matrix_gpu(int rows, int cols)
{
int i;
float *m = (float*)calloc(rows*cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return opencl_make_array(m, rows*cols);
}
#if !defined(GPU_MULTI) && !defined(ARM)
void gemm_offset_gpu(
int TA, int TB, int M, int N, int K,
float ALPHA,
cl_mem_ext A_gpu, int offset_A, int lda,
cl_mem_ext B_gpu, int offset_B, int ldb,
float BETA,
cl_mem_ext C_gpu, int offset_C, int ldc)
{
#ifdef BENCHMARK
clock_t t;
t = clock();
#endif
CLBlastStatusCode clErr;
cl_command_queue que = opencl_queues[opencl_device_id_t];
clErr = CLBlastSgemm(CLBlastLayoutRowMajor,
(TA ? CLBlastTransposeYes : CLBlastTransposeNo),
(TB ? CLBlastTransposeYes : CLBlastTransposeNo),
M, N, K,
ALPHA,
A_gpu.mem, offset_A, lda,
B_gpu.mem, offset_B, ldb,
BETA,
C_gpu.mem, offset_C, ldc,
&que, NULL);
// clFlush(que);
#ifdef BENCHMARK
t = clock() - t;
double time_taken = ((double)t);
printf("%s\t%d\n", "CLBlastSgemm", (int)time_taken);
#endif
if (clErr != CLBlastSuccess)
{
printf("gemm_gpu: CLBlastSgemm failed. Errorcode: %d\n", clErr);
assert(0);
}
}
#endif
void gemm_gpu(int TA, int TB, int M, int N, int K,
float ALPHA,
cl_mem_ext A_gpu, int lda,
cl_mem_ext B_gpu, int ldb,
float BETA,
cl_mem_ext C_gpu, int ldc)
{
#ifdef BENCHMARK
clock_t t;
t = clock();
#endif
gemm_offset_gpu(TA, TB, M, N, K,
ALPHA,
A_gpu, 0, lda,
B_gpu, 0, ldb,
BETA,
C_gpu, 0, ldc);
#ifdef BENCHMARK
t = clock() - t;
double time_taken = ((double)t);
printf("%s\t%d\n", "gemm_offset_gpu", (int)time_taken);
#endif
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
cl_mem_ext a;
if(!TA) a = random_matrix_gpu(m,k);
else a = random_matrix_gpu(k,m);
int lda = (!TA)?k:m;
cl_mem_ext b;
if(!TB) b = random_matrix_gpu(k,n);
else b = random_matrix_gpu(n,k);
int ldb = (!TB)?n:k;
cl_mem_ext c = random_matrix_gpu(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
opencl_free(a);
opencl_free(b);
opencl_free(c);
}
void time_gpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
cl_mem_ext a_cl = opencl_make_array(a, m*k);
cl_mem_ext b_cl = opencl_make_array(b, k*n);
cl_mem_ext c_cl = opencl_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
clFinish(opencl_queues[opencl_device_id_t]);
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
opencl_free(a_cl);
opencl_free(b_cl);
opencl_free(c_cl);
free(a);
free(b);
free(c);
}
/* TODO: THINK ABOUT IT?!
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
cl_mem_ext a;
if(!TA) a = random_matrix_gpu(m,k);
else a = random_matrix_gpu(k,m);
int lda = (!TA)?k:m;
cl_mem_ext b;
if(!TB) b = random_matrix_gpu(k,n);
else b = random_matrix_gpu(n,k);
int ldb = (!TB)?n:k;
cl_mem_ext c = random_matrix_gpu(m,n);
cl_mem_ext c_gpu = random_matrix_gpu(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
opencl_free(a);
opencl_free(b);
opencl_free(c);
opencl_free(c_gpu);
}
*/
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,64,2916,363);
time_gpu(0,0,192,729,1600);
time_gpu(0,0,384,196,1728);
time_gpu(0,0,256,196,3456);
time_gpu(0,0,256,196,2304);
time_gpu(0,0,128,4096,12544);
time_gpu(0,0,128,4096,4096);
*/
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,75,12544);
time_gpu(0,0,64,576,12544);
time_gpu(0,0,256,2304,784);
time_gpu(1,1,2304,256,784);
time_gpu(0,0,512,4608,196);
time_gpu(1,1,4608,512,196);
return 0;
}
#endif
|
dsymm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zsymm.c, normal z -> d, Fri Sep 28 17:38:02 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_symm
*
* Performs one of the matrix-matrix operations
*
* \f[ C = \alpha \times A \times B + \beta \times C \f]
* or
* \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* where alpha and beta are scalars, A is a symmetric matrix and B and
* C are m-by-n matrices.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] m
* The number of rows of the matrix C. m >= 0.
*
* @param[in] n
* The number of columns of the matrix C. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* A is an lda-by-ka matrix, where ka is m when side = PlasmaLeft,
* and is n otherwise. Only the uplo triangular part is referenced.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,ka).
*
* @param[in] pB
* B is an ldb-by-n matrix, where the leading m-by-n part of
* the array B must contain the matrix B.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] pC
* C is an ldc-by-n matrix.
* On exit, the array is overwritten by the m-by-n updated matrix.
*
* @param[in] ldc
* The leading dimension of the array C. ldc >= max(1,m).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dsymm
* @sa plasma_csymm
* @sa plasma_dsymm
* @sa plasma_ssymm
*
******************************************************************************/
int plasma_dsymm(plasma_enum_t side, plasma_enum_t uplo,
int m, int n,
double alpha, double *pA, int lda,
double *pB, int ldb,
double beta, double *pC, int ldc)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((side != PlasmaLeft) && (side != PlasmaRight)) {
plasma_error("illegal value of side");
return -1;
}
if ((uplo != PlasmaLower) && (uplo != PlasmaUpper)) {
plasma_error("illegal value of uplo");
return -2;
}
if (m < 0) {
plasma_error("illegal value of m");
return -3;
}
if (n < 0) {
plasma_error("illegal value of n");
return -4;
}
int am;
if (side == PlasmaLeft)
am = m;
else
am = n;
if (lda < imax(1, am)) {
plasma_error("illegal value of lda");
return -7;
}
if (ldb < imax(1, m)) {
plasma_error("illegal value of ldb");
return -9;
}
if (ldc < imax(1, m)) {
plasma_error("illegal value of ldc");
return -12;
}
// quick return
if (m == 0 || n == 0 || (alpha == 0.0 && beta == 1.0))
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_symm(plasma, PlasmaRealDouble, m, n);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t A;
plasma_desc_t B;
plasma_desc_t C;
int retval;
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
am, am, 0, 0, am, am, &A);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &B);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
return retval;
}
retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
m, n, 0, 0, m, n, &C);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_create() failed");
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);
plasma_omp_dge2desc(pC, ldc, C, &sequence, &request);
// Call the tile async function.
plasma_omp_dsymm(side, uplo,
alpha, A,
B,
beta, C,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_ddesc2ge(C, pC, ldc, &sequence, &request);
}
// implicit synchronization
// Free matrices in tile layout.
plasma_desc_destroy(&A);
plasma_desc_destroy(&B);
plasma_desc_destroy(&C);
// Return status.
int status = sequence.status;
return status;
}
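/* Minimal usage sketch (illustrative; assumes plasma_init() has already
 * been called): C = 1.0*A*B with a symmetric 2x2 matrix A applied from
 * the left, referencing only its lower triangle. */
#if 0
static int example_dsymm(void)
{
    double A[4] = {2.0, 1.0, 1.0, 3.0}; /* symmetric, column-major */
    double B[4] = {1.0, 0.0, 0.0, 1.0};
    double C[4] = {0.0, 0.0, 0.0, 0.0};
    return plasma_dsymm(PlasmaLeft, PlasmaLower, 2, 2,
                        1.0, A, 2, B, 2, 0.0, C, 2);
}
#endif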
/***************************************************************************//**
*
* @ingroup plasma_symm
*
* Performs symmetric matrix multiplication.
* Non-blocking tile version of plasma_dsymm().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether the symmetric matrix A appears on the
* left or right in the operation as follows:
* - PlasmaLeft: \f[ C = \alpha \times A \times B + \beta \times C \f]
* - PlasmaRight: \f[ C = \alpha \times B \times A + \beta \times C \f]
*
* @param[in] uplo
* Specifies whether the upper or lower triangular part of
* the symmetric matrix A is to be referenced as follows:
* - PlasmaLower: Only the lower triangular part of the
* symmetric matrix A is to be referenced.
* - PlasmaUpper: Only the upper triangular part of the
* symmetric matrix A is to be referenced.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] C
* Descriptor of matrix C.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
*******************************************************************************
*
* @sa plasma_dsymm
* @sa plasma_omp_csymm
* @sa plasma_omp_dsymm
* @sa plasma_omp_ssymm
*
******************************************************************************/
void plasma_omp_dsymm(plasma_enum_t side, plasma_enum_t uplo,
double alpha, plasma_desc_t A,
plasma_desc_t B,
double beta, plasma_desc_t C,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((side != PlasmaLeft) &&
(side != PlasmaRight)) {
plasma_error("illegal value of side");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if ((uplo != PlasmaLower) &&
(uplo != PlasmaUpper)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(C) != PlasmaSuccess) {
plasma_error("invalid C");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (sequence == NULL) {
plasma_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || A.n == 0) && beta == 1.0))
return;
// Call the parallel function.
plasma_pdsymm(side, uplo,
alpha, A,
B,
beta, C,
sequence, request);
}
|
GB_binop__min_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__min_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint16)
// A*D function (colscale): GB (_AxD__min_uint16)
// D*A function (rowscale): GB (_DxB__min_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__min_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__min_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint16)
// C=scalar+B GB (_bind1st__min_uint16)
// C=scalar+B' GB (_bind1st_tran__min_uint16)
// C=A+scalar GB (_bind2nd__min_uint16)
// C=A'+scalar GB (_bind2nd_tran__min_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_IMIN (aij, bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMIN (x, y) ;
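// (GB_IMIN is the integer minimum, so z gets the smaller of x and y; the
// row index i and column index j are unused by this operator.)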
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MIN || GxB_NO_UINT16 || GxB_NO_MIN_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__min_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMIN (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__min_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMIN (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__min_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore the definition of GB_ATYPE for the rest of the file
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMIN (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__min_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ainv_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_fp64_fp64)
// op(A') function: GB (_unop_tran__ainv_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ainv_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = -z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = -z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ainv_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
LADMM.h | #ifndef LADMM_H
#define LADMM_H
#include "Matrix.h"
#include <string>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <stdio.h> /* printf */
#include <time.h>
#include <fstream>
#include <algorithm>
#include <iomanip>
#include <ctime>
#include <sstream>
#include <omp.h>
//#include "cmd_line.h"
// This class implements the linearized ADMM (LADMM) method.
/*
The optimization problem to solve is:
    min \sum_i f^i(A_i x) + \sum_i h^i(y^i) + g(x)
    s.t. Mx = y
Assumption 1: for each i, f^i is smooth, and g(x) is separable.
*/
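/*
Sketch of the iteration implemented by update_x / update_y / update_lambda
below (read off the code; the notation is ours). With phi(x) = \sum_i f^i(A_i x):

    x^{k+1}      = prox_g( x^k - ( M^T lambda^k + grad phi(x^k)
                     + beta*M^T(M x^k - y^k) ) / (beta*tau + L_phi) ),
                   with prox weight beta*tau + L_phi
    y^{k+1}      = prox_h( y^k + ( lambda^k + beta*(M x^{k+1} - y^k) )
                     / (beta*sigma) ),  with prox weight beta*sigma
    lambda^{k+1} = lambda^k + beta*( M x^{k+1} - y^{k+1} )

The x-step linearizes both phi and the quadratic penalty, which is why tau
is set to (slightly more than) lambda_max(M^T M) in Initialize().
*/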
template<typename L, typename D>
class LADMM
{
private:
std::vector<D> Ax;
std::vector<D> old_Ax;
std::vector<D> Mx;
std::vector<D> old_Mx;
std::vector<D> Mty;
std::vector<D> old_Mty;
std::vector<D> Mtlambda;
std::vector<D> old_Mtlambda;
std::vector<D> gradient;
std::vector<D> MtMx;
protected:
Matrix<L, D> data_A;
Matrix<L, D> data_M;
std::vector<D> x;
std::vector<D> old_x;
std::vector<D> y;
std::vector<D> old_y;
std::vector<D> lambda;
std::vector<D> old_lambda;
D beta;
L m_1;
L m_2;
L m_3;
D rho;
D tau;
D sigma;
D L_phi;
D function_value;
D infeas;
L print_every_N_ADMM;
D running_time_ADMM;
L nb_outer_iters;
ofstream samp_ADMM;
public:
D lambda_f;
D mu_g;
D lambda1;
D lambda2;
D L_h;
// Problem-specific hooks; derived classes must override these.
// The base-class placeholders return 0.
virtual inline D value_of_f_j(D, L){return D(0);}
virtual inline D value_of_h_j(D, L){return D(0);}
virtual inline D gradient_of_f_j(D, L){return D(0);}
virtual inline D prox_of_h_j(D, D, L){return D(0);}
virtual inline D value_of_g_j(D, L){return D(0);}
virtual inline D prox_of_g_j(D, D, L){return D(0);}
virtual inline void set_matrix_M(){}
virtual inline void set_matrix_A(){}
/*
LADMM(const char* matrix_file, const char* matrix_file2)
: Primal_Dual_LOOPLESS_Katyusha0<L,D>(),data_A(matrix_file), data_M(matrix_file2)
{
this->matrix_merge(data_A,data_M);
this->gamma=1;
}
*/
inline void set_L_phi(){
if (data_A.nsamples== 0){
L_phi= 0;
}
else{
L_phi= compute_lambda_max_A(10);
}
}
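// compute_lambda_max_A below runs K steps of power iteration on the operator
// x -> lambda_f * A^T (A x): it repeatedly applies A then A^T (scaled by
// lambda_f), normalizes, and finally returns the Rayleigh quotient
// b^T (lambda_f A^T A) b as an estimate of the largest eigenvalue, which is
// what set_L_phi uses as the smoothness constant L_phi in update_x.
// (This reading is inferred from the loops below, not from external docs.)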
D compute_lambda_max_A(L K){
std::vector<D> bk(data_A.nfeatures);
for (L j=0;j<data_A.nfeatures;j++)
{
bk[j]=1;
}
std::vector<D> yk(data_A.nsamples);
D normk;
D tmp;
for(L kk=0;kk<K;kk++){
for (L i=0;i<data_A.nsamples;i++){
tmp=0;
for (L k = data_A.ptr[i]; k < data_A.ptr[i + 1]; k++)
{
L j=data_A.row_idx[k];
tmp+=data_A.A[k]*bk[j];
}
yk[i]=tmp;
}
normk=0;
for (L j=0;j<data_A.nfeatures;j++){
bk[j]=0;
for (L k = data_A.ptr_t[j]; k < data_A.ptr_t[j + 1]; k++)
{
L i=data_A.col_idx[k];
bk[j]+=data_A.A_t[k]*yk[i]*lambda_f;
}
normk+=bk[j]*bk[j];
}
normk=sqrt(normk);
for (L j=0;j<data_A.nfeatures;j++)
{bk[j]=bk[j]/normk; }
}
D res=0;
normk=0;
for (L i=0;i<data_A.nsamples;i++){
tmp=0;
for (L k = data_A.ptr[i]; k < data_A.ptr[i + 1]; k++)
{
L j=data_A.row_idx[k];
tmp+=data_A.A[k]*bk[j];
}
yk[i]=tmp;
normk+=yk[i]*yk[i];
}
std::vector<D> bk2(data_A.nfeatures);
for (L j=0;j<data_A.nfeatures;j++){
bk2[j]=0;
for (L k = data_A.ptr_t[j]; k < data_A.ptr_t[j + 1]; k++)
{
L i=data_A.col_idx[k];
bk2[j]+=data_A.A_t[k]*yk[i]*lambda_f;
}
}
for (L j=0;j<data_A.nfeatures;j++)
res+=bk2[j]*bk[j];
return res;
}
D compute_lambda_max_M(L K){
std::vector<D> bk(data_M.nfeatures);
for (L j=0;j<data_M.nfeatures- 1;j++)
{
bk[j]=1;
}
bk[data_M.nfeatures- 1]= 2;
std::vector<D> yk(data_M.nsamples);
D normk;
D tmp;
for(L kk=0;kk<K;kk++){
for (L i=0;i<data_M.nsamples;i++){
tmp=0;
for (L k = data_M.ptr[i]; k < data_M.ptr[i + 1]; k++)
{
L j=data_M.row_idx[k];
tmp+=data_M.A[k]*bk[j];
}
yk[i]=tmp;
}
normk=0;
for (L j=0;j<data_M.nfeatures;j++){
bk[j]=0;
for (L k = data_M.ptr_t[j]; k < data_M.ptr_t[j + 1]; k++)
{
L i=data_M.col_idx[k];
bk[j]+=data_M.A_t[k]*yk[i];
}
normk+=bk[j]*bk[j];
}
normk=sqrt(normk);
for (L j=0;j<data_M.nfeatures;j++)
{bk[j]=bk[j]/normk; }
}
D res=0;
normk=0;
for (L i=0;i<data_M.nsamples;i++){
tmp=0;
for (L k = data_M.ptr[i]; k < data_M.ptr[i + 1]; k++)
{
L j=data_M.row_idx[k];
tmp+=data_M.A[k]*bk[j];
}
yk[i]=tmp;
normk+=yk[i]*yk[i];
}
std::vector<D> bk2(data_M.nfeatures);
for (L j=0;j<data_M.nfeatures;j++){
bk2[j]=0;
for (L k = data_M.ptr_t[j]; k < data_M.ptr_t[j + 1]; k++)
{
L i=data_M.col_idx[k];
bk2[j]+=data_M.A_t[k]*yk[i];
}
}
for (L j=0;j<data_M.nfeatures;j++)
res+=bk2[j]*bk[j];
return res;
}
L get_nb_features(){
return data_A.get_d();
}
/* inline D get_lambda1(){return lambda1;}
inline D get_lambda2(){return lambda2;}
*/
void update_x(){
#pragma omp parallel for
for (L i= 0; i< m_1; i++){
gradient[i]= 0;
for (L k = data_A.ptr_t[i]; k < data_A.ptr_t[i + 1];k++)
{
L j=data_A.col_idx[k];
gradient[i]+= data_A.A_t[k]*gradient_of_f_j(Ax[j],j);
}
MtMx[i]= 0;
for (L k = data_M.ptr_t[i]; k < data_M.ptr_t[i + 1];k++)
{
L j=data_M.col_idx[k];
MtMx[i]+= data_M.A_t[k]*Mx[j];
}
x[i]= prox_of_g_j(x[i]- (Mtlambda[i]+ gradient[i]+ beta*MtMx[i]- beta*Mty[i])/(beta*tau+ L_phi), beta*tau+ L_phi, i);
}
#pragma omp parallel
{
#pragma omp for
for (L i= 0; i< m_2; i++){
Ax[i]= compute_AiTx(i);
}
#pragma omp for
for (L i= 0; i< m_3; i++){
Mx[i]= compute_MiTx(i);
}
}
}
void update_y(){
#pragma omp parallel for
for (L i=0; i< m_3; i++){
y[i]= prox_of_h_j(y[i]+ (lambda[i]+ beta*Mx[i]- beta*y[i])/(beta*sigma), beta*sigma, i);
}
#pragma omp parallel for
for (L i=0; i< m_1; i++){
Mty[i]= compute_MTiTy(i);
}
}
D compute_AiTx(L i){
D res=0;
for (L k = data_A.ptr[i]; k < data_A.ptr[i + 1];k++)
{
L j=data_A.row_idx[k];
res+= data_A.A[k]*x[j];
}
return res;
}
D compute_MiTx(L i){
D res=0;
for (L k = data_M.ptr[i]; k < data_M.ptr[i + 1];k++)
{
L j=data_M.row_idx[k];
res+= data_M.A[k]*x[j];
}
return res;
}
D compute_MTiTy(L i){
D res=0;
for (L k = data_M.ptr_t[i]; k < data_M.ptr_t[i + 1];k++)
{
L j=data_M.col_idx[k];
res+= data_M.A_t[k]*y[j];
}
return res;
}
D compute_MTiTlambda(L i){
D res=0;
for (L k = data_M.ptr_t[i]; k < data_M.ptr_t[i + 1];k++)
{
L j=data_M.col_idx[k];
res+= data_M.A_t[k]*lambda[j];
}
return res;
}
void compute_function_value(){
D res= 0;
for (L i= 0; i< data_A.nfeatures; i++){
res+= value_of_g_j(x[i],i);
}
for (L i= 0; i< data_M.nsamples; i++){
res+= value_of_h_j(Mx[i],i);
}
for (L i= 0; i<data_A.nsamples; i++){
res+= value_of_f_j(Ax[i],i);
}
function_value= res;
}
void compute_infeasibility(){
D res = 0;
for (L i= 0; i< data_M.nsamples; i++){
res+= (Mx[i]- data_M.b[i])*(Mx[i]- data_M.b[i]);
}
infeas= sqrt(res);
}
void update_lambda(){
// dual update: lambda <- lambda + beta*(Mx - y), then refresh M^T*lambda
#pragma omp parallel for
for(L j=0;j<m_3;j++)
{
lambda[j]= lambda[j]+ beta*(Mx[j]- y[j]);
}
#pragma omp parallel for
for (L i=0; i< m_1; i++){
Mtlambda[i]= compute_MTiTlambda(i);
}
}
void Initialize(D beta_0, D val_rho, vector<D> & x0,vector<D> & y0, vector<D> & lambda0){
cout<<"start initializing"<<endl;
set_matrix_M();
set_matrix_A();
m_1=data_A.get_d();
m_2=data_A.get_n();
m_3=data_M.get_n();
cout<<"m_1="<<m_1<<endl;
cout<<"m_2="<<m_2<<endl;
cout<<"m_3="<<m_3<<endl;
beta=beta_0;
//tau= data_A.nsamples;
tau= 1.02*compute_lambda_max_M(10);
sigma= 1;
rho=val_rho;
x.resize(m_1,0);
old_x.resize(m_1,0);
y.resize(m_3,0);
old_y.resize(m_3,0);
lambda.resize(m_3,0);
old_lambda.resize(m_3,0);
for(L i=0;i<m_1;i++){
x[i]=x0[i];
}
for(L j=0;j<m_3;j++){
y[j]=y0[j];
}
for(L j=0;j<m_3;j++){
lambda[j]=lambda0[j];
}
Ax.clear();
Ax.resize(m_2,0);
Mx.clear();
Mx.resize(m_3,0);
Mty.clear();
Mty.resize(m_1,0);
Mtlambda.clear();
Mtlambda.resize(m_1,0);
gradient.clear();
gradient.resize(m_1,0);
MtMx.clear();
MtMx.resize(m_1,0);
L_phi= 0;
set_L_phi();
cout<< "L_phi= "<< L_phi << " beta= "<< beta<< " rho= "<< rho<< " tau= "<< tau<< " sigma= "<< sigma<<endl;
}
void reset_everything(){
beta*=rho;
}
inline void compute_and_record_res(){
if(nb_outer_iters%print_every_N_ADMM==0){
compute_function_value();
compute_infeasibility();
cout<<setprecision(9)<<"Iteration: "<<nb_outer_iters<<"; time="<<running_time_ADMM<< "; function value="<<function_value<< "; infeasibility="<< infeas<< endl;
samp_ADMM<<setprecision(9)<<nb_outer_iters<<" "<<running_time_ADMM<<" "<< function_value<<" "<< infeas<< endl;
}
}
void ADMM_solve_with_Linear(D beta_0, D val_rho,vector<D> & x0,vector<D> & y0, vector<D> & lambda0, L max_nb_outer, L p_N_1, string filename1, D time){
Initialize(beta_0,val_rho, x0, y0, lambda0);
nb_outer_iters=0;
//string sampname2= ALGparam.data_dir +"/results/L_Katyusha_"+filename2;
//filename1= ALGparam.data_dir +"/results/ADMM_"+filename1;
filename1= "results/ADMM_"+filename1;
samp_ADMM.open(filename1.c_str());
running_time_ADMM=0;
print_every_N_ADMM=p_N_1;
compute_and_record_res();
D start;
D res_x, res_y, res_l;
/*
for(L i=0;i<m_3;i++){
old_lambda[i]=lambda[i];
}
*/
while(nb_outer_iters<max_nb_outer){
//rescale();
for(L i=0;i<m_1;i++){
old_x[i]=x[i];
}
for(L i=0;i<m_3;i++){
old_y[i]=y[i];
}
for(L i=0;i<m_3;i++){
old_lambda[i]=lambda[i];
}
//start = std::clock();
start=omp_get_wtime();
update_x();
update_y();
update_lambda();
nb_outer_iters++;
//running_time_ADMM+=( std::clock() - start ) / (double) CLOCKS_PER_SEC;
running_time_ADMM+=omp_get_wtime()-start;
compute_and_record_res();
/*
res_x= 0;
for(L i=0;i<m_1;i++){
res_x+= (old_x[i]- x[i])*(old_x[i]- x[i]);
}
res_y= 0;
for(L i=0;i<m_3;i++){
res_y= (old_y[i]- y[i])*(old_y[i]- y[i]);
}
res_l= 0;
for(L i=0;i<m_3;i++){
res_l= (old_lambda[i]- lambda[i])*(old_lambda[i]- lambda[i]);
}
cout<< "res_x= "<< res_x<< " res_y= "<< res_y<< " res_l= "<< res_l<< endl;
system("pause");
*/
//start = std::clock();
start=omp_get_wtime();
reset_everything();
//running_time_ADMM+=( std::clock() - start ) / (double) CLOCKS_PER_SEC;
running_time_ADMM+=omp_get_wtime()-start;
if (running_time_ADMM> time){
break;
}
}
}
};
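/*
Minimal usage sketch (hypothetical names; a derived class must supply the
problem-specific pieces; not part of this header):

    template<typename L, typename D>
    class MyProblem : public LADMM<L, D> {
        // override value_of_f_j / gradient_of_f_j / value_of_h_j /
        // prox_of_h_j / value_of_g_j / prox_of_g_j,
        // and set_matrix_A / set_matrix_M to load the data.
    };

    MyProblem<int, double> solver;
    std::vector<double> x0(n, 0), y0(m, 0), lambda0(m, 0);
    solver.ADMM_solve_with_Linear(1.0, 1.0, x0, y0, lambda0,
                                  1000, 10, "run.txt", 3600.0);
*/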
#endif /* LADMM_H */
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
int i = 0;
for (; i + 1 < outh; i += 2)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
for (; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
}
static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm2, int inch, int outch, const Option& opt)
{
Mat kernel_tm(4 * 4, inch, outch);
// G
const float ktm[4][3] = {
{1.0f, 0.0f, 0.0f},
{1.0f / 2, 1.0f / 2, 1.0f / 2},
{1.0f / 2, -1.0f / 2, 1.0f / 2},
{0.0f, 0.0f, 1.0f}
};
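// The 4x3 matrix ktm above is the Winograd F(2,3) kernel-transform matrix G:
// each 3x3 kernel g is mapped to a 4x4 tile U = G * g * G^T (computed below
// as tmp = G applied to the kernel rows, then the second pass applies G to
// the columns, up to the storage order used in kernel_tm0), so per-tile
// convolution becomes an elementwise product in the transformed domain.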
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[4][3];
for (int i = 0; i < 4; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 4; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 4; i++)
{
kernel_tm0[j * 4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 16-inch-outch
// dst = inch-16-outch
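// With SSE2, weights are repacked so that at each of the 16 transform
// positions k, the 8 (or 4) output channels handled together read their
// scalars from consecutive floats: g0.row(k) holds, for every input channel
// p, one weight from each of the packed output channels. This is what lets
// the dot-product kernels below broadcast k0[0..7] straight into SIMD lanes.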
#if __SSE2__
kernel_tm2.create(8 * inch, 16, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
kernel_tm2.create(inch, 16, outch);
#endif
int q = 0;
#if __SSE2__
for (; q + 7 < outch; q += 8)
{
Mat g0 = kernel_tm2.channel(q / 8);
for (int k = 0; k < 16; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p < inch; p++)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = kernel_tm.channel(q + i).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
for (; q + 3 < outch; q += 4)
{
Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4);
for (int k = 0; k < 16; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p < inch; p++)
{
for (int i = 0; i < 4; i++)
{
const float* k00 = kernel_tm.channel(q + i).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
#endif
for (; q < outch; q++)
{
#if __SSE2__
Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
Mat g0 = kernel_tm2.channel(q);
#endif
for (int k = 0; k < 16; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p < inch; p++)
{
const float* k00 = kernel_tm.channel(q).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
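// F(2,3) produces 2x2 outputs from overlapping 4x4 input tiles, so the
// output is rounded up to a multiple of 2 and the input padded with zeros
// to (outw + 2) x (outh + 2).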
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 2;
int h_tiles = outh / 2;
int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 16, inch, 4u, opt.workspace_allocator);
conv3x3s1_winograd23_transform_input_sse(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
const int tiles = h_tm / 4 * w_tm / 4;
// permute
Mat bottom_blob_tm2;
#if __AVX__
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 16, 4u, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 16, 4u, opt.workspace_allocator);
else
bottom_blob_tm2.create(1 * inch, tiles, 16, 4u, opt.workspace_allocator);
#elif __SSE2__
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 16, 4u, opt.workspace_allocator);
else
bottom_blob_tm2.create(1 * inch, tiles, 16, 4u, opt.workspace_allocator);
#else
bottom_blob_tm2.create(1 * inch, tiles, 16, 4u, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 16; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __SSE2__
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
_mm256_storeu_ps(tmpptr, _r0);
r0 += bottom_blob_tm.cstep;
tmpptr += 8;
}
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
float* tmpptr = tm2.row(i / 8 + (i % 8) / 4);
#else
float* tmpptr = tm2.row(i / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i);
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
_mm_storeu_ps(tmpptr, _r0);
r0 += bottom_blob_tm.cstep;
tmpptr += 4;
}
}
#endif // __SSE2__
for (; i < tiles; i++)
{
#if __AVX__
float* tmpptr = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
#elif __SSE2__
float* tmpptr = tm2.row(i / 4 + i % 4);
#else
float* tmpptr = tm2.row(i);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i);
for (int q = 0; q < inch; q++)
{
tmpptr[0] = r0[0];
r0 += bottom_blob_tm.cstep;
tmpptr += 1;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 16, outch, 4u, opt.workspace_allocator);
#if __SSE2__
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel0_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 16; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
__m256 _w4 = _mm256_broadcast_ss(k0 + 4);
__m256 _w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
__m256 _w6 = _mm256_broadcast_ss(k0 + 6);
__m256 _w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm256_broadcast_ss(k0 + 4);
_w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm256_broadcast_ss(k0 + 6);
_w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm256_broadcast_ss(k0 + 4);
_w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm256_broadcast_ss(k0 + 6);
_w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm256_broadcast_ss(k0 + 4);
_w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm256_broadcast_ss(k0 + 6);
_w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
}
for (; j < nn; j++)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
__m256 _w4 = _mm256_broadcast_ss(k0 + 4);
__m256 _w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
__m256 _w6 = _mm256_broadcast_ss(k0 + 6);
__m256 _w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output4_tm, _sum4);
_mm256_storeu_ps(output5_tm, _sum5);
_mm256_storeu_ps(output6_tm, _sum6);
_mm256_storeu_ps(output7_tm, _sum7);
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
output4_tm += 8;
output5_tm += 8;
output6_tm += 8;
output7_tm += 8;
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#else
const float* r0 = bb2.row(i / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
__m128 _w4 = _mm_load1_ps(k0 + 4);
__m128 _w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
__m128 _w6 = _mm_load1_ps(k0 + 6);
__m128 _w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm_load1_ps(k0 + 4);
_w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm_load1_ps(k0 + 6);
_w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm_load1_ps(k0 + 4);
_w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm_load1_ps(k0 + 6);
_w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm_load1_ps(k0 + 4);
_w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm_load1_ps(k0 + 6);
_w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
}
for (; j < nn; j++)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
__m128 _w4 = _mm_load1_ps(k0 + 4);
__m128 _w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
__m128 _w6 = _mm_load1_ps(k0 + 6);
__m128 _w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
output4_tm += 4;
output5_tm += 4;
output6_tm += 4;
output7_tm += 4;
}
for (; i < tiles; i++)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#else
const float* r0 = bb2.row(i / 4 + i % 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __AVX__
__m256 _sum = _mm256_setzero_ps();
#else
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
#endif
int j = 0;
for (; j + 3 < nn; j += 4)
{
#if __AVX__
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _w0 = _mm256_loadu_ps(k0);
_sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
__m256 _w1 = _mm256_loadu_ps(k0 + 8);
_sum = _mm256_comp_fmadd_ps(_val1, _w1, _sum);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _w2 = _mm256_loadu_ps(k0 + 16);
_sum = _mm256_comp_fmadd_ps(_val2, _w2, _sum);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
__m256 _w3 = _mm256_loadu_ps(k0 + 24);
_sum = _mm256_comp_fmadd_ps(_val3, _w3, _sum);
#else
__m128 _val0 = _mm_load1_ps(r0);
__m128 _w00 = _mm_loadu_ps(k0);
__m128 _w01 = _mm_loadu_ps(k0 + 4);
_sum0 = _mm_comp_fmadd_ps(_val0, _w00, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val0, _w01, _sum1);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _w10 = _mm_loadu_ps(k0 + 8);
__m128 _w11 = _mm_loadu_ps(k0 + 12);
_sum0 = _mm_comp_fmadd_ps(_val1, _w10, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w11, _sum1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _w20 = _mm_loadu_ps(k0 + 16);
__m128 _w21 = _mm_loadu_ps(k0 + 20);
_sum0 = _mm_comp_fmadd_ps(_val2, _w20, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val2, _w21, _sum1);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _w30 = _mm_loadu_ps(k0 + 24);
__m128 _w31 = _mm_loadu_ps(k0 + 28);
_sum0 = _mm_comp_fmadd_ps(_val3, _w30, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val3, _w31, _sum1);
#endif
r0 += 4;
k0 += 32;
}
for (; j < nn; j++)
{
#if __AVX__
__m256 _val = _mm256_broadcast_ss(r0);
__m256 _w = _mm256_loadu_ps(k0);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
#else
__m128 _val = _mm_load1_ps(r0);
__m128 _w0 = _mm_loadu_ps(k0);
__m128 _w1 = _mm_loadu_ps(k0 + 4);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
#endif
r0 += 1;
k0 += 8;
}
float sum[8];
#if __AVX__
_mm256_storeu_ps(sum, _sum);
#else
_mm_storeu_ps(sum, _sum0);
_mm_storeu_ps(sum + 4, _sum1);
#endif
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output4_tm[0] = sum[4];
output5_tm[0] = sum[5];
output6_tm[0] = sum[6];
output7_tm[0] = sum[7];
output0_tm++;
output1_tm++;
output2_tm++;
output3_tm++;
output4_tm++;
output5_tm++;
output6_tm++;
output7_tm++;
}
}
}
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4);
for (int r = 0; r < 16; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
}
for (; j < nn; j++)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output3_tm, _sum3);
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#else
const float* r0 = bb2.row(i / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
}
for (; j < nn; j++)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
for (; i < tiles; i++)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#else
const float* r0 = bb2.row(i / 4 + i % 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val0 = _mm_load1_ps(r0);
__m128 _w0 = _mm_loadu_ps(k0);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _w1 = _mm_loadu_ps(k0 + 4);
_sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _w2 = _mm_loadu_ps(k0 + 8);
_sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _w3 = _mm_loadu_ps(k0 + 12);
_sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);
r0 += 4;
k0 += 16;
}
for (; j < nn; j++)
{
__m128 _val = _mm_load1_ps(r0);
__m128 _w0 = _mm_loadu_ps(k0);
_sum = _mm_comp_fmadd_ps(_val, _w0, _sum);
r0 += 1;
k0 += 4;
}
float sum[4];
_mm_storeu_ps(sum, _sum);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm++;
output1_tm++;
output2_tm++;
output3_tm++;
}
}
}
remain_outch_start += nn_outch << 2;
#else
int remain_outch_start = 0;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __SSE2__
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r = 0; r < 16; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __SSE2__
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m256 _val0 = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
__m256 _val1 = _mm256_loadu_ps(r0 + 8);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val1, _w1, _sum0);
__m256 _val2 = _mm256_loadu_ps(r0 + 16);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
_sum0 = _mm256_comp_fmadd_ps(_val2, _w2, _sum0);
__m256 _val3 = _mm256_loadu_ps(r0 + 24);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val3, _w3, _sum0);
r0 += 32;
k0 += 4;
}
for (; j < nn; j++)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
r0 += 8;
k0++;
}
_mm256_storeu_ps(output0_tm, _sum0);
output0_tm += 8;
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#else
const float* r0 = bb2.row(i / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val0 = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
__m128 _val1 = _mm_loadu_ps(r0 + 4);
__m128 _w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val1, _w1, _sum0);
__m128 _val2 = _mm_loadu_ps(r0 + 8);
__m128 _w2 = _mm_load1_ps(k0 + 2);
_sum0 = _mm_comp_fmadd_ps(_val2, _w2, _sum0);
__m128 _val3 = _mm_loadu_ps(r0 + 12);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val3, _w3, _sum0);
r0 += 16;
k0 += 4;
}
for (; j < nn; j++)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
r0 += 4;
k0++;
}
_mm_storeu_ps(output0_tm, _sum0);
output0_tm += 4;
}
#endif // __SSE2__
for (; i < tiles; i++)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#elif __SSE2__
const float* r0 = bb2.row(i / 4 + i % 4);
#else
const float* r0 = bb2.row(i);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
float sum = 0.f;
for (int j = 0; j < nn; j++)
{
float w0 = k0[0];
float val0 = r0[0];
sum += val0 * w0;
r0 += 1;
k0 += 1;
}
output0_tm[0] = sum;
output0_tm += 1;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
}
{
conv3x3s1_winograd23_transform_output_sse(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm2, int inch, int outch, const Option& opt)
{
Mat kernel_tm(6 * 6, inch, outch);
// G
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
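// As in the F(2,3) variant above, ktm here is the kernel-transform matrix G,
// now for Winograd F(4,3): each 3x3 kernel becomes a 6x6 tile (36 transform
// positions), traded for 4x4 outputs per tile.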
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = inch-36-outch
#if __SSE2__
kernel_tm2.create(8 * inch, 36, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
kernel_tm2.create(inch, 36, outch);
#endif
int q = 0;
#if __SSE2__
for (; q + 7 < outch; q += 8)
{
Mat g0 = kernel_tm2.channel(q / 8);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p < inch; p++)
{
for (int i = 0; i < 8; i++)
{
const float* k00 = kernel_tm.channel(q + i).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
for (; q + 3 < outch; q += 4)
{
Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p < inch; p++)
{
for (int i = 0; i < 4; i++)
{
const float* k00 = kernel_tm.channel(q + i).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
#endif
for (; q < outch; q++)
{
#if __SSE2__
Mat g0 = kernel_tm2.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
Mat g0 = kernel_tm2.channel(q);
#endif
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p < inch; p++)
{
const float* k00 = kernel_tm.channel(q).row(p);
g00[0] = k00[k];
g00++;
}
}
}
}
static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
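// F(4,3) consumes overlapping 6x6 input tiles for 4x4 outputs, hence the
// round-up of the output to a multiple of 4 and zero padding of the input
// to (outw + 2) x (outh + 2).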
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, 4u, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_sse(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
Mat bottom_blob_tm2;
#if __AVX__
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 36, 4u, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 4u, opt.workspace_allocator);
else
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u, opt.workspace_allocator);
#elif __SSE2__
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 4u, opt.workspace_allocator);
else
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u, opt.workspace_allocator);
#else
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __SSE2__
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i);
for (int q = 0; q < inch; q++)
{
__m256 _r0 = _mm256_loadu_ps(r0);
_mm256_storeu_ps(tmpptr, _r0);
r0 += bottom_blob_tm.cstep;
tmpptr += 8;
}
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
float* tmpptr = tm2.row(i / 8 + (i % 8) / 4);
#else
float* tmpptr = tm2.row(i / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i);
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
_mm_storeu_ps(tmpptr, _r0);
r0 += bottom_blob_tm.cstep;
tmpptr += 4;
}
}
#endif // __SSE2__
for (; i < tiles; i++)
{
#if __AVX__
float* tmpptr = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
#elif __SSE2__
float* tmpptr = tm2.row(i / 4 + i % 4);
#else
float* tmpptr = tm2.row(i);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i);
for (int q = 0; q < inch; q++)
{
tmpptr[0] = r0[0];
r0 += bottom_blob_tm.cstep;
tmpptr += 1;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u, opt.workspace_allocator);
#if __SSE2__
int nn_outch = outch >> 3;
int remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel0_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
__m256 _sum4 = _mm256_setzero_ps();
__m256 _sum5 = _mm256_setzero_ps();
__m256 _sum6 = _mm256_setzero_ps();
__m256 _sum7 = _mm256_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
__m256 _w4 = _mm256_broadcast_ss(k0 + 4);
__m256 _w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
__m256 _w6 = _mm256_broadcast_ss(k0 + 6);
__m256 _w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm256_broadcast_ss(k0 + 4);
_w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm256_broadcast_ss(k0 + 6);
_w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm256_broadcast_ss(k0 + 4);
_w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm256_broadcast_ss(k0 + 6);
_w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm256_broadcast_ss(k0 + 4);
_w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm256_broadcast_ss(k0 + 6);
_w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
}
for (; j < nn; j++)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
__m256 _w4 = _mm256_broadcast_ss(k0 + 4);
__m256 _w5 = _mm256_broadcast_ss(k0 + 5);
_sum4 = _mm256_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm256_comp_fmadd_ps(_val, _w5, _sum5);
__m256 _w6 = _mm256_broadcast_ss(k0 + 6);
__m256 _w7 = _mm256_broadcast_ss(k0 + 7);
_sum6 = _mm256_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm256_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 8;
k0 += 8;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output3_tm, _sum3);
_mm256_storeu_ps(output4_tm, _sum4);
_mm256_storeu_ps(output5_tm, _sum5);
_mm256_storeu_ps(output6_tm, _sum6);
_mm256_storeu_ps(output7_tm, _sum7);
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
output4_tm += 8;
output5_tm += 8;
output6_tm += 8;
output7_tm += 8;
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#else
const float* r0 = bb2.row(i / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
__m128 _sum4 = _mm_setzero_ps();
__m128 _sum5 = _mm_setzero_ps();
__m128 _sum6 = _mm_setzero_ps();
__m128 _sum7 = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
__m128 _w4 = _mm_load1_ps(k0 + 4);
__m128 _w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
__m128 _w6 = _mm_load1_ps(k0 + 6);
__m128 _w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm_load1_ps(k0 + 4);
_w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm_load1_ps(k0 + 6);
_w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm_load1_ps(k0 + 4);
_w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm_load1_ps(k0 + 6);
_w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
_w4 = _mm_load1_ps(k0 + 4);
_w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
_w6 = _mm_load1_ps(k0 + 6);
_w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
}
for (; j < nn; j++)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
__m128 _w4 = _mm_load1_ps(k0 + 4);
__m128 _w5 = _mm_load1_ps(k0 + 5);
_sum4 = _mm_comp_fmadd_ps(_val, _w4, _sum4);
_sum5 = _mm_comp_fmadd_ps(_val, _w5, _sum5);
__m128 _w6 = _mm_load1_ps(k0 + 6);
__m128 _w7 = _mm_load1_ps(k0 + 7);
_sum6 = _mm_comp_fmadd_ps(_val, _w6, _sum6);
_sum7 = _mm_comp_fmadd_ps(_val, _w7, _sum7);
r0 += 4;
k0 += 8;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
output4_tm += 4;
output5_tm += 4;
output6_tm += 4;
output7_tm += 4;
}
for (; i < tiles; i++)
{
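// bottom_blob_tm2 packs tiles in descending block sizes (8, then 4, then
// singles with AVX; 4 then singles otherwise), so the row index of tile i
// below counts how many full blocks precede it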
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#else
const float* r0 = bb2.row(i / 4 + i % 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __AVX__
__m256 _sum = _mm256_setzero_ps();
#else
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
#endif
int j = 0;
for (; j + 3 < nn; j += 4)
{
#if __AVX__
__m256 _val0 = _mm256_broadcast_ss(r0);
__m256 _w0 = _mm256_loadu_ps(k0);
_sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum);
__m256 _val1 = _mm256_broadcast_ss(r0 + 1);
__m256 _w1 = _mm256_loadu_ps(k0 + 8);
_sum = _mm256_comp_fmadd_ps(_val1, _w1, _sum);
__m256 _val2 = _mm256_broadcast_ss(r0 + 2);
__m256 _w2 = _mm256_loadu_ps(k0 + 16);
_sum = _mm256_comp_fmadd_ps(_val2, _w2, _sum);
__m256 _val3 = _mm256_broadcast_ss(r0 + 3);
__m256 _w3 = _mm256_loadu_ps(k0 + 24);
_sum = _mm256_comp_fmadd_ps(_val3, _w3, _sum);
#else
__m128 _val0 = _mm_load1_ps(r0);
__m128 _w00 = _mm_loadu_ps(k0);
__m128 _w01 = _mm_loadu_ps(k0 + 4);
_sum0 = _mm_comp_fmadd_ps(_val0, _w00, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val0, _w01, _sum1);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _w10 = _mm_loadu_ps(k0 + 8);
__m128 _w11 = _mm_loadu_ps(k0 + 12);
_sum0 = _mm_comp_fmadd_ps(_val1, _w10, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val1, _w11, _sum1);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _w20 = _mm_loadu_ps(k0 + 16);
__m128 _w21 = _mm_loadu_ps(k0 + 20);
_sum0 = _mm_comp_fmadd_ps(_val2, _w20, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val2, _w21, _sum1);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _w30 = _mm_loadu_ps(k0 + 24);
__m128 _w31 = _mm_loadu_ps(k0 + 28);
_sum0 = _mm_comp_fmadd_ps(_val3, _w30, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val3, _w31, _sum1);
#endif
r0 += 4;
k0 += 32;
}
for (; j < nn; j++)
{
#if __AVX__
__m256 _val = _mm256_broadcast_ss(r0);
__m256 _w = _mm256_loadu_ps(k0);
_sum = _mm256_comp_fmadd_ps(_val, _w, _sum);
#else
__m128 _val = _mm_load1_ps(r0);
__m128 _w0 = _mm_loadu_ps(k0);
__m128 _w1 = _mm_loadu_ps(k0 + 4);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
#endif
r0 += 1;
k0 += 8;
}
float sum[8];
#if __AVX__
_mm256_storeu_ps(sum, _sum);
#else
_mm_storeu_ps(sum, _sum0);
_mm_storeu_ps(sum + 4, _sum1);
#endif
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output4_tm[0] = sum[4];
output5_tm[0] = sum[5];
output6_tm[0] = sum[6];
output7_tm[0] = sum[7];
output0_tm++;
output1_tm++;
output2_tm++;
output3_tm++;
output4_tm++;
output5_tm++;
output6_tm++;
output7_tm++;
}
}
}
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
__m256 _sum1 = _mm256_setzero_ps();
__m256 _sum2 = _mm256_setzero_ps();
__m256 _sum3 = _mm256_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
_val = _mm256_loadu_ps(r0);
_w0 = _mm256_broadcast_ss(k0);
_w1 = _mm256_broadcast_ss(k0 + 1);
_w2 = _mm256_broadcast_ss(k0 + 2);
_w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
}
for (; j < nn; j++)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm256_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm256_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm256_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 8;
k0 += 4;
}
_mm256_storeu_ps(output0_tm, _sum0);
_mm256_storeu_ps(output1_tm, _sum1);
_mm256_storeu_ps(output2_tm, _sum2);
_mm256_storeu_ps(output3_tm, _sum3);
output0_tm += 8;
output1_tm += 8;
output2_tm += 8;
output3_tm += 8;
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#else
const float* r0 = bb2.row(i / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
__m128 _sum1 = _mm_setzero_ps();
__m128 _sum2 = _mm_setzero_ps();
__m128 _sum3 = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
_val = _mm_loadu_ps(r0);
_w0 = _mm_load1_ps(k0);
_w1 = _mm_load1_ps(k0 + 1);
_w2 = _mm_load1_ps(k0 + 2);
_w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
}
for (; j < nn; j++)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
__m128 _w1 = _mm_load1_ps(k0 + 1);
__m128 _w2 = _mm_load1_ps(k0 + 2);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
_sum1 = _mm_comp_fmadd_ps(_val, _w1, _sum1);
_sum2 = _mm_comp_fmadd_ps(_val, _w2, _sum2);
_sum3 = _mm_comp_fmadd_ps(_val, _w3, _sum3);
r0 += 4;
k0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
for (; i < tiles; i++)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#else
const float* r0 = bb2.row(i / 4 + i % 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val0 = _mm_load1_ps(r0);
__m128 _w0 = _mm_loadu_ps(k0);
_sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
__m128 _val1 = _mm_load1_ps(r0 + 1);
__m128 _w1 = _mm_loadu_ps(k0 + 4);
_sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
__m128 _val2 = _mm_load1_ps(r0 + 2);
__m128 _w2 = _mm_loadu_ps(k0 + 8);
_sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
__m128 _val3 = _mm_load1_ps(r0 + 3);
__m128 _w3 = _mm_loadu_ps(k0 + 12);
_sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);
r0 += 4;
k0 += 16;
}
for (; j < nn; j++)
{
__m128 _val = _mm_load1_ps(r0);
__m128 _w0 = _mm_loadu_ps(k0);
_sum = _mm_comp_fmadd_ps(_val, _w0, _sum);
r0 += 1;
k0 += 4;
}
float sum[4];
_mm_storeu_ps(sum, _sum);
output0_tm[0] = sum[0];
output1_tm[0] = sum[1];
output2_tm[0] = sum[2];
output3_tm[0] = sum[3];
output0_tm++;
output1_tm++;
output2_tm++;
output3_tm++;
}
}
}
remain_outch_start += nn_outch << 2;
#else
int remain_outch_start = 0;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __SSE2__
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const Mat kernel0_tm = kernel_tm.channel(p);
#endif
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __SSE2__
#if __AVX__
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m256 _sum0 = _mm256_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m256 _val0 = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
_sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
__m256 _val1 = _mm256_loadu_ps(r0 + 8);
__m256 _w1 = _mm256_broadcast_ss(k0 + 1);
_sum0 = _mm256_comp_fmadd_ps(_val1, _w1, _sum0);
__m256 _val2 = _mm256_loadu_ps(r0 + 16);
__m256 _w2 = _mm256_broadcast_ss(k0 + 2);
_sum0 = _mm256_comp_fmadd_ps(_val2, _w2, _sum0);
__m256 _val3 = _mm256_loadu_ps(r0 + 24);
__m256 _w3 = _mm256_broadcast_ss(k0 + 3);
_sum0 = _mm256_comp_fmadd_ps(_val3, _w3, _sum0);
r0 += 32;
k0 += 4;
}
for (; j < nn; j++)
{
__m256 _val = _mm256_loadu_ps(r0);
__m256 _w0 = _mm256_broadcast_ss(k0);
_sum0 = _mm256_comp_fmadd_ps(_val, _w0, _sum0);
r0 += 8;
k0++;
}
_mm256_storeu_ps(output0_tm, _sum0);
output0_tm += 8;
}
#endif // __AVX__
for (; i + 3 < tiles; i += 4)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#else
const float* r0 = bb2.row(i / 4);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
__m128 _sum0 = _mm_setzero_ps();
int j = 0;
for (; j + 3 < nn; j += 4)
{
__m128 _val0 = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
_sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0);
__m128 _val1 = _mm_loadu_ps(r0 + 4);
__m128 _w1 = _mm_load1_ps(k0 + 1);
_sum0 = _mm_comp_fmadd_ps(_val1, _w1, _sum0);
__m128 _val2 = _mm_loadu_ps(r0 + 8);
__m128 _w2 = _mm_load1_ps(k0 + 2);
_sum0 = _mm_comp_fmadd_ps(_val2, _w2, _sum0);
__m128 _val3 = _mm_loadu_ps(r0 + 12);
__m128 _w3 = _mm_load1_ps(k0 + 3);
_sum0 = _mm_comp_fmadd_ps(_val3, _w3, _sum0);
r0 += 16;
k0 += 4;
}
for (; j < nn; j++)
{
__m128 _val = _mm_loadu_ps(r0);
__m128 _w0 = _mm_load1_ps(k0);
_sum0 = _mm_comp_fmadd_ps(_val, _w0, _sum0);
r0 += 4;
k0++;
}
_mm_storeu_ps(output0_tm, _sum0);
output0_tm += 4;
}
#endif // __SSE2__
for (; i < tiles; i++)
{
#if __AVX__
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#elif __SSE2__
const float* r0 = bb2.row(i / 4 + i % 4);
#else
const float* r0 = bb2.row(i);
#endif
const float* k0 = kernel0_tm.row(r);
int nn = inch; // inch always > 0
float sum = 0.f;
for (int j = 0; j < nn; j++)
{
float w0 = k0[0];
float val0 = r0[0];
sum += val0 * w0;
r0 += 1;
k0 += 1;
}
output0_tm[0] = sum;
output0_tm += 1;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_sse(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
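// tailstep = 2*w - 2*outw: producing outw outputs advances the row pointers
// by 2*outw floats, so this jumps them to the start of the next input row
// pair (stride 2)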
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const float* img = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 9 + q * 9;
const float* r0 = img;
const float* r1 = img + w;
const float* r2 = img + w * 2;
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
}
|
stream.c | /*-----------------------------------------------------------------------*/
/* Program: Stream */
/* Revision: $Id: stream.c,v 5.9 2009/04/11 16:35:00 mccalpin Exp $ */
/* Original code developed by John D. McCalpin */
/* Programmers: John D. McCalpin */
/* Joe R. Zagar */
/* */
/* This program measures memory transfer rates in MB/s for simple */
/* computational kernels coded in C. */
/*-----------------------------------------------------------------------*/
/* Copyright 1991-2005: John D. McCalpin */
/*-----------------------------------------------------------------------*/
/* License: */
/* 1. You are free to use this program and/or to redistribute */
/* this program. */
/* 2. You are free to modify this program for your own use, */
/* including commercial use, subject to the publication */
/* restrictions in item 3. */
/* 3. You are free to publish results obtained from running this */
/* program, or from works that you derive from this program, */
/* with the following limitations: */
/* 3a. In order to be referred to as "STREAM benchmark results", */
/* published results must be in conformance to the STREAM */
/* Run Rules, (briefly reviewed below) published at */
/* http://www.cs.virginia.edu/stream/ref.html */
/* and incorporated herein by reference. */
/* As the copyright holder, John McCalpin retains the */
/* right to determine conformity with the Run Rules. */
/* 3b. Results based on modified source code or on runs not in */
/* accordance with the STREAM Run Rules must be clearly */
/* labelled whenever they are published. Examples of */
/* proper labelling include: */
/* "tuned STREAM benchmark results" */
/* "based on a variant of the STREAM benchmark code" */
/* Other comparable, clear and reasonable labelling is */
/* acceptable. */
/* 3c. Submission of results to the STREAM benchmark web site */
/* is encouraged, but not required. */
/* 4. Use of this program or creation of derived works based on this */
/* program constitutes acceptance of these licensing restrictions. */
/* 5. Absolutely no warranty is expressed or implied. */
/*-----------------------------------------------------------------------*/
# include <stdio.h>
# include <math.h>
# include <float.h>
# include <limits.h>
# include <sys/time.h>
/* INSTRUCTIONS:
*
* 1) Stream requires a good bit of memory to run. Adjust the
* value of 'N' (below) to give a 'timing calibration' of
* at least 20 clock-ticks. This will provide rate estimates
* that should be good to about 5% precision.
*/
#ifndef N
# define N 2000000
#endif
#ifndef NTIMES
# define NTIMES 10
#endif
#ifndef OFFSET
# define OFFSET 0
#endif
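/*
 * Thanks to the #ifndef guards above, N, NTIMES and OFFSET can be
 * overridden at compile time instead of editing this file. An illustrative
 * invocation (any compiler accepting -D and -O flags):
 *
 * cc -O3 -DN=20000000 -DNTIMES=20 stream.c -o stream
 */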
/*
* 3) Compile the code with full optimization. Many compilers
* generate unreasonably bad code before the optimizer tightens
* things up. If the results are unreasonably good, on the
* other hand, the optimizer might be too smart for me!
*
* Try compiling with:
* cc -O stream_omp.c -o stream_omp
*
* This is known to work on Cray, SGI, IBM, and Sun machines.
*
*
* 4) Mail the results to mccalpin@cs.virginia.edu
* Be sure to include:
* a) computer hardware model number and software revision
* b) the compiler flags
* c) all of the output from the test case.
* Thanks!
*
*/
# define HLINE "-------------------------------------------------------------\n"
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
static double a[N+OFFSET],
b[N+OFFSET],
c[N+OFFSET];
static double avgtime[4] = {0}, maxtime[4] = {0},
mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX};
static char *label[4] = {"Copy: ", "Scale: ",
"Add: ", "Triad: "};
static double bytes[4] = {
2 * sizeof(double) * N,
2 * sizeof(double) * N,
3 * sizeof(double) * N,
3 * sizeof(double) * N
};
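/* Bytes moved per kernel pass: Copy and Scale each touch two arrays (one
read, one write); Add and Triad touch three (two reads, one write). */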
extern double mysecond();
extern void checkSTREAMresults();
#ifdef TUNED
extern void tuned_STREAM_Copy();
extern void tuned_STREAM_Scale(double scalar);
extern void tuned_STREAM_Add();
extern void tuned_STREAM_Triad(double scalar);
#endif
#ifdef _OPENMP
extern int omp_get_num_threads();
#endif
int
main()
{
int quantum, checktick();
int BytesPerWord;
register int j, k;
double scalar, t, times[4][NTIMES];
/* --- SETUP --- determine precision and check timing --- */
printf(HLINE);
printf("STREAM version $Revision: 5.9 $\n");
printf(HLINE);
BytesPerWord = sizeof(double);
printf("This system uses %d bytes per DOUBLE PRECISION word.\n",
BytesPerWord);
printf(HLINE);
#ifdef NO_LONG_LONG
printf("Array size = %d, Offset = %d\n" , N, OFFSET);
#else
printf("Array size = %llu, Offset = %d\n", (unsigned long long) N, OFFSET);
#endif
printf("Total memory required = %.1f MB.\n",
(3.0 * BytesPerWord) * ( (double) N / 1048576.0));
printf("Each test is run %d times, but only\n", NTIMES);
printf("the *best* time for each is used.\n");
#ifdef _OPENMP
printf(HLINE);
#pragma omp parallel
{
#pragma omp master
{
k = omp_get_num_threads();
printf ("Number of Threads requested = %i\n",k);
}
}
#endif
printf(HLINE);
#pragma omp parallel
{
printf ("Printing one line per active thread....\n");
}
/* Initialize the arrays. */
#pragma omp parallel for
for (j=0; j<N; j++) {
a[j] = 1.0;
b[j] = 2.0;
c[j] = 0.0;
}
printf(HLINE);
if ( (quantum = checktick()) >= 1)
printf("Your clock granularity/precision appears to be "
"%d microseconds.\n", quantum);
else {
printf("Your clock granularity appears to be "
"less than one microsecond.\n");
quantum = 1;
}
t = mysecond();
#pragma omp parallel for
for (j = 0; j < N; j++)
a[j] = 2.0E0 * a[j];
t = 1.0E6 * (mysecond() - t);
printf("Each test below will take on the order"
" of %d microseconds.\n", (int) t );
printf(" (= %d clock ticks)\n", (int) (t/quantum) );
printf("Increase the size of the arrays if this shows that\n");
printf("you are not getting at least 20 clock ticks per test.\n");
printf(HLINE);
printf("WARNING -- The above is only a rough guideline.\n");
printf("For best results, please be sure you know the\n");
printf("precision of your system timer.\n");
printf(HLINE);
/* --- MAIN LOOP --- repeat test cases NTIMES times --- */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
times[0][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Copy();
#else
#pragma omp parallel for
for (j=0; j<N; j++)
c[j] = a[j];
#endif
times[0][k] = mysecond() - times[0][k];
times[1][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Scale(scalar);
#else
#pragma omp parallel for
for (j=0; j<N; j++)
b[j] = scalar*c[j];
#endif
times[1][k] = mysecond() - times[1][k];
times[2][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Add();
#else
#pragma omp parallel for
for (j=0; j<N; j++)
c[j] = a[j]+b[j];
#endif
times[2][k] = mysecond() - times[2][k];
times[3][k] = mysecond();
#ifdef TUNED
tuned_STREAM_Triad(scalar);
#else
#pragma omp parallel for
for (j=0; j<N; j++)
a[j] = b[j]+scalar*c[j];
#endif
times[3][k] = mysecond() - times[3][k];
}
/* --- SUMMARY --- */
for (k=1; k<NTIMES; k++) /* note -- skip first iteration */
{
for (j=0; j<4; j++)
{
avgtime[j] = avgtime[j] + times[j][k];
mintime[j] = MIN(mintime[j], times[j][k]);
maxtime[j] = MAX(maxtime[j], times[j][k]);
}
}
printf("Function Rate (MB/s) Avg time Min time Max time\n");
for (j=0; j<4; j++) {
avgtime[j] = avgtime[j]/(double)(NTIMES-1);
printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j],
1.0E-06 * bytes[j]/mintime[j],
avgtime[j],
mintime[j],
maxtime[j]);
}
printf(HLINE);
/* --- Check Results --- */
checkSTREAMresults();
printf(HLINE);
return 0;
}
# define M 20
int
checktick()
{
int i, minDelta, Delta;
double t1, t2, timesfound[M];
/* Collect a sequence of M unique time values from the system. */
for (i = 0; i < M; i++) {
t1 = mysecond();
while( ((t2=mysecond()) - t1) < 1.0E-6 )
;
timesfound[i] = t1 = t2;
}
/*
* Determine the minimum difference between these M values.
* This result will be our estimate (in microseconds) for the
* clock granularity.
*/
minDelta = 1000000;
for (i = 1; i < M; i++) {
Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1]));
minDelta = MIN(minDelta, MAX(Delta,0));
}
return(minDelta);
}
/* A gettimeofday routine to give access to the wall
clock timer on most UNIX-like systems. */
#include <sys/time.h>
double mysecond()
{
struct timeval tp;
struct timezone tzp;
int i;
i = gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
void checkSTREAMresults ()
{
double aj,bj,cj,scalar;
double asum,bsum,csum;
double epsilon;
int j,k;
/* reproduce initialization */
aj = 1.0;
bj = 2.0;
cj = 0.0;
/* a[] is modified during timing check */
aj = 2.0E0 * aj;
/* now execute timing loop */
scalar = 3.0;
for (k=0; k<NTIMES; k++)
{
cj = aj;
bj = scalar*cj;
cj = aj+bj;
aj = bj+scalar*cj;
}
aj = aj * (double) (N);
bj = bj * (double) (N);
cj = cj * (double) (N);
asum = 0.0;
bsum = 0.0;
csum = 0.0;
for (j=0; j<N; j++) {
asum += a[j];
bsum += b[j];
csum += c[j];
}
#ifdef VERBOSE
printf ("Results Comparison: \n");
printf (" Expected : %f %f %f \n",aj,bj,cj);
printf (" Observed : %f %f %f \n",asum,bsum,csum);
#endif
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif
epsilon = 1.e-8;
if (abs(aj-asum)/asum > epsilon) {
printf ("Failed Validation on array a[]\n");
printf (" Expected : %f \n",aj);
printf (" Observed : %f \n",asum);
}
else if (abs(bj-bsum)/bsum > epsilon) {
printf ("Failed Validation on array b[]\n");
printf (" Expected : %f \n",bj);
printf (" Observed : %f \n",bsum);
}
else if (abs(cj-csum)/csum > epsilon) {
printf ("Failed Validation on array c[]\n");
printf (" Expected : %f \n",cj);
printf (" Observed : %f \n",csum);
}
else {
printf ("Solution Validates\n");
}
}
void tuned_STREAM_Copy()
{
int j;
#pragma omp parallel for
for (j=0; j<N; j++)
c[j] = a[j];
}
void tuned_STREAM_Scale(double scalar)
{
int j;
#pragma omp parallel for
for (j=0; j<N; j++)
b[j] = scalar*c[j];
}
void tuned_STREAM_Add()
{
int j;
#pragma omp parallel for
for (j=0; j<N; j++)
c[j] = a[j]+b[j];
}
void tuned_STREAM_Triad(double scalar)
{
int j;
#pragma omp parallel for
for (j=0; j<N; j++)
a[j] = b[j]+scalar*c[j];
}
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
ExceptionInfo *exception)
{
ColorspaceType
colorspace;
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
colorspace=image->colorspace;
type=IdentifyImageType(image,exception);
if (IsGrayImageType(type))
colorspace=GRAYColorspace;
return(colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
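/*
  Illustrative call (assuming an initialized Image and ExceptionInfo, as in
  the callers later in this file):

    if (sRGBTransformImage(image,LabColorspace,exception) == MagickFalse)
      return(MagickFalse);
*/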
static inline void ConvertAdobe98ToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertDisplayP3ToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertProPhotoToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertRGBToCMY(const double red,const double green,
const double blue,double *cyan,double *magenta,double *yellow)
{
*cyan=QuantumScale*(QuantumRange-red);
*magenta=QuantumScale*(QuantumRange-green);
*yellow=QuantumScale*(QuantumRange-blue);
}
static void ConvertRGBToAdobe98(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}
static void ConvertRGBToDisplayP3(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}
static void ConvertRGBToProPhoto(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}
static inline void ConvertXYZToLMS(const double x,const double y,
const double z,double *L,double *M,double *S)
{
*L=0.7328*x+0.4296*y-0.1624*z;
*M=(-0.7036*x+1.6975*y+0.0061*z);
*S=0.0030*x+0.0136*y+0.9834*z;
}
static void ConvertRGBToLMS(const double red,const double green,
const double blue,double *L,double *M,double *S)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLMS(X,Y,Z,L,M,S);
}
static void ConvertRGBToLuv(const double red,const double green,
const double blue,const IlluminantType illuminant,double *L,double *u,
double *v)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}
static void ConvertRGBToxyY(const double red,const double green,
const double blue,double *low_x,double *low_y,double *cap_Y)
{
double
gamma,
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
gamma=PerceptibleReciprocal(X+Y+Z);
*low_x=gamma*X;
*low_y=gamma*Y;
*cap_Y=Y;
}
static inline void ConvertXYZToJzazbz(const double X,const double Y,
const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15 /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)
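/*
  The Lp/Mp/Sp computations below apply the PQ-style transfer function used
  by Jzazbz: with t = value/white_luminance,

    encoded = pow((c1 + c2*pow(t,n)) / (1.0 + c3*pow(t,n)), p)
*/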
double
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
Zp=Z;
L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
Iz=0.5*Lp+0.5*Mp;
*az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
*bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
*Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
static inline void ConvertJzazbzToXYZ(const double Jz,const double az,
const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
double
azz,
bzz,
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
gamma=Jz+Jzazbz_d0;
Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
azz=az-0.5;
bzz=bz-0.5;
Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
gamma=pow(Lp,1.0/Jzazbz_p);
L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Mp,1.0/Jzazbz_p);
M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Sp,1.0/Jzazbz_p);
S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
*X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
*Y=(Yp+(Jzazbz_g-1.0)*(*X))/Jzazbz_g;
*Z=Zp;
}
static void ConvertRGBToJzazbz(const double red,const double green,
const double blue,const double white_luminance,double *Jz,double *az,
double *bz)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);  /* argument order fixed: green and blue were swapped */
ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
static void ConvertJzazbzToRGB(const double Jz,const double az,
const double bz,const double white_luminance,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);  /* argument order fixed: green and blue were swapped */
}
static void ConvertRGBToYDbDr(const double red,const double green,
const double blue,double *Y,double *Db,double *Dr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
*Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}
static void ConvertRGBToYIQ(const double red,const double green,
const double blue,double *Y,double *I,double *Q)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}
static void ConvertRGBToYPbPr(const double red,const double green,
const double blue,double *Y,double *Pb,double *Pr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
*Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}
static void ConvertRGBToYCbCr(const double red,const double green,
const double blue,double *Y,double *Cb,double *Cr)
{
ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
const double blue,double *Y,double *U,double *V)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
*V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}
static MagickBooleanType sRGBTransformImage(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertRGBToCMYK(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158*
DecodePixelGamma(GetPixelGreen(image,q))+0.072186*
DecodePixelGamma(GetPixelBlue(image,q));
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case Adobe98Colorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from sRGB to target colorspace.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case Adobe98Colorspace:
{
ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
break;
}
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case DisplayP3Colorspace:
{
ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case JzazbzColorspace:
{
ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case ProPhotoColorspace:
{
ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002*
PerceptibleReciprocal(film_gamma)))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) DecodePixelGamma((MagickRealType)
GetPixelRed(image,q));
green=(double) DecodePixelGamma((MagickRealType)
GetPixelGreen(image,q));
blue=(double) DecodePixelGamma((MagickRealType)
GetPixelBlue(image,q));
SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
q);
SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I2 and I3, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. C1 zero is at 156 and C2 zero is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
x_map[i].y=(-0.003296)*i;
x_map[i].z=0.009410*i;
y_map[i].x=0.010566*i;
y_map[i].y=(-0.006471)*i;
y_map[i].z=(-0.007880)*i;
z_map[i].x=0.002052*i;
z_map[i].y=0.009768*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
x_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].x=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
z_map[i].y=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
unsigned int
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(image,q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(image,q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(image,q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
primary_info.z;
SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
unsigned int
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
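%  A minimal usage sketch (illustrative; assumes an image and exception
%  acquired elsewhere): relabel an image as grayscale without converting
%  its pixel values.
%
%    if (SetImageColorspace(image,GRAYColorspace,exception) == MagickFalse)
%      return(MagickFalse);
%
%  Note that SetImageColorspace() only updates the colorspace member
%  (resetting gamma, rendering intent, and chromaticity as a side effect);
%  use TransformImageColorspace() to convert the pixel data as well.
%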
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
ImageType
type;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->colorspace == colorspace)
return(MagickTrue);
image->colorspace=colorspace;
image->rendering_intent=UndefinedIntent;
image->gamma=1.000/2.200;
(void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
type=image->type;
if (IsGrayColorspace(colorspace) != MagickFalse)
{
if (colorspace == LinearGRAYColorspace)
image->gamma=1.000;
type=GrayscaleType;
}
else
if ((IsRGBColorspace(colorspace) != MagickFalse) ||
(colorspace == XYZColorspace) || (colorspace == xyYColorspace))
image->gamma=1.000;
else
{
image->rendering_intent=PerceptualIntent;
image->chromaticity.red_primary.x=0.6400;
image->chromaticity.red_primary.y=0.3300;
image->chromaticity.red_primary.z=0.0300;
image->chromaticity.green_primary.x=0.3000;
image->chromaticity.green_primary.y=0.6000;
image->chromaticity.green_primary.z=0.1000;
image->chromaticity.blue_primary.x=0.1500;
image->chromaticity.blue_primary.y=0.0600;
image->chromaticity.blue_primary.z=0.7900;
image->chromaticity.white_point.x=0.3127;
image->chromaticity.white_point.y=0.3290;
image->chromaticity.white_point.z=0.3583;
}
status=SyncImagePixelCache(image,exception);
image->type=type;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities, and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
% MagickBooleanType SetImageGray(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
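%  A usage sketch (illustrative): opportunistically demote an
%  sRGB-compatible image whose pixels are all gray.
%
%    if (SetImageGray(image,exception) != MagickFalse)
%      { /* image->colorspace is now GRAYColorspace */ }
%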
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
ExceptionInfo *exception)
{
const char
*value;
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsImageGray(image) != MagickFalse)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
if (IsStringFalse(value) != MagickFalse)
return(MagickFalse);
type=IdentifyImageGray(image,exception);
if (type == UndefinedType)
return(MagickFalse);
image->colorspace=GRAYColorspace;
if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange, and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
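%  A usage sketch (illustrative):
%
%    if (SetImageMonochrome(image,exception) != MagickFalse)
%      { /* image->type is now BilevelType */ }
%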
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
ExceptionInfo *exception)
{
MagickBooleanType
is_bilevel;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsImageMonochrome(image) != MagickFalse)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
is_bilevel=IdentifyImageMonochrome(image,exception);
if (is_bilevel == MagickFalse)
return(MagickFalse);
image->colorspace=GRAYColorspace;
if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
return(MagickFalse);
image->type=BilevelType;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
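%  A usage sketch (illustrative, assuming image and exception acquired
%  elsewhere): convert the pixel data to CMYK, passing through sRGB when
%  the source colorspace is not already sRGB.
%
%    status=TransformImageColorspace(image,CMYKColorspace,exception);
%    if (status == MagickFalse)
%      return(MagickFalse);
%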
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == colorspace)
return(SetImageColorspace(image,colorspace,exception));
(void) DeleteImageProfile(image,"icc");
(void) DeleteImageProfile(image,"icm");
if (colorspace == UndefinedColorspace)
return(SetImageColorspace(image,colorspace,exception));
/*
Convert the reference image from an alternate colorspace to sRGB.
*/
if (IssRGBColorspace(colorspace) != MagickFalse)
return(TransformsRGBImage(image,exception));
status=MagickTrue;
if (IssRGBColorspace(image->colorspace) == MagickFalse)
status=TransformsRGBImage(image,exception);
if (status == MagickFalse)
return(status);
/*
Convert the reference image from sRGB to an alternate colorspace.
*/
if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
status=MagickFalse;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
const double yellow,double *red,double *green,double *blue)
{
*red=QuantumRange*(1.0-cyan);
*green=QuantumRange*(1.0-magenta);
*blue=QuantumRange*(1.0-yellow);
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
double *X,double *Y,double *Z)
{
*X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
*Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
*Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}
static inline void ConvertLMSToRGB(const double L,const double M,
const double S,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
const double v,const IlluminantType illuminant,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
if (value <= 0.0)
return(0);
if (value >= 1388.0)
return(1388);
return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
const double b,const IlluminantType illuminant,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
const double cap_Y,double *red,double *green,double *blue)
{
double
gamma,
X,
Y,
Z;
gamma=PerceptibleReciprocal(low_y);
X=gamma*cap_Y*low_x;
Y=cap_Y;
Z=gamma*cap_Y*(1.0-low_x-low_y);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
double *red,double *green,double *blue)
{
*red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+
1.4019995886561440468*(Pr-0.5));
*green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)-
0.71413649331646789076*(Pr-0.5));
*blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+
2.1453384174593273e-06*(Pr-0.5));
}
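/*
  Note (added): the YPbPr coefficients above are the numerical inverse of
  the Rec. 601 forward matrix (Y = 0.298839*R+0.586811*G+0.114350*B) used by
  sRGBTransformImage(); the near-zero and near-unity terms are round-off
  from inverting that matrix.
*/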
static void ConvertYCbCrToRGB(const double Y,const double Cb,
const double Cr,double *red,double *green,double *blue)
{
ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
double *red,double *green,double *blue)
{
*red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754*
(Q-0.5));
*green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427*
(Q-0.5));
*blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374*
(Q-0.5));
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
double *red,double *green,double *blue)
{
*red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
0.52591263066186533*(Dr-0.5));
*green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
0.26789932820759876*(Dr-0.5));
*blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
7.9202543533108e-05*(Dr-0.5));
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
double *red,double *green,double *blue)
{
*red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825*
(V-0.5));
*green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797*
(V-0.5));
*blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04*
(V-0.5));
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
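/*
  Note (added): to the precision printed, YCCMap[i] equals i/1388.0, so the
  table is a linear rescaling of the index range 0..1388 onto 0.0..1.0.
*/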
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158*
EncodePixelGamma(GetPixelGreen(image,q))+0.072186*
EncodePixelGamma(GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
Transform GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case Adobe98Colorspace:
case CMYColorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case Adobe98Colorspace:
{
ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case DisplayP3Colorspace:
{
ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case ProPhotoColorspace:
{
ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma))-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I2 and I3, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is at 156 and C2 zero is at 137.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
atomic.c | /* Copyright (C) 2005-2020 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file contains helpers for the ATOMIC construct. */
#include "libgomp.h"
/* This mutex is used when atomic operations don't exist for the target
in the mode requested. The result is not globally atomic, but works so
long as all parallel references are within #pragma omp atomic directives.
According to responses received from omp@openmp.org, this appears to be within
spec. Which makes sense, since that's how several other compilers
handle this situation as well. */
static gomp_mutex_t atomic_lock;
void
GOMP_atomic_start (void)
{
gomp_mutex_lock (&atomic_lock);
}
void
GOMP_atomic_end (void)
{
gomp_mutex_unlock (&atomic_lock);
}
#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_atomic (void)
{
gomp_mutex_init (&atomic_lock);
}
#endif
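/* Usage sketch (illustrative assumption): for a type with no hardware
   atomic support, the compiler may lower "#pragma omp atomic" roughly as
       GOMP_atomic_start ();
       *addr = *addr + expr;
       GOMP_atomic_end ();
   which serializes all such updates through atomic_lock above.  */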
|
GB_binop__lt_fp64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__lt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp64)
// A*D function (colscale): GB (_AxD__lt_fp64)
// D*A function (rowscale): GB (_DxB__lt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp64)
// C=scalar+B GB (_bind1st__lt_fp64)
// C=scalar+B' GB (_bind1st_tran__lt_fp64)
// C=A+scalar GB (_bind2nd__lt_fp64)
// C=A'+scalar GB (_bind2nd_tran__lt_fp64)
// C type: bool
// A type: double
// A pattern? 0
// B type: double
// B pattern? 0
// BinaryOp: cij = (aij < bij)
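// Illustrative sketch (not generated code): the scalar semantics every kernel
// in this file applies entry-wise.  A user typically reaches these kernels
// through the polymorphic GraphBLAS API; assuming double matrices A and B and
// a bool matrix C of matching dimensions already exist:
//
//      // C = A .< B, entry-wise over the intersection of the patterns
//      GrB_eWiseMult (C, NULL, NULL, GrB_LT_FP64, A, B, NULL) ;
//
// Each kernel below then evaluates bool cij = (aij < bij) per entry.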
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_FP64 || GxB_NO_LT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
double alpha_scalar ;
double beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((double *) alpha_scalar_in)) ;
beta_scalar = (*((double *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
OpenCLCacheInfo *info)
{
ssize_t
i;
for (i=0; i < (ssize_t) info->event_count; i++)
clEnv->library->clReleaseEvent(info->events[i]);
info->events=(cl_event *) RelinquishMagickMemory(info->events);
DestroySemaphoreInfo(&info->events_semaphore);
if (info->buffer != (cl_mem) NULL)
{
clEnv->library->clReleaseMemObject(info->buffer);
info->buffer=(cl_mem) NULL;
}
return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
void *user_data)
{
MagickCLEnv
clEnv;
OpenCLCacheInfo
*info;
PixelPacket
*pixels;
ssize_t
i;
magick_unreferenced(event);
magick_unreferenced(event_command_exec_status);
info=(OpenCLCacheInfo *) user_data;
clEnv=GetDefaultOpenCLEnv();
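/*
  Walk the events from newest to oldest: if any is still executing,
  re-register this callback on it and return; the pixels are relinquished
  only by the invocation that finds every event complete.
*/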
for (i=(ssize_t)info->event_count-1; i >= 0; i--)
{
cl_int
event_status;
cl_uint
status;
status=clEnv->library->clGetEventInfo(info->events[i],
CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
if ((status == CL_SUCCESS) && (event_status != CL_COMPLETE))
{
clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
&RelinquishPixelCachePixelsDelayed,info);
return;
}
}
pixels=info->pixels;
RelinquishMagickResource(MemoryResource,info->length);
(void) RelinquishOpenCLCacheInfo(clEnv,info);
(void) RelinquishAlignedMemory(pixels);
}
static MagickBooleanType RelinquishOpenCLBuffer(
CacheInfo *magick_restrict cache_info)
{
MagickCLEnv
clEnv;
assert(cache_info != (CacheInfo *) NULL);
if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
return(MagickFalse);
RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
return(MagickTrue);
}
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
cl_uint *event_count)
{
cl_event
*events;
register size_t
i;
assert(opencl_info != (OpenCLCacheInfo *) NULL);
events=(cl_event *) NULL;
LockSemaphoreInfo(opencl_info->events_semaphore);
*event_count=opencl_info->event_count;
if (*event_count > 0)
{
events=(cl_event *) AcquireQuantumMemory(*event_count,sizeof(*events));
if (events == (cl_event *) NULL)
*event_count=0;
else
{
for (i=0; i < opencl_info->event_count; i++)
events[i]=opencl_info->events[i];
}
}
UnlockSemaphoreInfo(opencl_info->events_semaphore);
return(events);
}
#endif
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A d d O p e n C L E v e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddOpenCLEvent() adds an event to the list of operations the next operation
% should wait for.
%
% The format of the AddOpenCLEvent() method is:
%
% void AddOpenCLEvent(const Image *image,cl_event event)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event: the event that should be added.
%
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
CacheInfo
*magick_restrict cache_info;
MagickCLEnv
clEnv;
assert(image != (const Image *) NULL);
assert(event != (cl_event) NULL);
cache_info=(CacheInfo *)image->cache;
assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
clEnv=GetDefaultOpenCLEnv();
if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
{
clEnv->library->clWaitForEvents(1,&event);
return;
}
LockSemaphoreInfo(cache_info->opencl->events_semaphore);
if (cache_info->opencl->events == (cl_event *) NULL)
{
cache_info->opencl->events=AcquireMagickMemory(sizeof(
*cache_info->opencl->events));
cache_info->opencl->event_count=1;
}
else
cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
if (cache_info->opencl->events == (cl_event *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireCriticalMemory(sizeof(*cache_info));
(void) ResetMagickMemory(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->channels=4;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
cache_info->semaphore=AllocateSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AllocateSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
register ssize_t
i;
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
sizeof(**nexus_info));
if (nexus_info[0] == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(nexus_info[0],0,number_threads*sizeof(**nexus_info));
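/*
  nexus_info[0] owns a single contiguous block of NexusInfo structs; the
  remaining slots alias into that block, which is why DestroyPixelCacheNexus()
  frees only nexus_info[0] and then the pointer array itself.
*/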
for (i=0; i < (ssize_t) number_threads; i++)
{
nexus_info[i]=(&nexus_info[0][i]);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
MagickSizeType *length,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
(void) exception;
*length=0;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((const void *) NULL);
*length=cache_info->length;
return((const void *) cache_info->pixels);
}
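/*
  Usage sketch (illustrative, not part of this file): read back the raw pixel
  block when the cache is memory- or map-backed; image and exception are
  assumed to already exist.

    MagickSizeType length;
    const void *pixels=AcquirePixelCachePixels(image,&length,exception);
    if (pixels != (const void *) NULL)
      {
        // length bytes of PixelPacket data, valid only until the cache
        // is reallocated or destroyed.
      }
*/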
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
cache_semaphore=AllocateSemaphoreInfo();
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% void CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no other component state to release; just destroy the semaphore */
DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
number_pixels;
NexusInfo
**magick_restrict clip_nexus,
**magick_restrict image_nexus;
register const PixelPacket
*magick_restrict r;
register IndexPacket
*magick_restrict nexus_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->clip_mask == (Image *) NULL) ||
(image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,image_nexus[0],
exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],exception);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
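/*
  Copy authentic pixels through only where the clip mask is more than 50%
  white; elsewhere the nexus keeps its existing values.
*/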
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
if (GetPixelIntensity(image,r) > (QuantumRange/2.0))
{
SetPixelRed(q,GetPixelRed(p));
SetPixelGreen(q,GetPixelGreen(p));
SetPixelBlue(q,GetPixelBlue(p));
SetPixelOpacity(q,GetPixelOpacity(p));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
}
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*magick_restrict clone_info;
const CacheInfo
*magick_restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict source_info;
assert(clone != (Cache) NULL);
source_info=(CacheInfo *) clone;
assert(source_info->signature == MagickCoreSignature);
if (source_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
source_info->filename);
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
source_info->methods=cache_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info,
ExceptionInfo *exception)
{
MagickSizeType
extent;
size_t
quantum;
ssize_t
count;
struct stat
file_stats;
unsigned char
*buffer;
/*
Clone pixel cache on disk with identical morphology.
*/
if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
(OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
return(MagickFalse);
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
extent=0;
while ((count=read(cache_info->file,buffer,quantum)) > 0)
{
ssize_t
number_bytes;
number_bytes=write(clone_info->file,buffer,(size_t) count);
if (number_bytes != count)
break;
extent+=number_bytes;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
if (extent != cache_info->length)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
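/*
  Pick the clone's OpenMP thread count: one thread when multithreading is
  off, at most two when either cache is disk- or server-backed, otherwise
  roughly one thread per 256 rows, capped by the thread resource limit.
*/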
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && \
((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && \
((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
if ((cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->active_index_channel == clone_info->active_index_channel))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) ||
(clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
(void) memcpy(clone_info->indexes,cache_info->indexes,
cache_info->columns*cache_info->rows*
sizeof(*cache_info->indexes));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info,exception));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
if ((cache_nexus == (NexusInfo **) NULL) ||
(clone_nexus == (NexusInfo **) NULL))
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->pixels);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) ResetMagickMemory(clone_nexus[id]->pixels,0,(size_t)
clone_nexus[id]->length);
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->active_index_channel != MagickFalse) &&
(clone_info->active_index_channel != MagickFalse))
{
/*
Clone indexes.
*/
length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
PixelPacket
*pixels;
RectangleInfo
region;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
region.width=cache_info->columns;
region.height=1;
region.x=0;
region.y=y;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
cache_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
region.width=clone_info->columns;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
clone_nexus[id],exception);
if (pixels == (PixelPacket *) NULL)
continue;
(void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
}
}
cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->cache == (void *) NULL)
return;
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
{
cache_info->methods.destroy_pixel_handler(image);
return;
}
image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
int
status;
status=(-1);
if (cache_info->file != -1)
{
status=close(cache_info->file);
cache_info->file=(-1);
RelinquishMagickResource(FileResource,1);
}
return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
switch (cache_info->type)
{
case MemoryCache:
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
{
cache_info->pixels=(PixelPacket *) NULL;
break;
}
#endif
if (cache_info->mapped == MagickFalse)
cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
cache_info->pixels);
else
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
RelinquishMagickResource(MemoryResource,cache_info->length);
break;
}
case MapCache:
{
(void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
cache_info->pixels=(PixelPacket *) NULL;
if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(MapResource,cache_info->length);
}
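/* fall through: a map cache is also backed by a disk file whose
descriptor must be closed and resources relinquished */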
case DiskCache:
{
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
(void) RelinquishUniqueFileResource(cache_info->cache_filename);
*cache_info->cache_filename='\0';
RelinquishMagickResource(DiskResource,cache_info->length);
break;
}
case DistributedCache:
{
*cache_info->cache_filename='\0';
(void) RelinquishDistributePixelCache((DistributeCacheInfo *)
cache_info->server_info);
break;
}
default:
break;
}
cache_info->type=UndefinedCache;
cache_info->mapped=MagickFalse;
cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
LockSemaphoreInfo(cache_info->semaphore);
cache_info->reference_count--;
if (cache_info->reference_count != 0)
{
UnlockSemaphoreInfo(cache_info->semaphore);
return((Cache) NULL);
}
UnlockSemaphoreInfo(cache_info->semaphore);
if (cache_info->debug != MagickFalse)
{
char
message[MaxTextExtent];
(void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
cache_info->filename);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
RelinquishPixelCachePixels(cache_info);
if (cache_info->server_info != (DistributeCacheInfo *) NULL)
cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
cache_info->server_info);
if (cache_info->nexus_info != (NexusInfo **) NULL)
cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
cache_info->number_threads);
if (cache_info->random_info != (RandomInfo *) NULL)
cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&cache_info->file_semaphore);
if (cache_info->semaphore != (SemaphoreInfo *) NULL)
DestroySemaphoreInfo(&cache_info->semaphore);
cache_info->signature=(~MagickCoreSignature);
cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
cache=(Cache) NULL;
return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
if (nexus_info->mapped == MagickFalse)
(void) RelinquishAlignedMemory(nexus_info->cache);
else
(void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
nexus_info->cache=(PixelPacket *) NULL;
nexus_info->pixels=(PixelPacket *) NULL;
nexus_info->indexes=(IndexPacket *) NULL;
nexus_info->length=0;
nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
const size_t number_threads)
{
register ssize_t
i;
assert(nexus_info != (NexusInfo **) NULL);
for (i=0; i < (ssize_t) number_threads; i++)
{
if (nexus_info[i]->cache != (PixelPacket *) NULL)
RelinquishCacheNexusPixels(nexus_info[i]);
nexus_info[i]->signature=(~MagickCoreSignature);
}
nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_indexes_from_handler !=
(GetAuthenticIndexesFromHandler) NULL)
return(cache_info->methods.get_authentic_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
cl_context
context;
cl_int
status;
MagickCLEnv
clEnv;
assert(image != (const Image *) NULL);
cache_info=(CacheInfo *)image->cache;
if (cache_info->type == UndefinedCache)
SyncImagePixelCache((Image *) image,exception);
if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
return((cl_mem) NULL);
LockSemaphoreInfo(cache_info->semaphore);
clEnv=GetDefaultOpenCLEnv();
if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
{
assert(cache_info->pixels != NULL);
context=GetOpenCLContext(clEnv);
cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
sizeof(*cache_info->opencl));
(void) ResetMagickMemory(cache_info->opencl,0,
sizeof(*cache_info->opencl));
cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
cache_info->opencl->length=cache_info->length;
cache_info->opencl->pixels=cache_info->pixels;
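/*
  CL_MEM_USE_HOST_PTR makes the device buffer alias the existing host
  pixel block rather than copying it.
*/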
cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
if (status != CL_SUCCESS)
cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
}
if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
UnlockSemaphoreInfo(cache_info->semaphore);
if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
return((cl_mem) NULL);
return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
PixelPacket
*magick_restrict pixels;
/*
Transfer pixels from the cache.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
nexus_info,exception);
if (pixels == (PixelPacket *) NULL)
return((PixelPacket *) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(pixels);
if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
return((PixelPacket *) NULL);
if (cache_info->active_index_channel != MagickFalse)
if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse)
return((PixelPacket *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_pixels_from_handler !=
(GetAuthenticPixelsFromHandler) NULL)
return(cache_info->methods.get_authentic_pixels_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseudoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
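%  For example, a minimal sketch of the get/modify/sync cycle (error handling
%  is abbreviated, and image and exception are assumed to be valid):
%
%      PixelPacket
%        *q;
%
%      ssize_t
%        x;
%
%      q=GetAuthenticPixels(image,0,0,image->columns,1,exception);
%      if (q != (PixelPacket *) NULL)
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%            SetPixelRed(q+x,QuantumRange-GetPixelRed(q+x));
%          if (SyncAuthenticPixels(image,exception) == MagickFalse)
%            { /* the updates were not saved to the pixel cache */ }
%        }
%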
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_authentic_pixels_handler !=
(GetAuthenticPixelsHandler) NULL)
return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
rows,exception));
assert(id < (int) cache_info->number_threads);
return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return((PixelPacket *) NULL);
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O p e n C L E v e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOpenCLEvents() returns the events that the next operation should wait
% for. The argument event_count is set to the number of events.
%
% The format of the GetOpenCLEvents() method is:
%
%      cl_event *GetOpenCLEvents(const Image *image,
%        cl_uint *event_count)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event_count: will be set to the number of events.
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
cl_uint *event_count)
{
CacheInfo
*magick_restrict cache_info;
cl_event
*events;
assert(image != (const Image *) NULL);
assert(event_count != (cl_uint *) NULL);
cache_info=(CacheInfo *) image->cache;
*event_count=0;
events=(cl_event *) NULL;
if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
events=CopyOpenCLEvents(cache_info->opencl,event_count);
return(events);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
const Image *magick_restrict image)
{
CacheInfo
*magick_restrict cache_info;
/*
Does the image match the pixel cache morphology?
*/
cache_info=(CacheInfo *) image->cache;
if ((image->storage_class != cache_info->storage_class) ||
(image->colorspace != cache_info->colorspace) ||
(image->channels != cache_info->channels) ||
(image->columns != cache_info->columns) ||
(image->rows != cache_info->rows) ||
(cache_info->nexus_info == (NexusInfo **) NULL))
return(MagickFalse);
return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickBooleanType
destroy,
status;
static MagickSizeType
cache_timelimit = MagickResourceInfinity,
cpu_throttle = MagickResourceInfinity,
cycles = 0;
status=MagickTrue;
if (cpu_throttle == MagickResourceInfinity)
cpu_throttle=GetMagickResourceLimit(ThrottleResource);
if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
MagickDelay(cpu_throttle);
if (cache_epoch == 0)
{
/*
Set the expire time in seconds.
*/
cache_epoch=time((time_t *) NULL);
cache_timelimit=GetMagickResourceLimit(TimeResource);
}
if ((cache_timelimit != MagickResourceInfinity) &&
((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
{
#if defined(ECANCELED)
errno=ECANCELED;
#endif
ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
}
LockSemaphoreInfo(image->semaphore);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
destroy=MagickFalse;
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
LockSemaphoreInfo(cache_info->semaphore);
if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
{
CacheInfo
*clone_info;
Image
clone_image;
/*
Clone pixel cache.
*/
clone_image=(*image);
clone_image.semaphore=AllocateSemaphoreInfo();
clone_image.reference_count=1;
clone_image.cache=ClonePixelCache(cache_info);
clone_info=(CacheInfo *) clone_image.cache;
status=OpenPixelCache(&clone_image,IOMode,exception);
if (status == MagickFalse)
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
else
{
if (clone != MagickFalse)
status=ClonePixelCacheRepository(clone_info,cache_info,
exception);
if (status == MagickFalse)
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
else
{
destroy=MagickTrue;
image->cache=clone_info;
}
}
DestroySemaphoreInfo(&clone_image.semaphore);
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
if (destroy != MagickFalse)
cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
if (status != MagickFalse)
{
/*
Ensure the image matches the pixel cache morphology.
*/
image->type=UndefinedType;
if (ValidatePixelCacheMorphology(image) == MagickFalse)
{
status=OpenPixelCache(image,IOMode,exception);
cache_info=(CacheInfo *) image->cache;
if (cache_info->type == DiskCache)
(void) ClosePixelCacheOnDisk(cache_info);
}
}
UnlockSemaphoreInfo(image->semaphore);
if (status == MagickFalse)
return((Cache) NULL);
return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetPixelCacheType(const Image *image)
{
return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
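%  For example, a minimal sketch (assumes a valid image and exception):
%
%      PixelPacket
%        pixel;
%
%      if (GetOneAuthenticPixel(image,0,0,&pixel,exception) != MagickFalse)
%        { /* pixel holds the authentic pixel at (0,0) */ }
%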
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
PixelPacket
*magick_restrict pixels;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*pixel=image->background_color;
if (cache_info->methods.get_one_authentic_pixel_from_handler !=
(GetOneAuthenticPixelFromHandler) NULL)
return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
pixel,exception));
pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
if (pixels == (PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
PixelPacket
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*pixel=image->background_color;
assert(id < (int) cache_info->number_threads);
pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (pixels == (PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
register const IndexPacket
*magick_restrict indexes;
register const PixelPacket
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
1UL,1UL,cache_info->nexus_info[id],exception);
GetMagickPixelPacket(image,pixel);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
indexes=GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]);
SetMagickPixelPacket(image,pixels,indexes,pixel);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
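%  For example, a minimal sketch that samples beyond the canvas with the tile
%  method (assumes a valid image and exception):
%
%      PixelPacket
%        pixel;
%
%      if (GetOneVirtualMethodPixel(image,TileVirtualPixelMethod,-1,-1,&pixel,
%          exception) != MagickFalse)
%        { /* pixel holds the tiled virtual pixel at (-1,-1) */ }
%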
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const PixelPacket
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*pixel=image->background_color;
if (cache_info->methods.get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
virtual_pixel_method,x,y,pixel,exception));
assert(id < (int) cache_info->number_threads);
pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const PixelPacket
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
*pixel=image->background_color;
if (cache_info->methods.get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
assert(id < (int) cache_info->number_threads);
pixels=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
1UL,1UL,cache_info->nexus_info[id],exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
PixelPacket *pixel,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const PixelPacket
*magick_restrict pixels;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
*pixel=image->background_color;
pixels=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
cache_info->nexus_info[id],exception);
if (pixels == (const PixelPacket *) NULL)
return(MagickFalse);
*pixel=(*pixels);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
%      size_t GetPixelCacheChannels(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
assert(cache_methods != (CacheMethods *) NULL);
(void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
cache_methods->get_authentic_indexes_from_handler=
GetAuthenticIndexesFromCache;
cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
cache_methods->get_one_authentic_pixel_from_handler=
GetOneAuthenticPixelFromCache;
cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
NexusInfo *nexus_info)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
extent;
  assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
if (extent == 0)
return((MagickSizeType) cache_info->columns*cache_info->rows);
return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
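%  For example, a minimal sketch (the result is only non-NULL for memory- or
%  map-backed caches):
%
%      MagickSizeType
%        length;
%
%      void
%        *pixels;
%
%      pixels=GetPixelCachePixels(image,&length,exception);
%      if (pixels != (void *) NULL)
%        { /* pixels addresses the raw cache; length is its extent */ }
%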
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
assert(length != (MagickSizeType *) NULL);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
(void) exception;
*length=cache_info->length;
if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
return((void *) NULL);
return((void *) cache_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
%      ClassType GetPixelCacheStorageClass(const Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o width: the optimal cache tile width in pixels.
%
%    o height: the optimal cache tile height in pixels.
%
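%  For example:
%
%      size_t
%        height,
%        width;
%
%      GetPixelCacheTileSize(image,&width,&height);
%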
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
size_t *height)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
*width=2048UL/sizeof(PixelPacket);
if (GetImagePixelCacheType(image) == DiskCache)
*width=8192UL/sizeof(PixelPacket);
*height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
%      const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
NexusInfo *nexus_info)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->storage_class == UndefinedClass)
return((IndexPacket *) NULL);
return(nexus_info->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
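%  For example, a minimal sketch for a CMYK or PseudoClass image (assumes the
%  pixels were first acquired with GetVirtualPixels()):
%
%      register const IndexPacket
%        *indexes;
%
%      indexes=GetVirtualIndexQueue(image);
%      if (indexes != (const IndexPacket *) NULL)
%        { /* indexes[i] is the black channel or colormap index of pixel i */ }
%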
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelsFromNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t
DitherMatrix[64] =
{
0, 48, 12, 60, 3, 51, 15, 63,
32, 16, 44, 28, 35, 19, 47, 31,
8, 56, 4, 52, 11, 59, 7, 55,
40, 24, 36, 20, 43, 27, 39, 23,
2, 50, 14, 62, 1, 49, 13, 61,
34, 18, 46, 30, 33, 17, 45, 29,
10, 58, 6, 54, 9, 57, 5, 53,
42, 26, 38, 22, 41, 25, 37, 21
};
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
ssize_t
index;
index=x+DitherMatrix[x & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) columns)
return((ssize_t) columns-1L);
return(index);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
ssize_t
index;
index=y+DitherMatrix[y & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) rows)
return((ssize_t) rows-1L);
return(index);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
/*
  VirtualPixelModulo() computes the quotient and remainder of dividing offset
  by extent.  It returns not only the quotient (the tile the offset falls in)
  but also the positive remainder within that tile such that
  0 <= remainder < extent.  This method is essentially ldiv() using floored
  modulo division rather than the default truncated modulo division.
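
  For example, with extent=10: offset=12 yields quotient=1 and remainder=2,
  while offset=-3 yields quotient=-1 and remainder=7 (truncated division
  would yield quotient=0 and remainder=-3).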
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
const size_t extent)
{
MagickModulo
modulo;
modulo.quotient=offset/(ssize_t) extent;
if (offset < 0L)
modulo.quotient--;
modulo.remainder=offset-modulo.quotient*(ssize_t) extent;
return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
IndexPacket
virtual_index;
MagickOffsetType
offset;
MagickSizeType
length,
number_pixels;
NexusInfo
**magick_restrict virtual_nexus;
PixelPacket
*magick_restrict pixels,
virtual_pixel;
RectangleInfo
region;
register const IndexPacket
*magick_restrict virtual_indexes;
register const PixelPacket
*magick_restrict p;
register IndexPacket
*magick_restrict indexes;
register PixelPacket
*magick_restrict q;
register ssize_t
u,
v;
/*
Acquire pixels.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return((const PixelPacket *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
(image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
MagickTrue : MagickFalse,nexus_info,exception);
if (pixels == (PixelPacket *) NULL)
return((const PixelPacket *) NULL);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
nexus_info->region.width-1L;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
(y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
{
MagickBooleanType
status;
/*
Pixel request is inside cache extents.
*/
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(pixels);
status=ReadPixelCachePixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const PixelPacket *) NULL);
if ((cache_info->storage_class == PseudoClass) ||
(cache_info->colorspace == CMYKColorspace))
{
status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const PixelPacket *) NULL);
}
return(pixels);
}
/*
Pixel request is outside cache extents.
*/
q=pixels;
indexes=nexus_info->indexes;
virtual_nexus=AcquirePixelCacheNexus(1);
if (virtual_nexus == (NexusInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"UnableToGetCacheNexus","`%s'",image->filename);
return((const PixelPacket *) NULL);
}
switch (virtual_pixel_method)
{
case BlackVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,0);
SetPixelGreen(&virtual_pixel,0);
SetPixelBlue(&virtual_pixel,0);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
case GrayVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,QuantumRange/2);
SetPixelGreen(&virtual_pixel,QuantumRange/2);
SetPixelBlue(&virtual_pixel,QuantumRange/2);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
case TransparentVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,0);
SetPixelGreen(&virtual_pixel,0);
SetPixelBlue(&virtual_pixel,0);
SetPixelOpacity(&virtual_pixel,TransparentOpacity);
break;
}
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
SetPixelRed(&virtual_pixel,QuantumRange);
SetPixelGreen(&virtual_pixel,QuantumRange);
SetPixelBlue(&virtual_pixel,QuantumRange);
SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
break;
}
default:
{
virtual_pixel=image->background_color;
break;
}
}
virtual_index=0;
for (v=0; v < (ssize_t) rows; v++)
{
ssize_t
y_offset;
y_offset=y+v;
if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
(virtual_pixel_method == UndefinedVirtualPixelMethod))
y_offset=EdgeY(y_offset,cache_info->rows);
for (u=0; u < (ssize_t) columns; u+=length)
{
ssize_t
x_offset;
x_offset=x+u;
length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
(length == 0))
{
MagickModulo
x_modulo,
y_modulo;
/*
Transfer a single pixel.
*/
length=(MagickSizeType) 1;
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
case EdgeVirtualPixelMethod:
default:
{
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),
EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case RandomVirtualPixelMethod:
{
if (cache_info->random_info == (RandomInfo *) NULL)
cache_info->random_info=AcquireRandomInfo();
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
RandomX(cache_info->random_info,cache_info->columns),
RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
*virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case DitherVirtualPixelMethod:
{
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
DitherX(x_offset,cache_info->columns),
DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case TileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case MirrorVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
if ((x_modulo.quotient & 0x01) == 1L)
x_modulo.remainder=(ssize_t) cache_info->columns-
x_modulo.remainder-1L;
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if ((y_modulo.quotient & 0x01) == 1L)
y_modulo.remainder=(ssize_t) cache_info->rows-
y_modulo.remainder-1L;
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case CheckerTileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case HorizontalTileVirtualPixelMethod:
{
if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case VerticalTileVirtualPixelMethod:
{
if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
{
p=(&virtual_pixel);
virtual_indexes=(&virtual_index);
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case HorizontalTileEdgeVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
*virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
case VerticalTileEdgeVirtualPixelMethod:
{
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
*virtual_nexus,exception);
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
*virtual_nexus);
break;
}
}
if (p == (const PixelPacket *) NULL)
break;
*q++=(*p);
if ((indexes != (IndexPacket *) NULL) &&
(virtual_indexes != (const IndexPacket *) NULL))
*indexes++=(*virtual_indexes);
continue;
}
/*
Transfer a run of pixels.
*/
p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
(size_t) length,1UL,*virtual_nexus,exception);
if (p == (const PixelPacket *) NULL)
break;
virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus);
(void) memcpy(q,p,(size_t) length*sizeof(*p));
q+=length;
if ((indexes != (IndexPacket *) NULL) &&
(virtual_indexes != (const IndexPacket *) NULL))
{
(void) memcpy(indexes,virtual_indexes,(size_t) length*
sizeof(*virtual_indexes));
indexes+=length;
}
}
if (u < (ssize_t) columns)
break;
}
/*
Free resources.
*/
virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
if (v < (ssize_t) rows)
return((const PixelPacket *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualIndexQueue() after invoking GetVirtualPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
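%  For example, a minimal read-only sketch (assumes a valid image and
%  exception; the returned pixels must not be modified or freed):
%
%      register const PixelPacket
%        *p;
%
%      register ssize_t
%        x;
%
%      p=GetVirtualPixels(image,0,0,image->columns,1,exception);
%      if (p != (const PixelPacket *) NULL)
%        for (x=0; x < (ssize_t) image->columns; x++)
%          { /* inspect p[x] */ }
%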
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixel_handler !=
(GetVirtualPixelHandler) NULL)
return(cache_info->methods.get_virtual_pixel_handler(image,
GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   G e t V i r t u a l P i x e l s C a c h e                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
%      const PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
%      const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
NexusInfo *nexus_info)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->storage_class == UndefinedClass)
return((PixelPacket *) NULL);
return((const PixelPacket *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o nexus_info: the cache nexus to mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void MagickPixelCompositeMask(const MagickPixelPacket *p,
const MagickRealType alpha,const MagickPixelPacket *q,
const MagickRealType beta,MagickPixelPacket *composite)
{
double
gamma;
if (alpha == TransparentOpacity)
{
*composite=(*q);
return;
}
gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
gamma=PerceptibleReciprocal(gamma);
composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickPixelPacket
alpha,
beta;
MagickSizeType
number_pixels;
NexusInfo
**magick_restrict clip_nexus,
**magick_restrict image_nexus;
register const PixelPacket
*magick_restrict r;
register IndexPacket
*magick_restrict nexus_indexes,
*magick_restrict indexes;
register PixelPacket
*magick_restrict p,
*magick_restrict q;
register ssize_t
i;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
image_nexus=AcquirePixelCacheNexus(1);
clip_nexus=AcquirePixelCacheNexus(1);
if ((image_nexus == (NexusInfo **) NULL) ||
(clip_nexus == (NexusInfo **) NULL))
ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
image_nexus[0],exception);
indexes=image_nexus[0]->indexes;
q=nexus_info->pixels;
nexus_indexes=nexus_info->indexes;
r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod,
nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
nexus_info->region.height,clip_nexus[0],&image->exception);
GetMagickPixelPacket(image,&alpha);
GetMagickPixelPacket(image,&beta);
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (i=0; i < (ssize_t) number_pixels; i++)
{
if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
break;
SetMagickPixelPacket(image,p,indexes+i,&alpha);
SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
alpha.opacity,&beta);
SetPixelRed(q,ClampToQuantum(beta.red));
SetPixelGreen(q,ClampToQuantum(beta.green));
SetPixelBlue(q,ClampToQuantum(beta.blue));
SetPixelOpacity(q,ClampToQuantum(beta.opacity));
if (cache_info->active_index_channel != MagickFalse)
SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
p++;
q++;
r++;
}
clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
image_nexus=DestroyPixelCacheNexus(image_nexus,1);
if (i < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
const MapMode mode)
{
int
file;
/*
Open pixel cache on disk.
*/
if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
return(MagickTrue); /* cache already open and in the proper mode */
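  /*
    An empty filename means acquire a unique temporary file; otherwise open
    the named file, creating it exclusively first and falling back to a
    plain open if it already exists.
  */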
if (*cache_info->cache_filename == '\0')
file=AcquireUniqueFileResource(cache_info->cache_filename);
else
switch (mode)
{
case ReadMode:
{
file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
break;
}
case WriteMode:
{
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
O_BINARY | O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
break;
}
case IOMode:
default:
{
file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
break;
}
}
if (file == -1)
return(MagickFalse);
(void) AcquireMagickResource(FileResource,1);
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->file=file;
cache_info->disk_mode=mode;
return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
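  /*
    Write in chunks of at most SSIZE_MAX bytes, retrying writes interrupted
    by a signal (EINTR); any other short or failed write ends the loop early
    and the byte count written so far is returned.
  */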
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
count,
extent,
offset;
cache_info=(CacheInfo *) image->cache;
if (image->debug != MagickFalse)
{
char
format[MaxTextExtent],
message[MaxTextExtent];
(void) FormatMagickSize(length,MagickFalse,format);
(void) FormatLocaleString(message,MaxTextExtent,
"extend %s (%s[%d], disk, %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
count=(MagickOffsetType) 1;
else
{
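      /*
        Extend the file by writing a single byte at the final offset; on
        most filesystems this is cheaper than zero-filling and may create a
        sparse file.
      */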
extent=(MagickOffsetType) length-1;
count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
"");
if (count != 1)
return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (cache_info->synchronize != MagickFalse)
(void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
}
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
if (offset < 0)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
source_info;
char
format[MaxTextExtent],
message[MaxTextExtent];
const char
*hosts,
*type;
MagickSizeType
length,
number_pixels;
MagickStatusType
status;
size_t
columns,
packet_size;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (cache_anonymous_memory < 0)
{
char
*value;
/*
Does the security policy require anonymous mapping for pixel cache?
*/
cache_anonymous_memory=0;
value=GetPolicyValue("pixel-cache-memory");
if (value == (char *) NULL)
value=GetPolicyValue("cache:memory-map");
if (LocaleCompare(value,"anonymous") == 0)
{
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
cache_anonymous_memory=1;
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
"'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
}
value=DestroyString(value);
}
if ((image->columns == 0) || (image->rows == 0))
ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
image->filename);
length=GetImageListLength(image);
if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
image->filename);
source_info=(*cache_info);
source_info.file=(-1);
(void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
image->filename,(double) GetImageIndexInList(image));
cache_info->mode=mode;
cache_info->rows=image->rows;
cache_info->columns=image->columns;
cache_info->channels=image->channels;
cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
packet_size=sizeof(PixelPacket);
if (cache_info->active_index_channel != MagickFalse)
packet_size+=sizeof(IndexPacket);
length=number_pixels*packet_size;
columns=(size_t) (length/cache_info->rows/packet_size);
if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
((ssize_t) cache_info->rows < 0))
ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
image->filename);
cache_info->length=length;
if (image->ping != MagickFalse)
{
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->type=PingCache;
return(MagickTrue);
}
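  /*
    Prefer an in-memory cache when the area and memory resource limits
    permit; otherwise fall back below to a distributed, disk, or
    memory-mapped cache.
  */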
status=AcquireMagickResource(AreaResource,(MagickSizeType)
cache_info->columns*cache_info->rows);
if (cache_info->mode == PersistMode)
status=MagickFalse;
length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
if ((status != MagickFalse) &&
(length == (MagickSizeType) ((size_t) length)) &&
((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache)))
{
status=AcquireMagickResource(MemoryResource,cache_info->length);
if (status != MagickFalse)
{
status=MagickTrue;
if (cache_anonymous_memory <= 0)
{
cache_info->mapped=MagickFalse;
cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) cache_info->length));
}
else
{
cache_info->mapped=MagickTrue;
cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
cache_info->length);
}
if (cache_info->pixels == (PixelPacket *) NULL)
cache_info->pixels=source_info.pixels;
else
{
/*
Create memory pixel cache.
*/
cache_info->colorspace=image->colorspace;
cache_info->type=MemoryCache;
cache_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
cache_info->indexes=(IndexPacket *) (cache_info->pixels+
number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status&=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
type,(double) cache_info->columns,(double) cache_info->rows,
format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
cache_info->storage_class=image->storage_class;
return(status == 0 ? MagickFalse : MagickTrue);
}
}
}
status=AcquireMagickResource(DiskResource,cache_info->length);
hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
exception);
if ((status == MagickFalse) && (hosts != (const char *) NULL))
{
DistributeCacheInfo
*server_info;
/*
Distribute the pixel cache to a remote server.
*/
server_info=AcquireDistributeCacheInfo(exception);
if (server_info != (DistributeCacheInfo *) NULL)
{
status=OpenDistributePixelCache(server_info,image);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
GetDistributeCacheHostname(server_info));
server_info=DestroyDistributeCacheInfo(server_info);
}
else
{
/*
Create a distributed pixel cache.
*/
status=MagickTrue;
cache_info->type=DistributedCache;
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->server_info=server_info;
(void) FormatLocaleString(cache_info->cache_filename,
MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
(DistributeCacheInfo *) cache_info->server_info),
GetDistributeCachePort((DistributeCacheInfo *)
cache_info->server_info));
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,
format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,GetDistributeCacheFile(
(DistributeCacheInfo *) cache_info->server_info),type,
(double) cache_info->columns,(double) cache_info->rows,
format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
}
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/*
Create pixel cache on disk.
*/
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
(cache_info->mode != PersistMode))
{
(void) ClosePixelCacheOnDisk(cache_info);
*cache_info->cache_filename='\0';
}
if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
image->filename);
return(MagickFalse);
}
status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
cache_info->length);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToExtendCache",
image->filename);
return(MagickFalse);
}
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
if (length != (MagickSizeType) ((size_t) length))
cache_info->type=DiskCache;
else
{
status=AcquireMagickResource(MapResource,cache_info->length);
if (status == MagickFalse)
cache_info->type=DiskCache;
else
if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
{
cache_info->type=DiskCache;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
cache_info->offset,(size_t) cache_info->length);
if (cache_info->pixels == (PixelPacket *) NULL)
{
cache_info->type=DiskCache;
cache_info->pixels=source_info.pixels;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
/*
Create file-backed memory-mapped pixel cache.
*/
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->type=MapCache;
cache_info->mapped=MagickTrue;
cache_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
cache_info->indexes=(IndexPacket *) (cache_info->pixels+
number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,
format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
cache_info->file,type,(double) cache_info->columns,
(double) cache_info->rows,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
}
}
status=MagickTrue;
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,type,(double)
cache_info->columns,(double) cache_info->rows,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; zero initializes a new one.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
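%  A minimal initialization sketch (the cache filename here is an
%  illustrative assumption; status is a MagickBooleanType):
%
%      MagickOffsetType
%        offset = 0;
%
%      status=PersistPixelCache(image,"pixels.cache",MagickFalse,&offset,
%        exception);
%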
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict clone_info;
MagickBooleanType
status;
ssize_t
page_size;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (void *) NULL);
assert(filename != (const char *) NULL);
assert(offset != (MagickOffsetType *) NULL);
page_size=GetMagickPageSize();
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
if (attach != MagickFalse)
{
/*
Attach existing persistent pixel cache.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"attach persistent cache");
(void) CopyMagickString(cache_info->cache_filename,filename,
MaxTextExtent);
cache_info->type=DiskCache;
cache_info->offset=(*offset);
if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
return(MagickFalse);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
return(SyncImagePixelCache(image,exception));
}
/*
Clone persistent pixel cache.
*/
status=AcquireMagickResource(DiskResource,cache_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
clone_info=(CacheInfo *) ClonePixelCache(cache_info);
clone_info->type=DiskCache;
(void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
clone_info->file=(-1);
clone_info->storage_class=cache_info->storage_class;
clone_info->colorspace=cache_info->colorspace;
clone_info->columns=cache_info->columns;
clone_info->rows=cache_info->rows;
clone_info->active_index_channel=cache_info->active_index_channel;
clone_info->mode=PersistMode;
clone_info->length=cache_info->length;
clone_info->channels=cache_info->channels;
clone_info->offset=(*offset);
status=ClonePixelCacheRepository(clone_info,cache_info,exception);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels
%  as defined by the region rectangle and returns a pointer to the region.
%  This region is subsequently transferred to the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  region is successfully allocated, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
const MagickBooleanType clone,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
exception));
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
offset;
MagickSizeType
number_pixels;
PixelPacket
*magick_restrict pixels;
RectangleInfo
region;
/*
Validate pixel cache geometry.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
if (cache_info == (Cache) NULL)
return((PixelPacket *) NULL);
assert(cache_info->signature == MagickCoreSignature);
if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
(y < 0) || (x >= (ssize_t) cache_info->columns) ||
(y >= (ssize_t) cache_info->rows))
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"PixelsAreNotAuthentic","`%s'",image->filename);
return((PixelPacket *) NULL);
}
offset=(MagickOffsetType) y*cache_info->columns+x;
if (offset < 0)
return((PixelPacket *) NULL);
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
if ((MagickSizeType) offset >= number_pixels)
return((PixelPacket *) NULL);
/*
Return pixel cache.
*/
region.x=x;
region.y=y;
region.width=columns;
region.height=rows;
pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,®ion,
(image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
MagickTrue : MagickFalse,nexus_info,exception);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.
%  This region is subsequently transferred to the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  region is successfully allocated, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticIndexQueue() after invoking QueueAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
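%  A minimal usage sketch (assuming a valid image and exception, with the
%  loop variables declared elsewhere): queue one row at a time, initialize
%  it, then sync it back to the cache:
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (PixelPacket *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%          SetPixelRed(q+x,QuantumRange);
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }
%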
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.queue_authentic_pixels_handler !=
(QueueAuthenticPixelsHandler) NULL)
return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,columns,
rows,exception));
assert(id < (int) cache_info->number_threads);
return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PREAD)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
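  /*
    Mirror of WritePixelCacheRegion(): read in chunks of at most SSIZE_MAX
    bytes, retrying on EINTR and returning the number of bytes read.
  */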
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PREAD)
count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX),(off_t) (offset+i));
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
static MagickBooleanType ReadPixelCacheIndexes(
CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register IndexPacket
*magick_restrict q;
register ssize_t
y;
size_t
rows;
if (cache_info->active_index_channel == MagickFalse)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
rows=nexus_info->region.height;
extent=length*rows;
q=nexus_info->indexes;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register IndexPacket
*magick_restrict p;
/*
Read indexes from memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
p=cache_info->indexes+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->columns;
q+=nexus_info->region.width;
}
break;
}
case DiskCache:
{
/*
Read indexes from disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
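      /*
        On disk the colormap indexes are stored after all pixel packets,
        hence the extra extent*sizeof(PixelPacket) displacement below.
      */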
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
if (count < (MagickOffsetType) length)
break;
offset+=cache_info->columns;
q+=nexus_info->region.width;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Read indexes from distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
cache_info->server_info,®ion,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
q+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register PixelPacket
*magick_restrict q;
register ssize_t
y;
size_t
rows;
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
return(MagickFalse);
offset+=nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
return(MagickFalse);
rows=nexus_info->region.height;
extent=length*rows;
if ((extent == 0) || ((extent/length) != rows))
return(MagickFalse);
q=nexus_info->pixels;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register PixelPacket
*magick_restrict p;
/*
Read pixels from memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
p=cache_info->pixels+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=cache_info->columns;
q+=nexus_info->region.width;
}
break;
}
case DiskCache:
{
/*
Read pixels from disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
sizeof(*q),length,(unsigned char *) q);
if (count < (MagickOffsetType) length)
break;
offset+=cache_info->columns;
q+=nexus_info->region.width;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Read pixels from distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
cache_info->server_info,®ion,length,(unsigned char *) q);
if (count != (MagickOffsetType) length)
break;
q+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
%  pixel cache, returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
LockSemaphoreInfo(cache_info->semaphore);
cache_info->reference_count++;
UnlockSemaphoreInfo(cache_info->semaphore);
return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   R e s e t P i x e l C a c h e E p o c h                                   %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
%      void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
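%  A hedged sketch of overriding a single handler (my_queue_handler is a
%  hypothetical function matching the QueueAuthenticPixelsHandler signature;
%  zeroed members are NULL and therefore left unchanged):
%
%      CacheMethods
%        cache_methods;
%
%      (void) ResetMagickMemory(&cache_methods,0,sizeof(cache_methods));
%      cache_methods.queue_authentic_pixels_handler=my_queue_handler;
%      SetPixelCacheMethods(image->cache,&cache_methods);
%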
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
CacheInfo
*magick_restrict cache_info;
GetOneAuthenticPixelFromHandler
get_one_authentic_pixel_from_handler;
GetOneVirtualPixelFromHandler
get_one_virtual_pixel_from_handler;
/*
Set cache pixel methods.
*/
assert(cache != (Cache) NULL);
assert(cache_methods != (CacheMethods *) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
cache_info->methods.get_virtual_pixel_handler=
cache_methods->get_virtual_pixel_handler;
if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
cache_info->methods.destroy_pixel_handler=
cache_methods->destroy_pixel_handler;
if (cache_methods->get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
cache_info->methods.get_virtual_indexes_from_handler=
cache_methods->get_virtual_indexes_from_handler;
if (cache_methods->get_authentic_pixels_handler !=
(GetAuthenticPixelsHandler) NULL)
cache_info->methods.get_authentic_pixels_handler=
cache_methods->get_authentic_pixels_handler;
if (cache_methods->queue_authentic_pixels_handler !=
(QueueAuthenticPixelsHandler) NULL)
cache_info->methods.queue_authentic_pixels_handler=
cache_methods->queue_authentic_pixels_handler;
if (cache_methods->sync_authentic_pixels_handler !=
(SyncAuthenticPixelsHandler) NULL)
cache_info->methods.sync_authentic_pixels_handler=
cache_methods->sync_authentic_pixels_handler;
if (cache_methods->get_authentic_pixels_from_handler !=
(GetAuthenticPixelsFromHandler) NULL)
cache_info->methods.get_authentic_pixels_from_handler=
cache_methods->get_authentic_pixels_from_handler;
if (cache_methods->get_authentic_indexes_from_handler !=
(GetAuthenticIndexesFromHandler) NULL)
cache_info->methods.get_authentic_indexes_from_handler=
cache_methods->get_authentic_indexes_from_handler;
get_one_virtual_pixel_from_handler=
cache_info->methods.get_one_virtual_pixel_from_handler;
if (get_one_virtual_pixel_from_handler !=
(GetOneVirtualPixelFromHandler) NULL)
cache_info->methods.get_one_virtual_pixel_from_handler=
cache_methods->get_one_virtual_pixel_from_handler;
get_one_authentic_pixel_from_handler=
cache_methods->get_one_authentic_pixel_from_handler;
if (get_one_authentic_pixel_from_handler !=
(GetOneAuthenticPixelFromHandler) NULL)
cache_info->methods.get_one_authentic_pixel_from_handler=
cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,
% const MagickBooleanType buffered,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
return(MagickFalse);
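  /*
    Honor the security policy: allocate the staging buffer from the heap
    unless anonymous memory mapping was requested.
  */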
if (cache_anonymous_memory <= 0)
{
nexus_info->mapped=MagickFalse;
nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) nexus_info->length));
if (nexus_info->cache != (PixelPacket *) NULL)
(void) ResetMagickMemory(nexus_info->cache,0,(size_t)
nexus_info->length);
}
else
{
nexus_info->mapped=MagickTrue;
nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
nexus_info->length);
}
if (nexus_info->cache == (PixelPacket *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
cache_info->filename);
return(MagickFalse);
}
return(MagickTrue);
}
static inline MagickBooleanType IsAuthenticPixelCache(
const CacheInfo *magick_restrict cache_info,
const NexusInfo *magick_restrict nexus_info)
{
MagickBooleanType
status;
MagickOffsetType
offset;
/*
Do the nexus pixels point directly at the in-core cache pixels, or are they buffered?
*/
if (cache_info->type == PingCache)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
status=nexus_info->pixels == (cache_info->pixels+offset) ? MagickTrue :
MagickFalse;
return(status);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
const MapMode mode)
{
magick_unreferenced(nexus_info);
magick_unreferenced(mode);
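  /*
    MagickCachePrefetch() may expand to nothing on compilers without a
    prefetch builtin, leaving both parameters otherwise unreferenced.
  */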
if (mode == ReadMode)
{
MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
return;
}
MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}
static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
const MapMode mode,const RectangleInfo *region,
const MagickBooleanType buffered,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
MagickBooleanType
status;
MagickSizeType
length,
number_pixels;
assert(cache_info != (const CacheInfo *) NULL);
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return((PixelPacket *) NULL);
if ((region->width == 0) || (region->height == 0))
return((PixelPacket *) NULL);
nexus_info->region=(*region);
if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
(buffered == MagickFalse))
{
ssize_t
x,
y;
x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
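      /*
        Pixels map directly onto the cache only when the region lies
        entirely within it and is either a single row or one or more full
        rows starting at column 0, so the pixels are contiguous in memory.
      */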
if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
(nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
((nexus_info->region.width == cache_info->columns) ||
((nexus_info->region.width % cache_info->columns) == 0)))))
{
MagickOffsetType
offset;
/*
Pixels are accessed directly from memory.
*/
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
nexus_info->pixels=cache_info->pixels+offset;
nexus_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
nexus_info->indexes=cache_info->indexes+offset;
PrefetchPixelCacheNexusPixels(nexus_info,mode);
nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
nexus_info);
return(nexus_info->pixels);
}
}
/*
Pixels are stored in a staging region until they are synced to the cache.
*/
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
length=number_pixels*sizeof(PixelPacket);
if (cache_info->active_index_channel != MagickFalse)
length+=number_pixels*sizeof(IndexPacket);
if (nexus_info->cache == (PixelPacket *) NULL)
{
nexus_info->length=length;
status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
{
nexus_info->length=0;
return((PixelPacket *) NULL);
}
}
else
if (nexus_info->length < length)
{
RelinquishCacheNexusPixels(nexus_info);
nexus_info->length=length;
status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
{
nexus_info->length=0;
return((PixelPacket *) NULL);
}
}
nexus_info->pixels=nexus_info->cache;
nexus_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
PrefetchPixelCacheNexusPixels(nexus_info,mode);
nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
nexus_info);
return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
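%  A minimal usage sketch (restore the previous method when done):
%
%      VirtualPixelMethod
%        previous_method;
%
%      previous_method=SetPixelCacheVirtualMethod(image,
%        TransparentVirtualPixelMethod);
%      ...
%      (void) SetPixelCacheVirtualMethod(image,previous_method);
%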
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
const Quantum opacity)
{
CacheInfo
*magick_restrict cache_info;
CacheView
*magick_restrict image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
image->matte=MagickTrue;
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
&image->exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
q->opacity=opacity;
q++;
}
status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
}
image_view=DestroyCacheView(image_view);
return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
const VirtualPixelMethod virtual_pixel_method)
{
CacheInfo
*magick_restrict cache_info;
VirtualPixelMethod
method;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
method=cache_info->virtual_pixel_method;
cache_info->virtual_pixel_method=virtual_pixel_method;
if ((image->columns != 0) && (image->rows != 0))
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
{
if ((image->background_color.opacity != OpaqueOpacity) &&
(image->matte == MagickFalse))
(void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
if ((IsPixelGray(&image->background_color) == MagickFalse) &&
(IsGrayColorspace(image->colorspace) != MagickFalse))
(void) SetImageColorspace((Image *) image,sRGBColorspace);
break;
}
case TransparentVirtualPixelMethod:
{
if (image->matte == MagickFalse)
(void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
break;
}
default:
break;
}
return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
% completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
MagickCLEnv
clEnv;
assert(cache_info != (CacheInfo *)NULL);
if ((cache_info->type != MemoryCache) ||
(cache_info->opencl == (OpenCLCacheInfo *)NULL))
return;
/*
Ensure single threaded access to OpenCL environment.
*/
LockSemaphoreInfo(cache_info->semaphore);
if (cache_info->opencl != (OpenCLCacheInfo *)NULL)
{
cl_event
*events;
cl_uint
event_count;
clEnv=GetDefaultOpenCLEnv();
events=CopyOpenCLEvents(cache_info->opencl,&event_count);
if (events != (cl_event *) NULL)
{
cl_command_queue
queue;
cl_context
context;
cl_int
status;
PixelPacket
*pixels;
context=GetOpenCLContext(clEnv);
queue=AcquireOpenCLCommandQueue(clEnv);
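          /*
            A blocking map (CL_TRUE) waits on the outstanding events so the
            host-side pixel buffer reflects all completed OpenCL work.
          */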
pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
cache_info->opencl->buffer,CL_TRUE, CL_MAP_READ | CL_MAP_WRITE,0,
cache_info->length,event_count,events,NULL,&status);
assert(pixels == cache_info->pixels);
events=(cl_event *) RelinquishMagickMemory(events);
RelinquishOpenCLCommandQueue(clEnv,queue);
}
cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
}
UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *)NULL);
cache_info = (CacheInfo *)image->cache;
CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickBooleanType
status;
/*
Transfer pixels to the cache.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->cache == (Cache) NULL)
ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return(MagickFalse);
if ((image->storage_class == DirectClass) &&
(image->clip_mask != (Image *) NULL) &&
(ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if ((image->storage_class == DirectClass) &&
(image->mask != (Image *) NULL) &&
(MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
{
image->taint=MagickTrue;
return(MagickTrue);
}
assert(cache_info->signature == MagickCoreSignature);
status=WritePixelCachePixels(cache_info,nexus_info,exception);
if ((cache_info->active_index_channel != MagickFalse) &&
(WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
return(MagickFalse);
if (status != MagickFalse)
image->taint=MagickTrue;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.sync_authentic_pixels_handler !=
(SyncAuthenticPixelsHandler) NULL)
return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
assert(id < (int) cache_info->number_threads);
status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
exception);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(exception != (ExceptionInfo *) NULL);
cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const IndexPacket
*magick_restrict p;
register ssize_t
y;
size_t
rows;
if (cache_info->active_index_channel == MagickFalse)
return(MagickFalse);
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
rows=nexus_info->region.height;
extent=(MagickSizeType) length*rows;
p=nexus_info->indexes;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register IndexPacket
*magick_restrict q;
/*
Write indexes to memory.
*/
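/*
Contiguity optimization: if the nexus spans full cache rows and the extent
fits in a size_t, the per-row loop below collapses to a single memcpy.
*/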
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=cache_info->indexes+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=nexus_info->region.width;
q+=cache_info->columns;
}
break;
}
case DiskCache:
{
/*
Write indexes to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
extent=(MagickSizeType) cache_info->columns*cache_info->rows;
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
p);
if (count < (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write indexes to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
MagickOffsetType
count,
offset;
MagickSizeType
extent,
length;
register const PixelPacket
*magick_restrict p;
register ssize_t
y;
size_t
rows;
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(MagickTrue);
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
rows=nexus_info->region.height;
extent=length*rows;
p=nexus_info->pixels;
y=0;
switch (cache_info->type)
{
case MemoryCache:
case MapCache:
{
register PixelPacket
*magick_restrict q;
/*
Write pixels to memory.
*/
if ((cache_info->columns == nexus_info->region.width) &&
(extent == (MagickSizeType) ((size_t) extent)))
{
length=extent;
rows=1UL;
}
q=cache_info->pixels+offset;
for (y=0; y < (ssize_t) rows; y++)
{
(void) memcpy(q,p,(size_t) length);
p+=nexus_info->region.width;
q+=cache_info->columns;
}
break;
}
case DiskCache:
{
/*
Write pixels to disk.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
cache_info->cache_filename);
UnlockSemaphoreInfo(cache_info->file_semaphore);
return(MagickFalse);
}
if ((cache_info->columns == nexus_info->region.width) &&
(extent <= MagickMaxBufferExtent))
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
sizeof(*p),length,(const unsigned char *) p);
if (count < (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
offset+=cache_info->columns;
}
if (IsFileDescriptorLimitExceeded() != MagickFalse)
(void) ClosePixelCacheOnDisk(cache_info);
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
case DistributedCache:
{
RectangleInfo
region;
/*
Write pixels to distributed cache.
*/
LockSemaphoreInfo(cache_info->file_semaphore);
region=nexus_info->region;
if ((cache_info->columns != nexus_info->region.width) ||
(extent > MagickMaxBufferExtent))
region.height=1UL;
else
{
length=extent;
rows=1UL;
}
for (y=0; y < (ssize_t) rows; y++)
{
count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
cache_info->server_info,&region,length,(const unsigned char *) p);
if (count != (MagickOffsetType) length)
break;
p+=nexus_info->region.width;
region.y++;
}
UnlockSemaphoreInfo(cache_info->file_semaphore);
break;
}
default:
break;
}
if (y < (ssize_t) rows)
{
ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
cache_info->cache_filename);
return(MagickFalse);
}
if ((cache_info->debug != MagickFalse) &&
(CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
nexus_info->region.width,(double) nexus_info->region.height,(double)
nexus_info->region.x,(double) nexus_info->region.y);
return(MagickTrue);
}
|
convolution_1x1_pack4to16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
const int size = w * h;
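// A 1x1 stride-1 convolution touches each input pixel exactly once, so it is
// a plain GEMM over the flattened spatial dimension: reinterpret the w x h
// blob as a size x 1 blob and dispatch to the packed sgemm kernel.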
Mat bottom_im2col = bottom_blob;
bottom_im2col.w = size;
bottom_im2col.h = 1;
im2col_sgemm_pack4to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt);
}
static void conv1x1s2_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
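// Stride-2 sampling on pack4 data: each output pixel consumes 2 input pixels
// (8 floats), leaving w - 2*outw unread pixels at the end of a row; tailstep
// also skips the next row entirely (w pixels), all times 4 floats per pixel.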
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__m128 _v = _mm_load_ps(r0);
_mm_store_ps(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4to16_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
Lamg.h | /*
* Lamg.h
*
* Created on: Oct 20, 2015
* Author: Michael Wegner (michael.wegner@student.kit.edu)
*/
#ifndef NETWORKIT_CPP_NUMERICS_LAMG_LAMG_H_
#define NETWORKIT_CPP_NUMERICS_LAMG_LAMG_H_
#include <vector>
#include "../LinearSolver.h"
#include "MultiLevelSetup.h"
#include "SolverLamg.h"
#include "../GaussSeidelRelaxation.h"
#include "../../algebraic/MatrixTools.h"
#include "../../components/ParallelConnectedComponents.h"
#include "omp.h"
namespace NetworKit {
/**
* @ingroup numerics
* Represents the interface to the Lean Algebraic Multigrid (LAMG) graph Laplacian linear solver
* by Oren E. Livne and Achi Brandt.
* @see Livne, Oren E., and Achi Brandt. "Lean algebraic multigrid (LAMG): Fast graph Laplacian linear solver." SIAM Journal on Scientific Computing 34.4 (2012): B499-B522.
*/
template<class Matrix>
class Lamg : public LinearSolver<Matrix> {
private:
bool validSetup;
GaussSeidelRelaxation<Matrix> smoother;
MultiLevelSetup<Matrix> lamgSetup;
Matrix laplacianMatrix;
std::vector<LevelHierarchy<Matrix>> compHierarchies;
std::vector<SolverLamg<Matrix>> compSolvers;
std::vector<LAMGSolverStatus> compStati;
std::vector<Vector> initialVectors;
std::vector<Vector> rhsVectors;
count numComponents;
std::vector<std::vector<index>> components;
std::vector<index> graph2Components;
void initializeForOneComponent();
public:
/**
* Construct a solver with the given @a tolerance. The relative residual ||Ax-b||/||b|| will be less than or equal to
* @a tolerance after the solver finished.
* @param tolerance
*/
Lamg(const double tolerance = 1e-6) : LinearSolver<Matrix>(tolerance), validSetup(false), lamgSetup(smoother), numComponents(0) {}
/** Default destructor */
~Lamg() = default;
/**
* Compute the multigrid hierarchy for the given Laplacian matrix @a laplacianMatrix.
* @param laplacianMatrix
* @note This method also works for disconnected graphs. If you know that the graph is connected,
 * it is faster to use @ref setupConnected instead.
*/
void setup(const Matrix& laplacianMatrix);
/**
 * Compute the multigrid hierarchy for the given Laplacian matrix @a laplacianMatrix.
* @param laplacianMatrix
* @note The graph has to be connected for this method to work. Otherwise the output is undefined.
*/
void setupConnected(const Matrix& laplacianMatrix);
/**
 * Computes the @a result for the matrix currently set up and the right-hand side @a rhs.
 * The maximum time spent can be limited via @a maxConvergenceTime and the maximum number of
 * iterations can be set via @a maxIterations.
* @param rhs
* @param result
* @param maxConvergenceTime
* @param maxIterations
* @return A @ref SolverStatus object which provides some statistics like the final absolute residual.
*/
SolverStatus solve(const Vector& rhs, Vector& result, count maxConvergenceTime = 5 * 60 * 1000, count maxIterations = std::numeric_limits<count>::max());
/**
 * Computes the @a results for the matrix currently set up and the right-hand sides @a rhs.
 * The maximum time spent on each system can be limited via @a maxConvergenceTime and the
 * maximum number of iterations can be set via @a maxIterations.
* @param rhs
* @param results
* @param maxConvergenceTime
* @param maxIterations
*/
void parallelSolve(const std::vector<Vector>& rhs, std::vector<Vector>& results, count maxConvergenceTime = 5 * 60 * 1000, count maxIterations = std::numeric_limits<count>::max());
};
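/**
 * Illustrative usage sketch (not part of the original header). Assumes a
 * NetworKit::CSRMatrix Laplacian L and a right-hand side b that sums to zero
 * on each connected component:
 * @code
 * Lamg<CSRMatrix> solver(1e-6);
 * solver.setup(L); // or setupConnected(L) if connectivity is known
 * Vector x(L.numberOfColumns(), 0.0); // initial guess
 * SolverStatus status = solver.solve(b, x);
 * @endcode
 */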
template<class Matrix>
void Lamg<Matrix>::initializeForOneComponent() {
compHierarchies = std::vector<LevelHierarchy<Matrix>>(1);
lamgSetup.setup(laplacianMatrix, compHierarchies[0]);
compSolvers.clear();
compSolvers.push_back(SolverLamg<Matrix>(compHierarchies[0], smoother));
validSetup = true;
}
template<class Matrix>
void Lamg<Matrix>::setupConnected(const Matrix& laplacianMatrix) {
this->laplacianMatrix = laplacianMatrix;
initializeForOneComponent();
numComponents = 1;
}
template<class Matrix>
void Lamg<Matrix>::setup(const Matrix& laplacianMatrix) {
this->laplacianMatrix = laplacianMatrix;
Graph G = MatrixTools::matrixToGraph(laplacianMatrix);
ParallelConnectedComponents con(G, false);
con.run();
numComponents = con.numberOfComponents();
if (numComponents == 1) {
initializeForOneComponent();
} else {
graph2Components = std::vector<index>(G.numberOfNodes());
initialVectors = std::vector<Vector>(numComponents);
rhsVectors = std::vector<Vector>(numComponents);
components = std::vector<std::vector<index>>(numComponents);
compHierarchies = std::vector<LevelHierarchy<Matrix>>(numComponents);
compSolvers.clear();
compStati = std::vector<LAMGSolverStatus>(numComponents);
// create solver for every component
index compIdx = 0;
for (auto component : con.getPartition().getSubsets()) {
components[compIdx] = std::vector<index>(component.begin(), component.end());
std::vector<Triplet> triplets;
index idx = 0;
for (node u : components[compIdx]) {
graph2Components[u] = idx;
idx++;
}
for (node u : components[compIdx]) {
G.forNeighborsOf(u, [&](node v, edgeweight w) {
triplets.push_back({graph2Components[u], graph2Components[v], w});
});
}
Matrix compMatrix(component.size(), component.size(), triplets);
initialVectors[compIdx] = Vector(component.size());
rhsVectors[compIdx] = Vector(component.size());
lamgSetup.setup(compMatrix, compHierarchies[compIdx]);
compSolvers.push_back(SolverLamg<Matrix>(compHierarchies[compIdx], smoother));
LAMGSolverStatus status;
status.desiredResidualReduction = this->tolerance * component.size() / G.numberOfNodes();
compStati[compIdx] = status;
compIdx++;
}
validSetup = true;
}
}
template<class Matrix>
SolverStatus Lamg<Matrix>::solve(const Vector& rhs, Vector& result, count maxConvergenceTime, count maxIterations) {
if (!validSetup || result.getDimension() != laplacianMatrix.numberOfColumns()
|| rhs.getDimension() != laplacianMatrix.numberOfRows()) {
throw std::runtime_error("No or wrong matrix is setup for given vectors.");
}
SolverStatus status;
if (numComponents == 1) {
LAMGSolverStatus stat;
stat.desiredResidualReduction = this->tolerance * rhs.length() / (laplacianMatrix * result - rhs).length();
stat.maxIters = maxIterations;
stat.maxConvergenceTime = maxConvergenceTime;
compSolvers[0].solve(result, rhs, stat);
status.residual = stat.residual;
status.numIters = stat.numIters;
status.converged = stat.converged;
} else {
// solve on every component
count maxIters = 0;
for (index i = 0; i < components.size(); ++i) {
for (auto element : components[i]) {
initialVectors[i][graph2Components[element]] = result[element];
rhsVectors[i][graph2Components[element]] = rhs[element];
}
double resReduction = this->tolerance * rhsVectors[i].length() / (compHierarchies[i].at(0).getLaplacian() * initialVectors[i] - rhsVectors[i]).length();
compStati[i].desiredResidualReduction = resReduction * components[i].size() / laplacianMatrix.numberOfRows();
compStati[i].maxIters = maxIterations;
compStati[i].maxConvergenceTime = maxConvergenceTime;
compSolvers[i].solve(initialVectors[i], rhsVectors[i], compStati[i]);
for (auto element : components[i]) { // write solution back to result
result[element] = initialVectors[i][graph2Components[element]];
}
maxIters = std::max(maxIters, compStati[i].numIters);
}
status.residual = (rhs - laplacianMatrix * result).length();
status.converged = status.residual <= this->tolerance;
status.numIters = maxIters;
}
return status;
}
template<class Matrix>
void Lamg<Matrix>::parallelSolve(const std::vector<Vector>& rhs, std::vector<Vector>& results, count maxConvergenceTime, count maxIterations) {
if (numComponents == 1) {
assert(rhs.size() == results.size());
const index numThreads = omp_get_max_threads();
if (compSolvers.size() != numThreads) {
compSolvers.clear();
for (index i = 0; i < (index) numThreads; ++i) {
compSolvers.push_back(SolverLamg<Matrix>(compHierarchies[0], smoother));
}
}
bool nested = omp_get_nested();
if (nested) omp_set_nested(false);
#pragma omp parallel for
for (index i = 0; i < rhs.size(); ++i) {
index threadId = omp_get_thread_num();
LAMGSolverStatus stat;
stat.desiredResidualReduction = this->tolerance * rhs[i].length() / (laplacianMatrix * results[i] - rhs[i]).length();
stat.maxIters = maxIterations;
stat.maxConvergenceTime = maxConvergenceTime;
compSolvers[threadId].solve(results[i], rhs[i], stat);
}
if (nested) omp_set_nested(true);
}
}
} /* namespace NetworKit */
#endif /* NETWORKIT_CPP_NUMERICS_LAMG_LAMG_H_ */
|
CartesianMPIHDF.h | // File : CartesianMPIHDF.h
// Created : Wed Jan 29 2020 10:25:26 AM (+0100)
// Author : Fabian Wermelinger
// Description: HDF IO routines for Cartesian MPI grid types
// Copyright 2020 ETH Zurich. All Rights Reserved.
#ifndef CARTESIANMPIHDF_H_S5X4YWDT
#define CARTESIANMPIHDF_H_S5X4YWDT
#include "Cubism/Common.h"
#include "Cubism/IO/FieldAOS.h"
#include "Cubism/IO/HDFDriver.h"
#include <cstdio>
#include <fstream>
#include <string>
NAMESPACE_BEGIN(Cubism)
NAMESPACE_BEGIN(IO)
DISABLE_WARNING_PUSH
DISABLE_WARNING_UNREFERENCED_FORMAL_PARAMETER
/**
* @ingroup IO
* @brief Write Cartesian MPI grid data to HDF file
* @tparam FileDataType HDF file data type
* @tparam Grid Grid type
* @tparam Mesh Mesh type
* @tparam Dir Special type that defines a cast to ``size_t``
* @param fname Output full filename without file extension
* @param aname Name of quantity in ``grid``
* @param grid Input grid
* @param mesh Input mesh corresponding to the extracted data
* @param time Current time
* @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
* @param create_xdmf Flag for XDMF wrapper
*
* @rst
* Write the data carried by the MPI ``grid`` to an HDF5 container file. The
* data that is written to the file is specified by the index space described in
* ``mesh``.
* @endrst
*/
template <typename FileDataType,
typename Grid,
typename Mesh,
typename Dir = size_t>
void CartesianMPIWriteHDF(const std::string &fname,
const std::string &aname,
const Grid &grid,
const Mesh &mesh,
const double time,
const Dir face_dir = 0,
const bool create_xdmf = true)
{
#ifdef CUBISM_USE_HDF
static_assert(Grid::BaseType::Class == Cubism::FieldClass::Scalar ||
Grid::BaseType::Class == Cubism::FieldClass::Tensor ||
Grid::BaseType::Class ==
Cubism::FieldClass::FaceContainer,
"CartesianMPIWriteHDF: Unsupported Cubism::FieldClass");
using IRange = typename Mesh::IndexRangeType;
using MIndex = typename IRange::MultiIndex;
constexpr typename Cubism::EntityType entity = Grid::EntityType;
constexpr size_t NComp = Grid::NComponents;
const size_t dface = static_cast<size_t>(face_dir);
const auto &rmesh = grid.getMesh(); // rank local mesh
const auto clip_global = // clip 'mesh' to the global grid mesh boundary
mesh.getSubMesh(rmesh.getGlobalBegin(), rmesh.getGlobalEnd());
const auto clip_rank = clip_global->getSubMesh(
rmesh.getIndexRange(entity, dface), entity, dface);
const IRange file_span = clip_global->getIndexRange(entity, dface);
const IRange data_span = clip_rank->getIndexRange(entity, dface);
const MIndex rank_extent = data_span.getExtent();
if (create_xdmf && grid.isRoot()) {
std::printf(
"CartesianMPIWriteHDF: Allocating %.1f GB file buffer (%s)\n",
file_span.getExtent().prod() * NComp * sizeof(FileDataType) /
1024. / 1024. / 1024.,
fname.c_str());
}
FileDataType *buf = new FileDataType[rank_extent.prod() * NComp];
#pragma omp parallel for
for (size_t i = 0; i < grid.size(); ++i) {
const auto &bf = grid[i]; // block field
Field2AOS(bf, data_span, buf, dface);
}
HDFDriverMPI<FileDataType, typename Mesh::BaseMesh, Mesh::Class> hdf_driver;
hdf_driver.comm = grid.getCartComm();
hdf_driver.file_span = file_span;
hdf_driver.data_span = data_span;
hdf_driver.write(
fname, aname, buf, *clip_global, entity, NComp, time, create_xdmf);
delete[] buf;
#else
std::fprintf(stderr,
"CartesianMPIWriteHDF: HDF not supported (%s)\n",
fname.c_str());
#endif /* CUBISM_USE_HDF */
}
/**
* @ingroup IO
* @brief Write Cartesian MPI grid data to HDF file
* @tparam FileDataType HDF file data type
* @tparam Grid Grid type
* @tparam Dir Special type that defines a cast to ``size_t``
* @param fname Output full filename without file extension
* @param aname Name of quantity in ``grid``
* @param grid Input grid
* @param time Current time
* @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
* @param create_xdmf Flag for XDMF wrapper
*
* Convenience wrapper to dump a full MPI grid to an HDF container file.
*/
template <typename FileDataType, typename Grid, typename Dir = size_t>
void CartesianMPIWriteHDF(const std::string &fname,
const std::string &aname,
const Grid &grid,
const double time,
const Dir face_dir = 0,
const bool create_xdmf = true)
{
Cubism::IO::CartesianMPIWriteHDF<FileDataType>(
fname,
aname,
grid,
grid.getGlobalMesh(),
time,
static_cast<size_t>(face_dir),
create_xdmf);
}
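// Illustrative sketch (not part of the original header): dump the full grid,
// assuming an MPI Cartesian grid `grid` and a build with CUBISM_USE_HDF:
//
// Cubism::IO::CartesianMPIWriteHDF<float>("fields_000", "density", grid, 0.0);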
/**
* @ingroup IO
* @brief Read Cartesian MPI grid data from HDF file
* @tparam FileDataType HDF file data type
* @tparam Grid Grid type
* @tparam Mesh Mesh type
* @tparam Dir Special type that defines a cast to ``size_t``
* @param fname Input full filename without file extension
* @param grid Grid populated with file data
* @param mesh Grid (sub)mesh
* @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
*
* @rst
* Read the data of an HDF5 container file into the MPI ``grid``. The data that
* is read from the file is specified by the index space described in ``mesh``.
* @endrst
*/
template <typename FileDataType,
typename Grid,
typename Mesh,
typename Dir = size_t>
void CartesianMPIReadHDF(const std::string &fname,
Grid &grid,
const Mesh &mesh,
const Dir face_dir = 0)
{
#ifdef CUBISM_USE_HDF
static_assert(Grid::BaseType::Class == Cubism::FieldClass::Scalar ||
Grid::BaseType::Class == Cubism::FieldClass::Tensor ||
Grid::BaseType::Class ==
Cubism::FieldClass::FaceContainer,
"CartesianMPIReadHDF: Unsupported Cubism::FieldClass");
{
std::ifstream file(fname + ".h5");
if (grid.isRoot() && !file.good()) {
throw std::runtime_error("CartesianMPIReadHDF: File '" + fname +
"' does not exist");
}
}
using IRange = typename Mesh::IndexRangeType;
using MIndex = typename IRange::MultiIndex;
constexpr typename Cubism::EntityType entity = Grid::EntityType;
constexpr size_t NComp = Grid::NComponents;
const size_t dface = static_cast<size_t>(face_dir);
const auto &rmesh = grid.getMesh(); // rank local mesh
const auto clip_global = // clip 'mesh' to the global grid mesh boundary
mesh.getSubMesh(rmesh.getGlobalBegin(), rmesh.getGlobalEnd());
const auto clip_rank = clip_global->getSubMesh(
rmesh.getIndexRange(entity, dface), entity, dface);
const IRange file_span = clip_global->getIndexRange(entity, dface);
const IRange data_span = clip_rank->getIndexRange(entity, dface);
const MIndex rank_extent = data_span.getExtent();
FileDataType *buf = new FileDataType[rank_extent.prod() * NComp];
HDFDriverMPI<FileDataType, typename Mesh::BaseMesh, Mesh::Class> hdf_driver;
hdf_driver.comm = grid.getCartComm();
hdf_driver.file_span = file_span;
hdf_driver.data_span = data_span;
hdf_driver.read(fname, buf, NComp);
#pragma omp parallel for
for (size_t i = 0; i < grid.size(); ++i) {
auto &bf = grid[i]; // block field
AOS2Field(buf, data_span, bf, dface);
}
delete[] buf;
#else
std::fprintf(
stderr, "CartesianMPIReadHDF: HDF not supported (%s)\n", fname.c_str());
#endif /* CUBISM_USE_HDF */
}
/**
* @ingroup IO
* @brief Read Cartesian grid data from HDF file
* @tparam FileDataType HDF file data type
* @tparam Grid Grid type
* @tparam Dir Special type that defines a cast to ``size_t``
* @param fname Input full filename without file extension
* @param grid Grid populated with file data
* @param face_dir Face direction (relevant for ``Cubism::EntityType::Face``)
*
* Convenience wrapper to read a full MPI grid from an HDF container file.
*/
template <typename FileDataType, typename Grid, typename Dir = size_t>
void CartesianMPIReadHDF(const std::string &fname,
Grid &grid,
const Dir face_dir = 0)
{
Cubism::IO::CartesianMPIReadHDF<FileDataType>(
fname, grid, grid.getGlobalMesh(), static_cast<size_t>(face_dir));
}
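// Illustrative sketch (not part of the original header): read the data back
// into the same grid, mirroring the write example above:
//
// Cubism::IO::CartesianMPIReadHDF<float>("fields_000", grid);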
DISABLE_WARNING_POP
NAMESPACE_END(IO)
NAMESPACE_END(Cubism)
#endif /* CARTESIANMPIHDF_H_S5X4YWDT */
|
GB_binop__rminus_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__rminus_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp32)
// A*D function (colscale): GB (_AxD__rminus_fp32)
// D*A function (rowscale): GB (_DxB__rminus_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp32)
// C=scalar+B GB (_bind1st__rminus_fp32)
// C=scalar+B' GB (_bind1st_tran__rminus_fp32)
// C=A+scalar GB (_bind2nd__rminus_fp32)
// C=A'+scalar GB (_bind2nd_tran__rminus_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (bij - aij)
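// Illustrative sketch (not part of the generated file): a user-level call
// that dispatches to the eWiseAdd kernel below, assuming GrB_FP32 matrices
// C, A, and B already exist:
// GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GxB_RMINUS_FP32, A, B, NULL) ;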
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
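// Example: with x = 2 and y = 5, RMINUS yields z = y - x = 3, whereas the
// ordinary MINUS operator would yield x - y = -3.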
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rminus_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ZQ_FaceDatabaseMaker.h | #ifndef _ZQ_FACE_DATABASE_MAKER_H_
#define _ZQ_FACE_DATABASE_MAKER_H_
#pragma once
#include <sstream>
#include <direct.h>
#include <windows.h>
#include <io.h>
#include <omp.h>
#include <opencv2\opencv.hpp>
#include "ZQ_FaceDetector.h"
#include "ZQ_FaceRecognizer.h"
#include "ZQ_FaceDatabase.h"
#include "ZQ_FaceDatabaseCompact.h"
#include "ZQ_FaceRecognizerSphereFace.h"
#include "ZQ_MergeSort.h"
namespace ZQ
{
class ZQ_FaceDatabaseMaker
{
public:
enum ErrorCode
{
ERR_WARNING = 0,
ERR_FATAL = 1
};
enum MakeDatabaseType
{
ONLY_MERGE_FEATS = 0,
UPDATE_WHO_NOT_HAVE_FEATS = 1,
FORCE_UPDATE_ALL = 2
};
public:
static bool MakeDatabase(std::vector<ZQ_FaceDetector*>& detectors, std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < detectors.size(); i++)
if (detectors[i] == 0)
return false;
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database(detectors, recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, false);
}
static bool MakeDatabaseAlreadyCropped(std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database_already_cropped(recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, false);
}
static bool MakeDatabaseCompact(std::vector<ZQ_FaceDetector*>& detectors, std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < detectors.size(); i++)
if (detectors[i] == 0)
return false;
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database(detectors, recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, true);
}
static bool MakeDatabaseCompactAlreadyCropped(std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1)
{
for (int i = 0; i < recognizers.size(); i++)
if (recognizers[i] == 0)
return false;
return _make_database_already_cropped(recognizers, database_root, database_featsfile, database_namesfile, type, show_face,
max_thread_num, true);
}
static bool CropImagesForDatabase(const std::vector<ZQ_FaceDetector*>& detectors, const std::vector<ZQ_FaceRecognizer*>& recognizers,
const std::string& src_root, const std::string& dst_root, int max_thread_num = 4, bool strict_check = true,
std::string err_logfile = "err_log.txt", bool only_for_high_quality = false)
{
return _crop_images_for_database(detectors, recognizers, src_root, dst_root, max_thread_num, strict_check, err_logfile, only_for_high_quality);
}
/* the input images must already be cropped face images */
static bool DetectOutliersInDatabase(const std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& src_root, int max_thread_num = 4,
const std::string out_file = "outlier_score.txt")
{
return _detect_outliers_in_database(recognizers, src_root, max_thread_num, out_file);
}
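/*
Illustrative usage sketch (not part of the original header); the detector and
recognizer instances are hypothetical:
std::vector<ZQ_FaceDetector*> detectors = { &my_detector };
std::vector<ZQ_FaceRecognizer*> recognizers = { &my_recognizer };
ZQ_FaceDatabaseMaker::MakeDatabase(detectors, recognizers,
"database_root", "feats.bin", "names.txt",
ZQ_FaceDatabaseMaker::UPDATE_WHO_NOT_HAVE_FEATS, false, 4);
*/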
private:
static bool _make_database(std::vector<ZQ_FaceDetector*>& detectors, std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1, bool compact = false)
{
if (type != ONLY_MERGE_FEATS && type != UPDATE_WHO_NOT_HAVE_FEATS && type != FORCE_UPDATE_ALL)
{
printf("type must be : ONLY_MERGE_FEATS(%d), UPDATE_WHO_NOT_HAVE_FEATS(%d), FORCE_UPDATE_ALL(%d)\n",
ONLY_MERGE_FEATS, UPDATE_WHO_NOT_HAVE_FEATS, FORCE_UPDATE_ALL);
return false;
}
int num_detectors = detectors.size();
int num_recognizers = recognizers.size();
if (num_detectors == 0 || num_recognizers == 0)
{
printf("You should use at least one detector and one recognizer\n");
return false;
}
ZQ_FaceDatabase database;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
std::string err_logfile = "err_log.txt";
std::ostringstream oss;
std::vector<std::string> person_names;
std::vector<std::vector<std::string>> filenames;
std::vector<std::vector<ZQ_CNN_BBox>> boxes;
std::vector<std::vector<bool>> fail_flag;
_auto_detect_database(database_root, person_names, filenames);
int num_cores = omp_get_num_procs() - 1;
int real_thread_num = __min(max_thread_num, __min(num_cores, __min(num_detectors, num_recognizers)));
printf("real_thread_num = %d\n", real_thread_num);
int feat_dim = 0;
feat_dim = recognizers[0]->GetFeatDim();
/****************************************/
int person_num = person_names.size();
database.persons.resize(person_num);
database.names = person_names;
double start_time = omp_get_wtime();
printf("begin\n");
std::vector<std::pair<int, int>> pairs;
for (int i = 0; i < person_num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
pairs.push_back(std::make_pair(i, j));
}
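// Flatten the (person, image) double loop into one pair list so the dynamic
// OpenMP schedule below balances the load even when persons have very
// different image counts.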
#pragma omp parallel for schedule(dynamic, 100) num_threads(real_thread_num)
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
std::ostringstream oss;
ZQ_FaceFeature feat;
cv::Mat crop;
ErrorCode err_code;
std::string err_msg;
bool has_feat = false;
bool need_detect = false;
if (type == ONLY_MERGE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = false;
has_feat = false;
}
else
has_feat = true;
}
else if (type == UPDATE_WHO_NOT_HAVE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = true;
has_feat = false;
}
else
has_feat = true;
}
else if (type == FORCE_UPDATE_ALL)
need_detect = true;
bool need_write = false;
bool ret = true;
if (need_detect)
{
if (!_extract_feature_from_img(*detectors[id], *recognizers[id], filenames[i][j], feat, crop, err_code, err_msg, false))
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
need_write = true;
has_feat = true;
if (show_face)
{
if (id == 0)
{
cv::namedWindow("crop");
cv::imshow("crop", crop);
cv::waitKey(5);
}
}
}
#pragma omp critical
{
if (has_feat)
{
database.persons[i].features.push_back(feat);
database.persons[i].filenames.push_back(filenames[i][j]);
}
}
if (need_write)
_write_feature_to_file(filenames[i][j], feat);
}
double end_time = omp_get_wtime();
printf("detect_and_extract total_cost:%.3f s\n", (end_time - start_time));
/*******************/
for (int i = person_num - 1; i >= 0; i--)
{
if (database.persons[i].features.size() == 0)
{
printf("person [%d]: %s has no data\n", i, person_names[i].c_str());
oss.str("");
oss << "person [" << i << "]: " << person_names[i] << " has no data";
ErrorCodes.push_back(ERR_WARNING);
error_messages.push_back(oss.str());
database.persons.erase(database.persons.begin() + i);
person_names.erase(person_names.begin() + i);
}
}
database.names = person_names;
if (compact)
{
if (!database.SaveToFileBinaryCompact(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
else
{
if (!database.SaveToFileBinary(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
printf("all done\n");
_write_error_messages(err_logfile, ErrorCodes, error_messages);
return true;
}
static bool _make_database_already_cropped(std::vector<ZQ_FaceRecognizer*> recognizers,
const std::string& database_root, const std::string& database_featsfile, const std::string& database_namesfile,
MakeDatabaseType type = ONLY_MERGE_FEATS, bool show_face = false, int max_thread_num = 1, bool compact = false)
{
if (type != ONLY_MERGE_FEATS && type != UPDATE_WHO_NOT_HAVE_FEATS && type != FORCE_UPDATE_ALL)
{
printf("type must be : ONLY_MERGE_FEATS(%d), UPDATE_WHO_NOT_HAVE_FEATS(%d), FORCE_UPDATE_ALL(%d)\n",
ONLY_MERGE_FEATS, UPDATE_WHO_NOT_HAVE_FEATS, FORCE_UPDATE_ALL);
return false;
}
int num_recognizers = recognizers.size();
if (num_recognizers == 0)
{
printf("You should use at least one recognizer\n");
return false;
}
ZQ_FaceDatabase database;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
std::string err_logfile = "err_log.txt";
std::ostringstream oss;
std::vector<std::string> person_names;
std::vector<std::vector<std::string>> filenames;
std::vector<std::vector<ZQ_CNN_BBox>> boxes;
std::vector<std::vector<bool>> fail_flag;
_auto_detect_database(database_root, person_names, filenames);
int num_cores = omp_get_num_procs() - 1;
int real_thread_num = __min(max_thread_num, __min(num_cores, num_recognizers));
printf("real_thread_num = %d\n", real_thread_num);
int feat_dim = 0;
feat_dim = recognizers[0]->GetFeatDim();
/****************************************/
int person_num = person_names.size();
database.persons.resize(person_num);
database.names = person_names;
double start_time = omp_get_wtime();
printf("begin\n");
std::vector<std::pair<int, int>> pairs;
for (int i = 0; i < person_num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
pairs.push_back(std::make_pair(i, j));
}
#pragma omp parallel for schedule(dynamic, 100) num_threads(real_thread_num)
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
std::ostringstream oss;
ZQ_FaceFeature feat;
cv::Mat crop;
ErrorCode err_code;
std::string err_msg;
bool has_feat = false;
bool need_detect = false;
if (type == ONLY_MERGE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = false;
has_feat = false;
}
else
has_feat = true;
}
else if (type == UPDATE_WHO_NOT_HAVE_FEATS)
{
if (!_load_feature_from_file(filenames[i][j], feat))
{
need_detect = true;
has_feat = false;
}
else
has_feat = true;
}
else if (type == FORCE_UPDATE_ALL)
need_detect = true;
bool need_write = false;
bool ret = true;
if (need_detect)
{
if (!_extract_feature_from_cropped_image(*recognizers[id], filenames[i][j], feat, crop, err_code, err_msg))
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
need_write = true;
has_feat = true;
if (show_face)
{
if (id == 0)
{
cv::namedWindow("crop");
cv::imshow("crop", crop);
cv::waitKey(5);
}
}
}
#pragma omp critical
{
if (has_feat)
{
database.persons[i].features.push_back(feat);
database.persons[i].filenames.push_back(filenames[i][j]);
}
}
if (need_write)
_write_feature_to_file(filenames[i][j], feat);
}
double end_time = omp_get_wtime();
printf("detect_and_extract total_cost:%.3f s\n", (end_time - start_time));
/*******************/
for (int i = person_num - 1; i >= 0; i--)
{
if (database.persons[i].features.size() == 0)
{
printf("person [%d]: %s has no data\n", i, person_names[i].c_str());
oss.str("");
oss << "person [" << i << "]: " << person_names[i] << " has no data";
ErrorCodes.push_back(ERR_WARNING);
error_messages.push_back(oss.str());
database.persons.erase(database.persons.begin() + i);
person_names.erase(person_names.begin() + i);
}
}
database.names = person_names;
if (compact)
{
if (!database.SaveToFileBinaryCompact(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
else
{
if (!database.SaveToFileBinary(database_featsfile, database_namesfile))
{
printf("failed to save database\n");
return false;
}
}
printf("all done\n");
_write_error_messages(err_logfile, ErrorCodes, error_messages);
return true;
}
static bool _crop_images_for_database(const std::vector<ZQ_FaceDetector*>& detectors, const std::vector<ZQ_FaceRecognizer*>& recognizers,
const std::string& src_root, const std::string& dst_root, int max_thread_num = 4, bool strict_check = true, std::string err_logfile = "err_log.txt",
bool only_for_high_quality = false)
{
int num_detector = detectors.size();
int num_recognizer = recognizers.size();
if (num_detector == 0 || num_recognizer == 0)
return false;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
int num_cores = omp_get_num_procs();
int real_thread_num = __max(1, __min(num_cores - 1, max_thread_num));
real_thread_num = __min(real_thread_num, __min(detectors.size(), recognizers.size()));
std::vector<std::string> person_names;
std::vector<std::vector<std::string>> filenames;
_auto_detect_database(src_root, person_names, filenames);
int person_num = person_names.size();
_mkdir(dst_root.c_str());
for (int i = 0; i < person_num; i++)
{
std::string path = dst_root + "\\" + person_names[i];
_mkdir(path.c_str());
}
clock_t start_time = clock();
//double start_time = omp_get_wtime();
printf("begin\n");
std::vector<std::pair<int, int>> pairs;
for (int i = 0; i < person_num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
pairs.push_back(std::make_pair(i, j));
}
clock_t start = clock();
if (real_thread_num == 1)
{
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
int crop_width = recognizers[id]->GetCropWidth();
int crop_height = recognizers[id]->GetCropHeight();
std::ostringstream oss;
cv::Mat crop(crop_height, crop_width, CV_8UC3);
ErrorCode err_code;
std::string err_msg;
bool ret = true;
cv::Mat image = cv::imread(filenames[i][j]);
if (image.empty())
{
printf("failed to read image: %s\n", filenames[i][j].c_str());
oss.str("");
oss << "failed to read image: " << filenames[i][j];
err_code = ERR_WARNING;
err_msg = oss.str();
ret = false;
}
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
ZQ_CNN_BBox box;
ret = _get_face5point_from_img(*detectors[id], filenames[i][j], image, box, err_code, err_msg, strict_check, only_for_high_quality);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
float facial5point[10] =
{
box.ppoint[0],box.ppoint[5],
box.ppoint[1],box.ppoint[6],
box.ppoint[2],box.ppoint[7],
box.ppoint[3],box.ppoint[8],
box.ppoint[4],box.ppoint[9]
};
ret = recognizers[id]->CropImage(image.data, image.cols, image.rows, image.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, box.ppoint, box.ppoint + 5, crop.data, crop.step[0]);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
size_t pos = filenames[i][j].find_last_of('\\');
if (pos != std::string::npos)
{
std::string real_name(filenames[i][j].c_str() + pos + 1);
std::string fullname = dst_root + "\\" + person_names[i] + "\\" + real_name;
cv::imwrite(fullname, crop);
#pragma omp critical
{
//std::cout << fullname << "\n";
}
}
}
}
else
{
#pragma omp parallel for schedule(dynamic, 10) num_threads(real_thread_num)
for (int p = 0; p < pairs.size(); p++)
{
int i = pairs[p].first;
int j = pairs[p].second;
int id = omp_get_thread_num();
int crop_width = recognizers[id]->GetCropWidth();
int crop_height = recognizers[id]->GetCropHeight();
std::ostringstream oss;
cv::Mat crop(crop_height, crop_width, CV_8UC3);
ErrorCode err_code;
std::string err_msg;
bool ret = true;
cv::Mat image = cv::imread(filenames[i][j]);
if (image.empty())
{
printf("failed to read image: %s\n", filenames[i][j].c_str());
oss.str("");
oss << "failed to read image: " << filenames[i][j];
err_code = ERR_WARNING;
err_msg = oss.str();
ret = false;
}
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
ZQ_CNN_BBox box;
ret = _get_face5point_from_img(*detectors[id], filenames[i][j], image, box, err_code, err_msg, strict_check, only_for_high_quality);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
float facial5point[10] =
{
box.ppoint[0],box.ppoint[5],
box.ppoint[1],box.ppoint[6],
box.ppoint[2],box.ppoint[7],
box.ppoint[3],box.ppoint[8],
box.ppoint[4],box.ppoint[9]
};
ret = recognizers[id]->CropImage(image.data, image.cols, image.rows, image.step[0], ZQ_PixelFormat::ZQ_PIXEL_FMT_BGR, box.ppoint, box.ppoint + 5, crop.data, crop.step[0]);
if (!ret)
{
#pragma omp critical
{
ErrorCodes.push_back(err_code);
error_messages.push_back(err_msg);
}
continue;
}
size_t pos = filenames[i][j].find_last_of('\\');
if (pos != std::string::npos)
{
std::string real_name(filenames[i][j].c_str() + pos + 1);
std::string fullname = dst_root + "\\" + person_names[i] + "\\" + real_name;
cv::imwrite(fullname, crop);
#pragma omp critical
{
//std::cout << fullname << "\n";
}
}
}
}
clock_t end = clock();
printf("time: %f\n", 0.001*(end - start));
_write_error_messages(err_logfile, ErrorCodes, error_messages);
return true;
}
static bool _extract_feature_from_box(ZQ_FaceRecognizer& recognizer, const std::string& imgfile, const cv::Mat& image, const ZQ_CNN_BBox& box,
ZQ_FaceFeature& feat, cv::Mat& crop, ErrorCode& err_code, std::string& err_msg)
{
int nChannels = image.channels();
ZQ_PixelFormat pixFmt = (nChannels == 1) ? ZQ_PIXEL_FMT_GRAY : ZQ_PIXEL_FMT_BGR;
int width = recognizer.GetCropWidth();
int height = recognizer.GetCropHeight();
int feat_dim = recognizer.GetFeatDim();
crop = cv::Mat(cv::Size(width, height), CV_8UC3);
std::ostringstream oss;
if (!recognizer.CropImage(image.data, image.cols, image.rows, image.step[0], pixFmt, box.ppoint, box.ppoint + 5, crop.data, crop.step[0]))
{
printf("failed to crop face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to crop face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
feat.ChangeSize(feat_dim);
if (!recognizer.ExtractFeature(crop.data, crop.step[0], pixFmt, feat.pData, true))
{
printf("failed to extract feature in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to extract feature in image: " << imgfile.c_str();
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
return true;
}
static bool _extract_feature_from_cropped_image(ZQ_FaceRecognizer& recognizer, const std::string& imgfile, const cv::Mat& image,
ZQ_FaceFeature& feat, ErrorCode& err_code, std::string& err_msg)
{
int nChannels = image.channels();
ZQ_PixelFormat pixFmt = (nChannels == 1) ? ZQ_PIXEL_FMT_GRAY : ZQ_PIXEL_FMT_BGR;
int width = recognizer.GetCropWidth();
int height = recognizer.GetCropHeight();
int feat_dim = recognizer.GetFeatDim();
std::ostringstream oss;
feat.ChangeSize(feat_dim);
if (image.cols != width || image.rows != height
|| !recognizer.ExtractFeature(image.data, image.step[0], pixFmt, feat.pData, true))
{
printf("failed to extract feature in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to extract feature in image: " << imgfile.c_str();
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
return true;
}
static bool _extract_feature_from_img(ZQ_FaceDetector& detector, ZQ_FaceRecognizer& recognizer,
const std::string& imgfile, ZQ_FaceFeature& feat, cv::Mat& crop, ErrorCode& err_code, std::string& err_msg,
bool only_for_high_quality)
{
std::ostringstream oss;
cv::Mat image = cv::imread(imgfile);
if (image.empty())
{
printf("failed to read image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to read image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
double t1 = omp_get_wtime();
ZQ_CNN_BBox box;
	if (!_get_face5point_from_img(detector, imgfile, image, box, err_code, err_msg, /*strict_check=*/true, only_for_high_quality))
return false;
double t2 = omp_get_wtime();
if (!_extract_feature_from_box(recognizer, imgfile, image, box, feat, crop, err_code, err_msg))
return false;
double t3 = omp_get_wtime();
printf("image: %s done! findface: %.3f, extract: %.3f\n", imgfile.c_str(), t2 - t1, t3 - t2);
return true;
}
static bool _extract_feature_from_cropped_image(ZQ_FaceRecognizer& recognizer,
const std::string& imgfile, ZQ_FaceFeature& feat, cv::Mat& crop, ErrorCode& err_code, std::string& err_msg)
{
std::ostringstream oss;
cv::Mat image = cv::imread(imgfile);
if (image.empty())
{
printf("failed to read image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to read image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
double t1 = omp_get_wtime();
if (!_extract_feature_from_cropped_image(recognizer, imgfile, image, feat, err_code, err_msg))
return false;
double t2 = omp_get_wtime();
printf("image: %s done! extract: %.3f\n", imgfile.c_str(), t2 - t1);
crop = image;
return true;
}
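// The ".imgfeat" sidecar files read/written below use a tiny binary format:
// an int32 feature dimension followed by that many float32 values.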
static bool _load_feature_from_file(const std::string& imgfile, ZQ_FaceFeature& feat)
{
std::string feat_file = imgfile + ".imgfeat";
	FILE* in = 0;
	if (0 != fopen_s(&in, feat_file.c_str(), "rb"))
		return false;
	int feat_dim = 0;
	if (1 != fread(&feat_dim, sizeof(int), 1, in) || feat_dim <= 0)
	{
		fclose(in);
		return false;
	}
	feat.ChangeSize(feat_dim);
	if ((size_t)feat_dim != fread(feat.pData, sizeof(float), feat_dim, in))
	{
		fclose(in);
		return false;
	}
	fclose(in);
	return true;
}
static bool _write_feature_to_file(const std::string& imgfile, const ZQ_FaceFeature& feat)
{
std::string feat_file = imgfile + ".imgfeat";
FILE* out = 0;
if (0 != fopen_s(&out, feat_file.c_str(), "wb"))
return false;
int feat_dim = feat.length;
fwrite(&feat_dim, sizeof(int), 1, out);
fwrite(feat.pData, sizeof(float), feat_dim, out);
fclose(out);
return true;
}
static bool _write_error_messages(const std::string& file, const std::vector<ErrorCode>& ErrorCodes, const std::vector<std::string>& error_messages)
{
	int num = (int)ErrorCodes.size();
	if (num != (int)error_messages.size())
		return false;
	FILE* out = 0;
	if (0 != fopen_s(&out, file.c_str(), "w"))
		return false;
for (int i = 0; i < num; i++)
{
fprintf(out, "err_code: %d: msg: %s\n", ErrorCodes[i], error_messages[i].c_str());
}
fclose(out);
return true;
}
static bool _auto_detect_database(const std::string& root_path, std::vector<std::string>& person_names, std::vector<std::vector<std::string>>& filenames)
{
std::string dir(root_path);
dir.append("\\*.*");
_finddata_t fileDir;
intptr_t lfDir;
person_names.clear();
filenames.clear();
	if ((lfDir = _findfirst(dir.c_str(), &fileDir)) == -1L)
	{
		//printf("No file is found\n");
	}
	else
	{
		do {
			std::string str(fileDir.name);
			if (fileDir.attrib & _A_SUBDIR && 0 != strcmp(str.c_str(), ".") && 0 != strcmp(str.c_str(), ".."))
				person_names.push_back(str);
		} while (_findnext(lfDir, &fileDir) == 0);
		_findclose(lfDir); // only close a valid search handle
	}
int person_num = person_names.size();
filenames.resize(person_num);
for (int i = 0; i < person_num; i++)
{
dir = root_path + "\\" + person_names[i] + "\\*.jpg";
		if ((lfDir = _findfirst(dir.c_str(), &fileDir)) == -1L)
		{
			//printf("No file is found\n");
		}
		else
		{
			do {
				std::string str(fileDir.name);
				filenames[i].push_back(root_path + "\\" + person_names[i] + "\\" + str);
			} while (_findnext(lfDir, &fileDir) == 0);
			_findclose(lfDir); // only close a valid search handle
		}
}
return true;
}
static bool _write_database_txt(const std::string& data_base_file, const std::vector<std::vector<std::string>>& filenames)
{
int num = filenames.size();
FILE* out = 0;
if(0 != fopen_s(&out, data_base_file.c_str(), "w"))
{
return false;
}
fprintf(out, "%d\n", num);
for (int i = 0; i < num; i++)
{
for (int j = 0; j < filenames[i].size(); j++)
{
fprintf(out, "%d\n%s\n", i, filenames[i][j].c_str());
}
}
fprintf(out, "\n");
fclose(out);
return true;
}
static bool _get_face5point_from_img(ZQ_FaceDetector& detector, const std::string& imgfile, const cv::Mat& image, ZQ_CNN_BBox& box,
ErrorCode& err_code, std::string& err_msg, bool strict_check = true, bool only_for_high_quality = false)
{
std::ostringstream oss;
bool use_cuda = false;
std::vector<ZQ_CNN_BBox> bbox;
//first try
bool has_found = false;
ZQ_PixelFormat pixFmt = image.channels() == 1 ? ZQ_PIXEL_FMT_GRAY : ZQ_PIXEL_FMT_BGR;
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 60, 0.709, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
if (!only_for_high_quality)
{
//second try
if (!has_found)
{
printf("second try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 40, 0.709, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
//third try
if (!has_found)
{
printf("third try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 30, 0.8, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
//fourth try
if (!has_found)
{
printf("fourth try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 20, 0.85, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
//fifth try
if (!has_found)
{
printf("fifth try\n");
if (!detector.FindFace(image.data, image.cols, image.rows, image.step[0], pixFmt, 12, 0.9, bbox) || bbox.size() == 0)
{
/*printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();*/
has_found = false;
}
else
has_found = true;
}
}
if (!has_found)
{
printf("failed to find face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "failed to find face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
if (bbox.size() > 1)
{
if (strict_check)
{
printf("find more than one face in image: %s\n", imgfile.c_str());
oss.str("");
oss << "find more than one face in image: " << imgfile;
err_code = ERR_WARNING;
err_msg = oss.str();
return false;
}
		//pick the face closest to the center
		float center[2] = { image.cols*0.5f, image.rows*0.5f };
		std::vector<float> distance(bbox.size());
		for (int i = 0; i < (int)bbox.size(); i++)
		{
			float cx = 0.5f*(bbox[i].col1 + bbox[i].col2);
			float cy = 0.5f*(bbox[i].row1 + bbox[i].row2);
distance[i] = (center[0] - cx)*(center[0] - cx) + (center[1] - cy)*(center[1] - cy);
}
float min_dis = distance[0];
int min_id = 0;
for (int i = 1; i < bbox.size(); i++)
{
if (min_dis > distance[i])
{
min_dis = distance[i];
min_id = i;
}
}
box = bbox[min_id];
return true;
}
else
box = bbox[0];
return true;
}
static bool _detect_outliers_in_database(const std::vector<ZQ_FaceRecognizer*>& recognizers, const std::string& src_root, int max_thread_num,
const std::string& out_file)
{
int num_recognizer = recognizers.size();
if (num_recognizer == 0)
return false;
std::vector<ErrorCode> ErrorCodes;
std::vector<std::string> error_messages;
int num_cores = omp_get_num_procs();
int real_thread_num = __max(1, __min(num_cores - 1, max_thread_num));
	real_thread_num = __min(real_thread_num, (int)recognizers.size());
std::vector<std::string> person_names;
std::vector<float> person_min_scores;
std::vector<int> person_min_scores_i;
std::vector<int> person_min_scores_j;
std::vector<std::vector<std::string>> filenames;
_auto_detect_database(src_root, person_names, filenames);
int person_num = person_names.size();
if (person_num == 0)
{
printf("no person in %s\n", src_root.c_str());
return false;
}
person_min_scores.resize(person_num);
person_min_scores_i.resize(person_num);
person_min_scores_j.resize(person_num);
if (real_thread_num == 1)
{
for (int i = 0; i < person_num; i++)
{
float out_min_score;
int out_i, out_j;
if (!_detect_outlier_for_one_person(*(recognizers[0]), filenames[i], out_min_score, out_i, out_j))
{
printf("failed to detect outliter for %s\n", person_names[i].c_str());
return false;
}
if (filenames[i].size() == 0)
out_min_score = 100;
else if (filenames[i].size() == 1)
out_min_score = 10;
person_min_scores[i] = out_min_score;
person_min_scores_i[i] = out_i;
person_min_scores_j[i] = out_j;
//if ((i + 1) % 100 == 0)
{
printf("%d/%d handled\n", i + 1, person_num);
}
}
}
else
{
int handled[1] = { 0 };
#pragma omp parallel for num_threads(real_thread_num)
for (int i = 0; i < person_num; i++)
{
int thread_id = omp_get_thread_num();
float out_min_score;
int out_i, out_j;
if (!_detect_outlier_for_one_person(*(recognizers[thread_id]), filenames[i], out_min_score, out_i, out_j))
{
printf("failed to detect outliter for %s\n", person_names[i].c_str());
out_min_score = -1000;
}
if (filenames[i].size() == 0)
out_min_score = 100;
else if (filenames[i].size() == 1)
out_min_score = 10;
person_min_scores[i] = out_min_score;
person_min_scores_i[i] = out_i;
person_min_scores_j[i] = out_j;
#pragma omp critical
{
(*handled)++;
//if (handled % 100 == 0)
{
printf("%d/%d handled\n", *handled,person_num);
}
}
}
}
std::vector<int> sort_indices(person_num);
for (int i = 0; i < person_num; i++)
sort_indices[i] = i;
ZQ_MergeSort::MergeSort(&person_min_scores[0], &sort_indices[0], person_num, true);
FILE* out = 0;
if (0 != fopen_s(&out,out_file.c_str(), "w"))
{
printf("failed to create file %s\n", out_file.c_str());
return false;
}
for (int i = 0; i < person_num; i++)
{
int id = sort_indices[i];
fprintf(out, "%12.3f %s ", person_min_scores[i], person_names[id].c_str());
int img_num = filenames[id].size();
int out_i = person_min_scores_i[id];
int out_j = person_min_scores_j[id];
if (img_num > 1 && out_i >= 0 && out_i < img_num
&& out_j >= 0 && out_j < img_num)
{
fprintf(out, "%s %s\n", filenames[id][out_i].c_str(), filenames[id][out_j].c_str());
}
else
{
fprintf(out, "\n");
}
}
fclose(out);
return true;
}
static bool _detect_outlier_for_one_person(ZQ_FaceRecognizer& recognier, const std::vector<std::string>& filenames,
float& out_min_score, int& out_i, int& out_j)
{
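	// Outlier score for one person = the minimum pairwise similarity (dot
	// product of the extracted features, which ExtractFeature's final 'true'
	// argument presumably normalizes) over all image pairs of that person;
	// the minimizing pair is returned via out_i/out_j.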
out_i = 0;
out_j = 0;
out_min_score = 1;
int num = filenames.size();
if (num <= 1)
return true;
std::vector<ZQ_FaceFeature> feats(num);
	int W = recognizer.GetCropWidth();
	int H = recognizer.GetCropHeight();
	int dim = recognizer.GetFeatDim();
for (int i = 0; i < num; i++)
{
//printf("%d/%d\n", i + 1, num);
cv::Mat img = cv::imread(filenames[i]);
if (img.empty())
return false;
if (img.rows != H || img.cols != W || img.channels() != 3)
return false;
feats[i].ChangeSize(dim);
		if (!recognizer.ExtractFeature(img.data, img.step[0], ZQ_PIXEL_FMT_BGR, feats[i].pData, true))
return false;
}
out_min_score = FLT_MAX;
for (int i = 0; i < num - 1; i++)
{
for (int j = i + 1; j < num; j++)
{
float tmp_score = ZQ_MathBase::DotProduct(dim, feats[i].pData, feats[j].pData);
if (tmp_score <= out_min_score)
{
out_min_score = tmp_score;
out_i = i;
out_j = j;
}
}
}
return true;
}
};
}
#endif
|
GB_binop__fmod_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__fmod_fp32
// A.*B function (eWiseMult): GB_AemultB__fmod_fp32
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__fmod_fp32
// C+=b function (dense accum): GB_Cdense_accumb__fmod_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__fmod_fp32
// C=scalar+B GB_bind1st__fmod_fp32
// C=scalar+B' GB_bind1st_tran__fmod_fp32
// C=A+scalar GB_bind2nd__fmod_fp32
// C=A'+scalar GB_bind2nd_tran__fmod_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = fmodf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = fmodf (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__fmod_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__fmod_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__fmod_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__fmod_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = fmodf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__fmod_fp32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = fmodf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = fmodf (x, aij) ; \
}
GrB_Info GB_bind1st_tran__fmod_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = fmodf (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__fmod_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
openmp4.c | #include <math.h>
#include <omp.h>
void
cholesky(double *A, double *L, int n)
{
for (int i = 0; i < n; i++) {
double s = 0;
for (int k = 0; k < i; k++) {
s += L[k * n + i] * L[k * n + i];
}
L[i * n + i] = sqrt(A[i * n + i] - s);
#pragma omp parallel for
for (int j = i + 1; j < n; j++) {
double s = 0;
for (int k = 0; k < i; k++) {
s += L[k * n + i] * L[k * n + j];
}
L[i * n + j] = (A[i * n + j] - s) / L[i * n + i];
}
}
}
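/* A minimal usage sketch (illustrative, not part of the original file):
 * factors a small SPD matrix with the routine above. Note the factor is
 * stored in the upper triangle of L in row-major order, i.e. L[i*n+j]
 * with j >= i holds the transpose of the conventional lower factor.
 * The demo matrix and the CHOLESKY_DEMO guard are assumptions added here. */
#ifdef CHOLESKY_DEMO
#include <stdio.h>
int main(void)
{
    double A[9] = { 4, 2, 2,
                    2, 5, 1,
                    2, 1, 6 };
    double L[9] = { 0 };
    cholesky(A, L, 3);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++)
            printf("%8.4f ", L[i * 3 + j]);
        printf("\n"); /* expected rows: 2 1 1 / 0 2 0 / 0 0 2.2361 */
    }
    return 0;
}
#endif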
|
convolution_3x3_pack1to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = 9;
// im2col
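    // Layout note: bottom_im2col is (size x maxk x inch); for each input
    // channel, each of the 9 kernel taps (u,v) stores the outw*outh samples
    // it covers, so the 3x3 convolution reduces to a single int8 GEMM below.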
Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
{
const int gap = w - outw;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < 3; u++)
{
for (int v = 0; v < 3; v++)
{
const signed char* sptr = img.row<const signed char>(u) + v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
ptr[0] = sptr[0];
ptr[1] = sptr[1];
ptr[2] = sptr[2];
ptr[3] = sptr[3];
sptr += 4;
ptr += 4;
}
for (; j + 1 < outw; j += 2)
{
ptr[0] = sptr[0];
ptr[1] = sptr[1];
sptr += 2;
ptr += 2;
}
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += 1;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
static void conv3x3s2_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = 9;
// im2col
Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
{
const int gap = w * 2 - outw * 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < 3; u++)
{
for (int v = 0; v < 3; v++)
{
const signed char* sptr = img.row<const signed char>(u) + v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
ptr[0] = sptr[0];
ptr[1] = sptr[2];
ptr[2] = sptr[4];
ptr[3] = sptr[6];
sptr += 8;
ptr += 4;
}
for (; j + 1 < outw; j += 2)
{
ptr[0] = sptr[0];
ptr[1] = sptr[2];
sptr += 4;
ptr += 2;
}
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += 2;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
|
parallel.h | /* This file is adapted from the CMU Problem-Based Benchmark Suite,
* https://github.com/cmuparlay/pbbslib
*/
#pragma once
#include <iostream>
static std::string scheduler_name();
static int num_workers();
static int worker_id();
// parallel loop from start (inclusive) to end (exclusive) running
// function f.
// f should map long to void.
// granularity is the number of iterations to run sequentially
// if 0 (default) then the scheduler will decide
// conservative uses a safer scheduler
template <typename F>
static void parallel_for(long start, long end, F f,
long granularity = 0,
bool conservative = false);
// runs the thunks left and right in parallel.
// both left and right should map void to void
// conservative uses a safer scheduler
template <typename Lf, typename Rf>
static void par_do(Lf left, Rf right, bool conservative=false);
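// A minimal usage sketch (illustrative; the vector size and lambdas are
// assumptions, not part of the original header):
//
//   std::vector<long> v(1 << 20, 1);
//   std::atomic<long> total{0};
//   parallel_for(0, (long)v.size(), [&](long i) { total += v[i]; });
//   long a = 0, b = 0;
//   par_do([&] { a = 1; }, [&] { b = 2; });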
//***************************************
// cilkplus
#if defined(CILK)
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#include <iostream>
#include <sstream>
#define PAR_GRANULARITY 2000
inline std::string scheduler_name() {
return "Cilk";
}
inline int num_workers() {return __cilkrts_get_nworkers();}
inline int worker_id() {return __cilkrts_get_worker_number();}
inline void set_num_workers(int) {
throw std::runtime_error("don't know how to set worker count!");
}
// Not sure this still works
//__cilkrts_end_cilk();
// std::stringstream ss; ss << n;
// if (0 != __cilkrts_set_param("nworkers", ss.str().c_str()))
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool) {
cilk_spawn right();
left();
cilk_sync;
}
template <typename F>
inline void parallel_for(long start, long end, F f,
long granularity,
bool) {
if (granularity == 0)
cilk_for(long i=start; i<end; i++) f(i);
else if ((end - start) <= granularity)
for (long i=start; i < end; i++) f(i);
else {
long n = end-start;
long mid = (start + (9*(n+1))/16);
cilk_spawn parallel_for(start, mid, f, granularity);
parallel_for(mid, end, f, granularity);
cilk_sync;
}
}
// openmp
#elif defined(OPENMP)
#include <omp.h>
#define PAR_GRANULARITY 200000
inline std::string scheduler_name() {
return "OpenMP";
}
inline int num_workers() { return omp_get_max_threads(); }
inline int worker_id() { return omp_get_thread_num(); }
inline void set_num_workers(int n) { omp_set_num_threads(n); }
template <class F>
inline void parallel_for(long start, long end, F f,
long granularity,
bool conservative) {
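  // Note: granularity and conservative are accepted for interface
  // compatibility but are ignored by this OpenMP backend.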
_Pragma("omp parallel for")
for(long i=start; i<end; i++) f(i);
}
static bool in_par_do = false; // static: avoids multiple definitions when this header is included in several TUs
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool conservative) {
  if (!in_par_do) {
    in_par_do = true; // at top level start up tasking
    #pragma omp parallel
    #pragma omp single
    { // braces ensure a single thread spawns both tasks and waits on them
      #pragma omp task
      left();
      #pragma omp task
      right();
      #pragma omp taskwait
    }
    in_par_do = false;
  } else { // already started
    #pragma omp task
    left();
    #pragma omp task
    right();
    #pragma omp taskwait
  }
}
template <typename Job>
inline void parallel_run(Job job, int num_threads=0) {
job();
}
// taskparts
#elif defined(TASKPARTS_POSIX)
#include <taskparts/benchmark.hpp>
#include <algorithm>
inline std::string scheduler_name() {
return "taskparts";
}
static bool taskparts_launched = false; // static: avoids multiple definitions across TUs
inline int num_workers() { return taskparts::perworker::nb_workers(); }
inline int worker_id() { return taskparts::perworker::my_id(); }
using taskparts_scheduler = taskparts::bench_scheduler;
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool) {
if (taskparts_launched) {
taskparts::fork2join<Lf, Rf, taskparts_scheduler>(left, right);
} else {
left();
right();
}
}
template <typename F>
size_t get_granularity(size_t start, size_t end, F f) {
size_t done = 0;
size_t sz = 1;
int ticks = 0;
do {
sz = std::min(sz, end - (start + done));
auto tstart = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < sz; i++) f(start + done + i);
auto tstop = std::chrono::high_resolution_clock::now();
ticks = static_cast<int>((tstop - tstart).count());
done += sz;
sz *= 2;
} while (ticks < 1000 && done < (end - start));
return done;
}
template <typename F>
void parfor_(size_t start, size_t end, F f, size_t granularity,
bool conservative) {
if ((end - start) <= granularity)
for (size_t i = start; i < end; i++) f(i);
else {
size_t n = end - start;
// Not in middle to avoid clashes on set-associative
// caches on powers of 2.
size_t mid = (start + (9 * (n + 1)) / 16);
par_do([&]() { parfor_(start, mid, f, granularity, conservative); },
[&]() { parfor_(mid, end, f, granularity, conservative); },
conservative);
}
}
template <typename F>
inline void parallel_for(long start, long end, F f,
long granularity,
bool conservative) {
if (end <= start) return;
if (granularity == 0) {
long done = get_granularity(start, end, f);
granularity = std::max(done, (end - start) / (128 * num_workers()));
parfor_(start + done, end, f, granularity, conservative);
} else
parfor_(start, end, f, granularity, conservative);
}
template <typename Benchmark,
typename Benchmark_setup=decltype(taskparts::dflt_benchmark_setup),
typename Benchmark_teardown=decltype(taskparts::dflt_benchmark_teardown)
>
auto benchmark_taskparts(const Benchmark& benchmark,
Benchmark_setup benchmark_setup=taskparts::dflt_benchmark_setup,
Benchmark_teardown benchmark_teardown=taskparts::dflt_benchmark_teardown) {
taskparts::benchmark_nativeforkjoin([&] (auto sched) {
taskparts_launched = true;
benchmark(sched);
}, benchmark_setup, benchmark_teardown);
}
// c++
#else
inline std::string scheduler_name() {
return "sequential elision";
}
inline int num_workers() { return 1;}
inline int worker_id() { return 0;}
inline void set_num_workers(int) { ; }
#define PAR_GRANULARITY 1000
template <class F>
inline void parallel_for(long start, long end, F f,
long, // granularity,
bool) { // conservative) {
for (long i=start; i<end; i++) {
f(i);
}
}
template <typename Lf, typename Rf>
inline void par_do(Lf left, Rf right, bool) { // conservative) {
left(); right();
}
template <typename Job>
inline void parallel_run(Job job, int) { // num_threads=0) {
job();
}
#endif
|
singlenode_spgemm.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_SPGEMM_H_
#define SRC_SINGLENODE_SPGEMM_H_
#include <algorithm>
#include "src/bitvector.h"
#ifdef SPGEMM_NAIVE_SPA
bool cmp_int_spgemm_naive(int i1, int i2) { return i1 < i2; }
template <typename Ta, typename Tb, typename Tc>
void my_dcsrmultcsr(int m, int n, int k, Ta* a, int* ja, int* ia, Tb* b,
int* jb, int* ib, Tc** c, int** jc, int** ic, Tc* c_in,
int* jc_in, int* ic_in, void (*mul_fp)(Ta, Tb, Tc*, void*),
void (*add_fp)(Tc, Tc, Tc*, void*), void* vsp) {
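  // Two-pass sparse accumulator (SPA): pass 1 walks A and B once to count
  // the nonzeros of each row of C using the dense Cflags marker array;
  // pass 2 allocates C exactly and accumulates values into the dense Crow
  // buffer, scattering them back row by row.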
Tc* Crow = reinterpret_cast<Tc*>(_mm_malloc(n * sizeof(Tc), 64));
int* Cidxs = reinterpret_cast<int*>(_mm_malloc(n * sizeof(int), 64));
bool* Cflags = reinterpret_cast<bool*>(_mm_malloc(n * sizeof(bool), 64));
memset(Crow, 0, n * sizeof(Tc));
memset(Cflags, 0, n * sizeof(bool));
// Flag indicating that we should union the result with another CSR mat
bool Cin = (c_in != NULL) && (jc_in != NULL) && (ic_in != NULL);
int nnzc = 0;
for (int Arow = 0; Arow < m; Arow++) {
int Arow_nnz = 0;
// Load values from C_in into dense row vector
if (Cin) {
int row_nnz = 0;
for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
int Ccol = jc_in[Cnz_id - 1];
Cidxs[Arow_nnz] = Ccol - 1;
Cflags[Ccol - 1] = true;
row_nnz++;
Arow_nnz++;
}
nnzc += row_nnz;
}
for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
int Acol = ja[Anz_id - 1];
int row_nnz = 0;
for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
int Bcol = jb[Bnz_id - 1];
if (!Cflags[Bcol - 1]) {
Cidxs[Arow_nnz] = Bcol - 1;
Cflags[Bcol - 1] = true;
row_nnz++;
Arow_nnz++;
}
}
nnzc += row_nnz;
}
for (int idx = 0; idx < Arow_nnz; idx++) {
Cflags[Cidxs[idx]] = false;
}
}
_mm_free(Cidxs);
(*ic) = reinterpret_cast<int*>(_mm_malloc((m + 1) * sizeof(int), 64));
(*c) = reinterpret_cast<Tc*>(
_mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64));
(*jc) = reinterpret_cast<int*>(
_mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64));
// Multiply tc = ta * tb
int cnz_cnt = 0;
for (int Arow = 0; Arow < m; Arow++) {
int c_row_nz_start = cnz_cnt;
(*ic)[Arow] = cnz_cnt + 1;
// Load values from C_in into dense row vector
if (Cin) {
for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
int Ccol = jc_in[Cnz_id - 1];
(*jc)[cnz_cnt] = Ccol;
cnz_cnt++;
Cflags[Ccol - 1] = 1;
Crow[Ccol - 1] = c_in[Cnz_id - 1];
}
}
for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
int Acol = ja[Anz_id - 1];
for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
int Bcol = jb[Bnz_id - 1];
// if(Crow[Bcol-1] == 0.0)
if (Cflags[Bcol - 1] == 0) {
(*jc)[cnz_cnt] = Bcol;
cnz_cnt++;
}
Cflags[Bcol - 1] = 1;
// Crow[Bcol-1] += a[Anz_id-1] * b[Bnz_id-1];
Tc mul_tmp;
        mul_fp(a[Anz_id - 1], b[Bnz_id - 1], &mul_tmp, vsp);
        Tc add_tmp = Crow[Bcol - 1];
        add_fp(add_tmp, mul_tmp, &(Crow[Bcol - 1]), vsp);
#ifdef COUNT_FLOPS
mul_flops++;
add_flops++;
#endif
}
}
#ifdef SORTED
std::sort((*jc) + c_row_nz_start, (*jc) + cnz_cnt, cmp_int_spgemm_naive);
#endif
for (int Cnz_id = c_row_nz_start; Cnz_id < cnz_cnt; Cnz_id++) {
int Ccol = (*jc)[Cnz_id];
(*c)[Cnz_id] = Crow[Ccol - 1];
Crow[Ccol - 1] = 0.0;
Cflags[Ccol - 1] = 0;
}
}
(*ic)[m] = cnz_cnt + 1;
_mm_free(Crow);
_mm_free(Cflags);
}
#endif
#ifdef SPGEMM_PARALLEL_SPA
bool cmp_int(int i1, int i2) { return i1 < i2; }
template <typename Ta, typename Tb, typename Tc>
void my_dcsrmultcsr(int m, int n, int k, Ta* a, int* ja, int* ia, Tb* b,
int* jb, int* ib, Tc** c, int** jc, int** ic, Tc* c_in,
int* jc_in, int* ic_in, void (*mul_fp)(Ta, Tb, Tc*, void*),
void (*add_fp)(Tc, Tc, Tc*, void*), void* vsp) {
int num_threads = omp_get_max_threads();
assert(num_threads <= omp_get_max_threads());
Tc** Crows = new Tc* [num_threads];
int** Cidxs = new int* [num_threads];
bool** Cflags = new bool* [num_threads];
(*ic) = reinterpret_cast<int*>(_mm_malloc((m + 1) * sizeof(int), 64));
int nchunks = num_threads * 5;
int chunksize = (m + nchunks - 1) / nchunks;
uint64_t * nnzs =
reinterpret_cast<uint64_t*>(_mm_malloc((nchunks + 1) * sizeof(uint64_t), 64));
  memset(nnzs, 0, (nchunks + 1) * sizeof(uint64_t));
// Flag indicating that we should union the result with another CSR mat
bool Cin = (c_in != NULL) && (jc_in != NULL) && (ic_in != NULL);
#pragma omp parallel num_threads(num_threads)
{
int tid = omp_get_thread_num();
Crows[tid] = reinterpret_cast<Tc*>(_mm_malloc(n * sizeof(Tc), 64));
Cidxs[tid] = reinterpret_cast<int*>(_mm_malloc(n * sizeof(int), 64));
Cflags[tid] = reinterpret_cast<bool*>(_mm_malloc(n * sizeof(bool), 64));
memset(Cflags[tid], 0, n * sizeof(bool));
#pragma omp for schedule(dynamic)
for (int chunk = 0; chunk < nchunks; chunk++) {
int start_row = chunk * chunksize;
int end_row = (chunk + 1) * chunksize;
if (end_row > m) end_row = m;
// Determine number of nonzeros
uint64_t nnzmax = 0;
for (int Arow = start_row; Arow < end_row; Arow++) {
int Arow_nnz = 0;
// Load values from C_in into dense row vector
if (Cin) {
int row_nnz = 0;
for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
int Ccol = jc_in[Cnz_id - 1];
Cidxs[tid][Arow_nnz] = Ccol - 1;
Cflags[tid][Ccol - 1] = true;
row_nnz++;
Arow_nnz++;
}
nnzmax += row_nnz;
}
for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
int Acol = ja[Anz_id - 1];
int row_nnz = 0;
for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
int Bcol = jb[Bnz_id - 1];
if (!Cflags[tid][Bcol - 1]) {
Cidxs[tid][Arow_nnz] = Bcol - 1;
Cflags[tid][Bcol - 1] = true;
row_nnz++;
Arow_nnz++;
}
}
nnzmax += row_nnz;
}
for (int idx = 0; idx < Arow_nnz; idx++) {
Cflags[tid][Cidxs[tid][idx]] = false;
}
}
nnzs[chunk] = nnzmax;
}
_mm_free(Cidxs[tid]);
#pragma omp barrier
#pragma omp master
{
uint64_t nnzc = 0;
for (int chunk = 0; chunk < nchunks; chunk++) {
uint64_t tmp = nnzs[chunk];
nnzs[chunk] = nnzc;
nnzc += tmp;
}
nnzs[nchunks] = nnzc;
(*c) = reinterpret_cast<Tc*>(
_mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64));
(*jc) = reinterpret_cast<int*>(
_mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64));
}
#pragma omp barrier
#pragma omp for schedule(dynamic)
for (int chunk = 0; chunk < nchunks; chunk++) {
int start_row = chunk * chunksize;
int end_row = (chunk + 1) * chunksize;
if (end_row > m) end_row = m;
// Perform multiplication
int cnz_cnt = nnzs[chunk];
for (int Arow = start_row; Arow < end_row; Arow++) {
int c_row_nz_start = cnz_cnt;
(*ic)[Arow] = cnz_cnt + 1;
// Load values from C_in into dense row vector
if (Cin) {
for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
int Ccol = jc_in[Cnz_id - 1];
(*jc)[cnz_cnt] = Ccol;
cnz_cnt++;
Cflags[tid][Ccol - 1] = 1;
Crows[tid][Ccol - 1] = c_in[Cnz_id - 1];
}
}
for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
int Acol = ja[Anz_id - 1];
for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
int Bcol = jb[Bnz_id - 1];
if (!Cflags[tid][Bcol - 1]) {
(*jc)[cnz_cnt] = Bcol;
mul_fp(a[Anz_id - 1], b[Bnz_id - 1], &(Crows[tid][Bcol-1]), vsp);
cnz_cnt++;
} else {
Tc tmp_mul;
mul_fp(a[Anz_id - 1], b[Bnz_id - 1], &tmp_mul, vsp);
Tc tmp_add = Crows[tid][Bcol-1];
add_fp(
tmp_add, tmp_mul, &(Crows[tid][Bcol-1]), vsp);
}
Cflags[tid][Bcol - 1] = true;
}
}
#ifdef SORTED
std::sort((*jc) + c_row_nz_start, (*jc) + cnz_cnt, cmp_int);
#endif
      for (int Cnz_id = c_row_nz_start; Cnz_id < cnz_cnt; Cnz_id++) {
        int Ccol = (*jc)[Cnz_id];
        (*c)[Cnz_id] = Crows[tid][Ccol - 1];
        Cflags[tid][Ccol - 1] = false;
      }
}
} // for each chunk
_mm_free(Crows[tid]);
_mm_free(Cflags[tid]);
} // pragma omp parallel
(*ic)[m] = nnzs[nchunks] + 1;
  delete[] Crows;
  delete[] Cidxs;
  delete[] Cflags;
  _mm_free(nnzs);
}
#endif
template <typename Ta, typename Tb, typename Tc>
void my_dcscmultdense(int* row_inds, int* col_ptrs, int* col_indices, Ta* vals,
int num_partitions, int* row_pointers, int* col_starts,
int* edge_pointers, Tb* bvalue, int * bbitvector,
Tc* cvalue, int * cbitvector, int m, int n, int k,
int* nnz, void (*op_mul)(Ta, Tb, Tc*, void*), void (*op_add)(Tc, Tc, Tc*, void*), void* vsp) {
int* new_nnzs = new int[num_partitions];
memset(new_nnzs, 0, num_partitions * sizeof(int));
#pragma omp parallel for
for (int p = 0; p < num_partitions; p++) {
const int* column_offset = col_indices + col_starts[p];
const int* partitioned_row_offset = row_inds + edge_pointers[p];
const Ta* partitioned_val_offset = vals + edge_pointers[p];
const int* col_ptrs_cur = col_ptrs + col_starts[p];
// For each column
for (int j = 0; j < (col_starts[p + 1] - col_starts[p]) - 1; j++) {
int col_index = col_indices[col_starts[p] + j];
// For each B column
// _mm_prefetch((char*)(bvalue + column_offset[j + 4] + jj * k), _MM_HINT_T0);
int nz_idx = col_ptrs_cur[j];
for ( ; nz_idx < col_ptrs_cur[j+1] ; nz_idx++) {
int row_ind = partitioned_row_offset[nz_idx];
Ta Aval = partitioned_val_offset[nz_idx];
for (int jj = 0; jj < n; jj++) {
if (get_bitvector(col_index + jj * k, bbitvector)) {
Tb Bval = bvalue[col_index + jj * k];
if (get_bitvector(row_ind + jj * m, cbitvector)) {
              Tc mul_tmp;
              op_mul(Aval, Bval, &mul_tmp, vsp);
              Tc add_tmp = cvalue[row_ind + jj * m];
              op_add(add_tmp, mul_tmp, &(cvalue[row_ind + jj * m]), vsp);
            } else {
              op_mul(Aval, Bval, &(cvalue[row_ind + jj * m]), vsp);
new_nnzs[p]++;
}
set_bitvector(row_ind + jj * m, cbitvector);
}
}
}
}
}
for (int p = 0; p < num_partitions; p++) {
*nnz += new_nnzs[p];
}
}
#ifdef SPGEMM_PARALLEL_MERGE
template <typename Tc>
void merge(Tc* a, int* ja, int Aend, Tc* b, int* jb, int Bend, Tc* c, int* jc,
int* Cend, void (*add_fp)(Tc, Tc, Tc*, void*), void* vsp) {
// Merge c row and tc row into new_c row
int Astart = 0;
int Bstart = 0;
int cnz_cnt = 0;
while ((Astart < Aend) || (Bstart < Bend)) {
int Acol = (Astart != Aend) ? ja[Astart] : INT_MAX;
int Bcol = (Bstart != Bend) ? jb[Bstart] : INT_MAX;
if (Acol < Bcol) {
c[cnz_cnt] = a[Astart];
jc[cnz_cnt] = Acol;
cnz_cnt++;
Astart++;
} else if (Bcol < Acol) {
c[cnz_cnt] = b[Bstart];
jc[cnz_cnt] = Bcol;
cnz_cnt++;
Bstart++;
} else {
add_fp(a[Astart], b[Bstart], &(c[cnz_cnt]), vsp);
jc[cnz_cnt] = Acol;
cnz_cnt++;
Astart++;
Bstart++;
}
}
*Cend = cnz_cnt;
}
template <typename Tc>
void merge_sort(Tc* c_buf[2], int* jc_buf[2], int* current_buf, int* row_ptrs,
int row_cnt, void (*add_fp)(Tc, Tc, Tc*, void*), void* vsp) {
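  // Bottom-up pairwise merge: repeatedly merges adjacent sorted row
  // segments, ping-ponging between c_buf[0]/c_buf[1] and combining
  // duplicate column indices with add_fp, until one sorted row remains
  // in c_buf[*current_buf].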
int cur_row_cnt = row_cnt;
int result_ptr = 0;
while (cur_row_cnt > 1) {
// For each pair
result_ptr = 0;
int new_row_cnt = 0;
for (int r = 0; r < cur_row_cnt; r += 2) {
if (cur_row_cnt - r > 1) {
int Clen = 0;
merge<Tc>(c_buf[(*current_buf)] + row_ptrs[r],
jc_buf[(*current_buf)] + row_ptrs[r],
row_ptrs[r + 1] - row_ptrs[r],
c_buf[(*current_buf)] + row_ptrs[r + 1],
jc_buf[(*current_buf)] + row_ptrs[r + 1],
row_ptrs[r + 2] - row_ptrs[r + 1],
c_buf[1 - (*current_buf)] + result_ptr,
jc_buf[1 - (*current_buf)] + result_ptr, &Clen, add_fp, vsp);
row_ptrs[r / 2] = result_ptr;
row_ptrs[r / 2 + 1] = result_ptr + Clen;
result_ptr += Clen;
} else {
int Clen = (row_ptrs[r + 1] - row_ptrs[r]);
memcpy(c_buf[1 - (*current_buf)] + result_ptr,
c_buf[(*current_buf)] + row_ptrs[r], Clen * sizeof(Tc));
memcpy(jc_buf[1 - (*current_buf)] + result_ptr,
jc_buf[(*current_buf)] + row_ptrs[r], Clen * sizeof(int));
row_ptrs[r / 2] = result_ptr;
row_ptrs[r / 2 + 1] = result_ptr + Clen;
result_ptr += Clen;
}
new_row_cnt++;
}
cur_row_cnt = new_row_cnt;
(*current_buf) = 1 - (*current_buf);
}
}
template <typename Ta, typename Tb, typename Tc>
void my_dcsrmultcsr(int m, int n, int k, Ta* a, int* ja, int* ia, Tb* b,
int* jb, int* ib, Tc** c, int** jc, int** ic, Tc* c_in,
int* jc_in, int* ic_in, void (*mul_fp)(Ta, Tb, Tc*, void*),
void (*add_fp)(Tc, Tc, Tc*, void*), void* vsp) {
#ifndef SORTED
#error Merge kernels require sorted inputs
#endif
int num_threads = omp_get_max_threads();
assert(num_threads <= omp_get_max_threads());
int** Cidxs = new int* [num_threads];
bool** Cflags = new bool* [num_threads];
(*ic) = reinterpret_cast<int*>(_mm_malloc((m + 1) * sizeof(int), 64));
int nchunks = num_threads * 5;
int chunksize = (m + nchunks - 1) / nchunks;
int row_buf_len;
int row_ptr_len;
int* nnzs =
reinterpret_cast<int*>(_mm_malloc((nchunks + 1) * sizeof(int), 64));
int* max_row_ubounds =
reinterpret_cast<int*>(_mm_malloc((nchunks + 1) * sizeof(int), 64));
int* max_row_nums =
reinterpret_cast<int*>(_mm_malloc((nchunks + 1) * sizeof(int), 64));
  memset(nnzs, 0, (nchunks + 1) * sizeof(int));
  memset(max_row_ubounds, 0, (nchunks + 1) * sizeof(int));
  memset(max_row_nums, 0, (nchunks + 1) * sizeof(int));
// Flag indicating that we should union the result with another CSR mat
bool Cin = (c_in != NULL) && (jc_in != NULL) && (ic_in != NULL);
#pragma omp parallel num_threads(num_threads)
{
int tid = omp_get_thread_num();
Cidxs[tid] = reinterpret_cast<int*>(_mm_malloc(n * sizeof(int), 64));
Cflags[tid] = reinterpret_cast<bool*>(_mm_malloc(n * sizeof(bool), 64));
memset(Cflags[tid], 0, n * sizeof(bool));
#pragma omp for schedule(dynamic)
for (int chunk = 0; chunk < nchunks; chunk++) {
int start_row = chunk * chunksize;
int end_row = (chunk + 1) * chunksize;
if (end_row > m) end_row = m;
// Determine number of nonzeros
int nnzmax = 0;
int max_row_ub = 0;
int max_num_rows = 0;
for (int Arow = start_row; Arow < end_row; Arow++) {
int row_ub = 0;
int Arow_nnz = 0;
max_num_rows = std::max(max_num_rows, ia[Arow + 1] - ia[Arow]);
// Load values from C_in into dense row vector
if (Cin) {
row_ub += (ic_in[Arow + 1] - ic_in[Arow]);
int row_nnz = 0;
for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
int Ccol = jc_in[Cnz_id - 1];
Cidxs[tid][Arow_nnz] = Ccol - 1;
Cflags[tid][Ccol - 1] = true;
row_nnz++;
Arow_nnz++;
}
nnzmax += row_nnz;
}
for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
int Acol = ja[Anz_id - 1];
row_ub += (ib[Acol] - ib[Acol - 1]);
int row_nnz = 0;
for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
int Bcol = jb[Bnz_id - 1];
if (!Cflags[tid][Bcol - 1]) {
Cidxs[tid][Arow_nnz] = Bcol - 1;
Cflags[tid][Bcol - 1] = true;
row_nnz++;
Arow_nnz++;
}
}
nnzmax += row_nnz;
}
(*ic)[Arow] = Arow_nnz;
max_row_ub = std::max(max_row_ub, row_ub);
for (int idx = 0; idx < Arow_nnz; idx++) {
Cflags[tid][Cidxs[tid][idx]] = false;
}
}
nnzs[chunk] = nnzmax;
max_row_ubounds[chunk] = max_row_ub;
max_row_nums[chunk] = max_num_rows;
}
_mm_free(Cidxs[tid]);
#pragma omp barrier
#pragma omp master
{
int nnzc = 0;
      row_buf_len = 0;
      row_ptr_len = 0; // must be initialized before the std::max reduction below
for (int chunk = 0; chunk < nchunks; chunk++) {
int tmp = nnzs[chunk];
nnzs[chunk] = nnzc;
nnzc += tmp;
row_buf_len = std::max(row_buf_len, max_row_ubounds[chunk]);
row_ptr_len = std::max(row_ptr_len, max_row_nums[chunk]);
}
nnzs[nchunks] = nnzc;
(*c) = reinterpret_cast<Tc*>(
_mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(Tc), 64));
(*jc) = reinterpret_cast<int*>(
_mm_malloc((uint64_t)(nnzc) * (uint64_t)sizeof(int), 64));
}
#pragma omp barrier
// Allocate row buffers
Tc* c_buf[2];
int* jc_buf[2];
c_buf[0] = reinterpret_cast<Tc*>(_mm_malloc(row_buf_len * sizeof(Tc), 64));
c_buf[1] = reinterpret_cast<Tc*>(_mm_malloc(row_buf_len * sizeof(Tc), 64));
jc_buf[0] =
reinterpret_cast<int*>(_mm_malloc(row_buf_len * sizeof(int), 64));
jc_buf[1] =
reinterpret_cast<int*>(_mm_malloc(row_buf_len * sizeof(int), 64));
int* row_ptrs =
reinterpret_cast<int*>(_mm_malloc((row_ptr_len + 1) * sizeof(int), 64));
#pragma omp for schedule(dynamic)
for (int chunk = 0; chunk < nchunks; chunk++) {
int start_row = chunk * chunksize;
int end_row = (chunk + 1) * chunksize;
if (end_row > m) end_row = m;
// Perform multiplication
int cnz_cnt = nnzs[chunk];
for (int Arow = start_row; Arow < end_row; Arow++) {
int buf_nz_cnt = 0;
int c_row_nz_start = cnz_cnt;
int row_cnt = 0;
int Arow_nnz = (*ic)[Arow];
(*ic)[Arow] = cnz_cnt + 1;
if (Cin) {
row_ptrs[row_cnt] = buf_nz_cnt;
for (int Cnz_id = ic_in[Arow]; Cnz_id < ic_in[Arow + 1]; Cnz_id++) {
c_buf[0][buf_nz_cnt] = c_in[Cnz_id - 1];
jc_buf[0][buf_nz_cnt] = jc_in[Cnz_id - 1];
buf_nz_cnt++;
}
row_cnt++;
row_ptrs[row_cnt] = buf_nz_cnt;
}
for (int Anz_id = ia[Arow]; Anz_id < ia[Arow + 1]; Anz_id++) {
int Acol = ja[Anz_id - 1];
Ta Aval = a[Anz_id - 1];
row_ptrs[row_cnt] = buf_nz_cnt;
// Copy B row into c_buf[0] and jc_buf[0]
for (int Bnz_id = ib[Acol - 1]; Bnz_id < ib[Acol]; Bnz_id++) {
mul_fp(Aval, b[Bnz_id - 1], &(c_buf[0][buf_nz_cnt]), vsp);
jc_buf[0][buf_nz_cnt] = jb[Bnz_id - 1];
buf_nz_cnt++;
}
row_cnt++;
}
row_ptrs[row_cnt] = buf_nz_cnt;
cnz_cnt += Arow_nnz;
// Merge sort
int current_buf = 0;
merge_sort<Tc>(c_buf, jc_buf, ¤t_buf, row_ptrs, row_cnt, add_fp);
memcpy((*c) + c_row_nz_start, c_buf[current_buf],
Arow_nnz * sizeof(Tc));
memcpy((*jc) + c_row_nz_start, jc_buf[current_buf],
Arow_nnz * sizeof(int));
}
} // for each chunk
_mm_free(c_buf[0]);
_mm_free(c_buf[1]);
_mm_free(jc_buf[0]);
_mm_free(jc_buf[1]);
_mm_free(row_ptrs);
_mm_free(Cflags[tid]);
} // pragma omp parallel
  (*ic)[m] = nnzs[nchunks] + 1;
  delete[] Cidxs;
  delete[] Cflags;
  _mm_free(nnzs);
  _mm_free(max_row_ubounds);
  _mm_free(max_row_nums);
}
#endif
#endif // SRC_SINGLENODE_SPGEMM_H_
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "opencl.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
list *get_paths(char *filename)
{
    if (filename) filename[strcspn(filename, "\n\r")] = 0; /* strip trailing CR/LF */
    char *path;
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
list *lines = make_list();
while((path=fgetl(file))){
list_insert(lines, path);
}
fclose(file);
return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
char **get_random_paths(char **paths, int n, int m)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
int index;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
index = rand()%m;
random_paths[i] = paths[index];
//if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
char **replace_paths = calloc(n, sizeof(char*));
int i;
for(i = 0; i < n; ++i){
char replaced[4096];
find_replace(paths[i], find, replace, replaced);
replace_paths[i] = copy_string(replaced);
}
return replace_paths;
}
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image(paths[i], w, h, 3);
image gray = grayscale_image(im);
free_image(im);
im = gray;
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_paths(char **paths, int n, int w, int h)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], w, h);
X.vals[i] = im.data;
X.cols = im.h*im.w*im.c;
}
return X;
}
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
crop = random_augment_image(im, angle, aspect, min, max, size, size);
}
int flip = rand()%2;
if (flip) flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
/*
show_image(im, "orig");
show_image(crop, "crop");
cvWaitKey(0);
*/
//grayscale_image_3c(crop);
free_image(im);
X.vals[i] = crop.data;
X.cols = crop.h*crop.w*crop.c;
}
return X;
}
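/* Label files read below contain one box per line, "id x y w h", with
   x,y,w,h normalized to [0,1] relative to the image (YOLO-style). */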
box_label *read_boxes(char *filename, int *n)
{
//if (filename) filename[strcspn(filename, "\n\r")] = 0;
char *pos;
if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0';
if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0';
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
void randomize_boxes(box_label *b, int n)
{
int i;
for(i = 0; i < n; ++i){
box_label swap = b[i];
int index = rand()%n;
b[i] = b[index];
b[index] = swap;
}
}
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
if(boxes[i].x == 0 && boxes[i].y == 0) {
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count && i < 90; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .0 || h < .0) continue;
int index = (4+classes) * i;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
if (id < classes) truth[index+id] = 1;
}
free(boxes);
}
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if (w < .005 || h < .005) continue;
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
x = x*num_boxes - col;
y = y*num_boxes - row;
int index = (col+row*num_boxes)*(5+classes);
if (truth[index]) continue;
truth[index++] = 1;
if (id < classes) truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
}
free(boxes);
}
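/* Layout sketch for the truth buffer above: each of the num_boxes^2 grid
cells owns a (5+classes)-float slot ordered [objectness, class one-hot...,
x, y, w, h], where x and y are cell-relative. With hypothetical numbers
num_boxes=7, classes=20 and a box centered at (0.55, 0.25): col=3, row=1,
index = (3+1*7)*25 = 250, and the stored x,y are 0.85 and 0.75. */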
void load_rle(image im, int *rle, int n)
{
int count = 0;
int curr = 0;
int i,j;
for(i = 0; i < n; ++i){
for(j = 0; j < rle[i]; ++j){
im.data[count++] = curr;
}
curr = 1 - curr;
}
for(; count < im.h*im.w*im.c; ++count){
im.data[count] = curr;
}
}
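/* Decoding sketch: runs alternate starting with value 0, and any pixels
past the last run are filled with the next value. For rle = {3,2,4} the
output begins 0,0,0, 1,1, 0,0,0,0 and the rest of the image is set to 1. */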
void or_image(image src, image dest, int c)
{
int i;
for(i = 0; i < src.w*src.h; ++i){
if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
}
}
void exclusive_image(image src)
{
int k, j, i;
int s = src.w*src.h;
for(k = 0; k < src.c-1; ++k){
for(i = 0; i < s; ++i){
if (src.data[k*s + i]){
for(j = k+1; j < src.c; ++j){
src.data[j*s + i] = 0;
}
}
}
}
}
box bound_image(image im)
{
int x,y;
int minx = im.w;
int miny = im.h;
int maxx = 0;
int maxy = 0;
for(y = 0; y < im.h; ++y){
for(x = 0; x < im.w; ++x){
if(im.data[y*im.w + x]){
minx = (x < minx) ? x : minx;
miny = (y < miny) ? y : miny;
maxx = (x > maxx) ? x : maxx;
maxy = (y > maxy) ? y : maxy;
}
}
}
box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
//printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
return b;
}
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
int j;
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
image mask = resize_image(sized, mw, mh);
truth[i*(mw*mh+1)] = id;
for(j = 0; j < mw*mh; ++j){
truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
}
++i;
free_image(mask);
free_image(sized);
free(rle);
}
if(i < num_boxes) truth[i*(mw*mh+1)] = -1;
fclose(file);
free_image(part);
}
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
box b = bound_image(sized);
if(b.w > 0){
image crop = crop_image(sized, b.x, b.y, b.w, b.h);
image mask = resize_image(crop, mw, mh);
truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
int j;
for(j = 0; j < mw*mh; ++j){
truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
}
truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
free_image(crop);
free_image(mask);
++i;
}
free_image(sized);
free(rle);
}
fclose(file);
free_image(part);
}
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, "raw", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
if(count > num_boxes) count = num_boxes;
float x,y,w,h;
int id;
int i;
int sub = 0;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
if ((w < .001 || h < .001)) {
++sub;
continue;
}
truth[(i-sub)*5+0] = x;
truth[(i-sub)*5+1] = y;
truth[(i-sub)*5+2] = w;
truth[(i-sub)*5+3] = h;
truth[(i-sub)*5+4] = id;
}
free(boxes);
}
#define NUMCHARS 37
void print_letters(float *pred, int n)
{
int i;
for(i = 0; i < n; ++i){
int index = max_index(pred+i*NUMCHARS, NUMCHARS);
printf("%c", int_to_alphanum(index));
}
printf("\n");
}
void fill_truth_captcha(char *path, int n, float *truth)
{
char *begin = strrchr(path, '/');
begin = begin ? begin + 1 : path;
int i;
for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
int index = alphanum_to_int(begin[i]);
if(index > 35) printf("Bad %c\n", begin[i]);
truth[i*NUMCHARS+index] = 1;
}
for(;i < n; ++i){
truth[i*NUMCHARS + NUMCHARS-1] = 1;
}
}
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = make_matrix(n, k*NUMCHARS);
int i;
for(i = 0; i < n; ++i){
fill_truth_captcha(paths[i], k, d.y.vals[i]);
}
if(m) free(paths);
return d;
}
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.X.cols = 17100;
d.y = d.X;
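/* note: d.y aliases d.X here (the row pointers are shared), so a plain
free_data() with shallow == 0 would free the same rows twice; callers of
this encoder path need to account for the aliasing. */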
if(m) free(paths);
return d;
}
void fill_truth(char *path, char **labels, int k, float *truth)
{
int i;
memset(truth, 0, k*sizeof(float));
int count = 0;
for(i = 0; i < k; ++i){
if(strstr(path, labels[i])){
truth[i] = 1;
++count;
//printf("%s %s %d\n", path, labels[i], i);
}
}
if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
if(truth[j]){
int parent = hierarchy->parent[j];
while(parent >= 0){
truth[parent] = 1;
parent = hierarchy->parent[parent];
}
}
}
int i;
int count = 0;
for(j = 0; j < hierarchy->groups; ++j){
//printf("%d\n", count);
int mask = 1;
for(i = 0; i < hierarchy->group_size[j]; ++i){
if(truth[count + i]){
mask = 0;
break;
}
}
if (mask) {
for(i = 0; i < hierarchy->group_size[j]; ++i){
truth[count + i] = SECRET_NUM;
}
}
count += hierarchy->group_size[j];
}
}
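/* Sketch of the two passes above, with hypothetical indices: if truth[7]
is set and the hierarchy has parent[7] == 2 and parent[2] == -1, the first
pass also sets truth[2]. The second pass then marks every group containing
no active label with SECRET_NUM (a darknet sentinel meaning "ignore this
group in the loss") instead of hard zeros. */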
matrix load_regression_labels_paths(char **paths, int n, int k)
{
matrix y = make_matrix(n, k);
int i,j;
for(i = 0; i < n; ++i){
char labelpath[4096];
find_replace(paths[i], "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".BMP", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPeG", ".txt", labelpath);
find_replace(labelpath, ".Jpeg", ".txt", labelpath);
find_replace(labelpath, ".PNG", ".txt", labelpath);
find_replace(labelpath, ".TIF", ".txt", labelpath);
find_replace(labelpath, ".bmp", ".txt", labelpath);
find_replace(labelpath, ".jpeg", ".txt", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".tif", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
for(j = 0; j < k; ++j){
fscanf(file, "%f", &(y.vals[i][j]));
}
fclose(file);
}
return y;
}
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
matrix y = make_matrix(n, k);
int i;
for(i = 0; i < n && labels; ++i){
fill_truth(paths[i], labels, k, y.vals[i]);
if(hierarchy){
fill_hierarchy(y.vals[i], k, hierarchy);
}
}
return y;
}
matrix load_tags_paths(char **paths, int n, int k)
{
matrix y = make_matrix(n, k);
int i;
//int count = 0;
for(i = 0; i < n; ++i){
char label[4096];
find_replace(paths[i], "images", "labels", label);
find_replace(label, ".jpg", ".txt", label);
FILE *file = fopen(label, "r");
if (!file) continue;
//++count;
int tag;
while(fscanf(file, "%d", &tag) == 1){
if(tag < k){
y.vals[i][tag] = 1;
}
}
fclose(file);
}
//printf("%d/%d\n", count, n);
return y;
}
char **get_labels(char *filename)
{
list *plist = get_paths(filename);
char **labels = (char **)list_to_array(plist);
free_list(plist);
return labels;
}
void free_data(data d)
{
if(!d.shallow){
free_matrix(d.X);
free_matrix(d.y);
}else{
free(d.X.vals);
free(d.y.vals);
}
}
image get_segmentation_image(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
image get_segmentation_image2(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes+1);
int i;
for(i = 0; i < w*h; ++i){
mask.data[w*h*classes + i] = 1;
}
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
for(i = 0; i < w*h; ++i){
if(part.data[i]) mask.data[w*h*classes + i] = 0;
}
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y.rows = n;
d.y.cols = h*w*classes/div/div;
d.y.vals = calloc(d.X.rows, sizeof(float*));
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
//image mask = make_image(orig.w, orig.h, classes+1);
image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
if(flip) flip_image(sized_m);
d.y.vals[i] = sized_m.data;
free_image(orig);
free_image(mask);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, (coords+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
}
free(random_paths);
return d;
}
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
if(m) paths = get_random_paths(paths, 2*n, m);
int i,j;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*6;
int k = 2*(classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image im1 = load_image_color(paths[i*2], w, h);
image im2 = load_image_color(paths[i*2+1], w, h);
d.X.vals[i] = calloc(d.X.cols, sizeof(float));
memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
int id;
float iou;
char imlabel1[4096];
char imlabel2[4096];
find_replace(paths[i*2], "imgs", "labels", imlabel1);
find_replace(imlabel1, "jpg", "txt", imlabel1);
FILE *fp1 = fopen(imlabel1, "r");
while(fscanf(fp1, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
}
find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
find_replace(imlabel2, "jpg", "txt", imlabel2);
FILE *fp2 = fopen(imlabel2, "r");
while(fscanf(fp2, "%d %f", &id, &iou) == 2){
if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
}
for (j = 0; j < classes; ++j){
if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
d.y.vals[i][2*j] = 1;
d.y.vals[i][2*j+1] = 0;
} else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
d.y.vals[i][2*j] = 0;
d.y.vals[i][2*j+1] = 1;
} else {
d.y.vals[i][2*j] = SECRET_NUM;
d.y.vals[i][2*j+1] = SECRET_NUM;
}
}
fclose(fp1);
fclose(fp2);
free_image(im1);
free_image(im2);
}
if(m) free(paths);
return d;
}
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = (4+classes)*90;
d.y = make_matrix(1, k);
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, 5*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
image sized = make_image(w, h, orig.c);
fill_image(sized, .5);
float dw = jitter * orig.w;
float dh = jitter * orig.h;
float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
//float scale = rand_uniform(.25, 2);
float scale = 1;
float nw, nh;
if(new_ar < 1){
nh = scale * h;
nw = nh * new_ar;
} else {
nw = scale * w;
nh = nw / new_ar;
}
float dx = rand_uniform(0, w - nw);
float dy = rand_uniform(0, h - nh);
place_image(orig, nw, nh, dx, dy, sized);
random_distort_image(sized, hue, saturation, exposure);
int flip = rand()%2;
if(flip) flip_image(sized);
d.X.vals[i] = sized.data;
fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
free_image(orig);
}
free(random_paths);
return d;
}
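/* Geometry sketch for the placement above: the source image is drawn at
size (nw, nh) with its top-left corner at (dx, dy) inside the w x h canvas,
so labels are corrected with offset -dx/w, -dy/h and scale nw/w, nh/h in
the fill_truth_detection call. */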
void *load_thread(void *ptr)
{
//printf("Loading data: %d\n", rand());
load_args a = *(struct load_args*)ptr;
if(a.exposure == 0) a.exposure = 1;
if(a.saturation == 0) a.saturation = 1;
if(a.aspect == 0) a.aspect = 1;
if (a.type == OLD_CLASSIFICATION_DATA){
*a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
} else if (a.type == REGRESSION_DATA){
*a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == CLASSIFICATION_DATA){
*a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
} else if (a.type == SUPER_DATA){
*a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
} else if (a.type == WRITING_DATA){
*a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
} else if (a.type == ISEG_DATA){
*a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == INSTANCE_DATA){
*a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
} else if (a.type == SEGMENTATION_DATA){
*a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
} else if (a.type == REGION_DATA){
*a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == DETECTION_DATA){
*a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
} else if (a.type == SWAG_DATA){
*a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
} else if (a.type == COMPARE_DATA){
*a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
} else if (a.type == IMAGE_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = resize_image(*(a.im), a.w, a.h);
} else if (a.type == LETTERBOX_DATA){
*(a.im) = load_image_color(a.path, 0, 0);
*(a.resized) = letterbox_image(*(a.im), a.w, a.h);
} else if (a.type == TAG_DATA){
*a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
}
free(ptr);
return 0;
}
pthread_t load_data_in_thread(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
return thread;
}
void *load_threads(void *ptr)
{
int i;
load_args args = *(load_args *)ptr;
if (args.threads == 0) args.threads = 1;
data *out = args.d;
int total = args.n;
free(ptr);
data *buffers = calloc(args.threads, sizeof(data));
pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
for(i = 0; i < args.threads; ++i){
args.d = buffers + i;
args.n = (i+1) * total/args.threads - i * total/args.threads;
threads[i] = load_data_in_thread(args);
}
for(i = 0; i < args.threads; ++i){
pthread_join(threads[i], 0);
}
*out = concat_datas(buffers, args.threads);
out->shallow = 0;
for(i = 0; i < args.threads; ++i){
buffers[i].shallow = 1;
free_data(buffers[i]);
}
free(buffers);
free(threads);
return 0;
}
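/* The per-thread split above, args.n = (i+1)*total/threads - i*total/threads,
always sums to total and keeps the chunk sizes within one of each other;
e.g. total=10 with threads=3 yields chunks of 3, 3, 4. */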
void load_data_blocking(load_args args)
{
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
load_thread(ptr);
}
pthread_t load_data(load_args args)
{
pthread_t thread;
struct load_args *ptr = calloc(1, sizeof(struct load_args));
*ptr = args;
if(pthread_create(&thread, 0, load_threads, ptr)) {
error("Thread creation failed");
return 0;
}
return thread;
}
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
if(m) paths = get_random_paths(paths, n, m);
char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
if(m) free(paths);
int i;
for(i = 0; i < n; ++i) free(replace_paths[i]);
free(replace_paths);
return d;
}
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
d.y = load_labels_paths(paths, n, labels, k, 0);
if(m) free(paths);
return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
int i;
d.X.rows = n;
d.X.vals = calloc(n, sizeof(float*));
d.X.cols = w*h*3;
d.y.rows = n;
d.y.vals = calloc(n, sizeof(float*));
d.y.cols = w*scale * h*scale * 3;
for(i = 0; i < n; ++i){
image im = load_image_color(paths[i], 0, 0);
image crop = random_crop_image(im, w*scale, h*scale);
int flip = rand()%2;
if (flip) flip_image(crop);
image resize = resize_image(crop, w, h);
d.X.vals[i] = resize.data;
d.y.vals[i] = crop.data;
free_image(im);
}
if(m) free(paths);
return d;
}
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_regression_labels_paths(paths, n, k);
if(m) free(paths);
return d;
}
data select_data(data *orig, int *inds)
{
data d = {0};
d.shallow = 1;
d.w = orig[0].w;
d.h = orig[0].h;
d.X.rows = orig[0].X.rows;
d.y.rows = orig[0].X.rows;
d.X.cols = orig[0].X.cols;
d.y.cols = orig[0].y.cols;
d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
int i;
for(i = 0; i < d.X.rows; ++i){
d.X.vals[i] = orig[inds[i]].X.vals[i];
d.y.vals[i] = orig[inds[i]].y.vals[i];
}
return d;
}
data *tile_data(data orig, int divs, int size)
{
data *ds = calloc(divs*divs, sizeof(data));
int i, j;
#pragma omp parallel for
for(i = 0; i < divs*divs; ++i){
data d;
d.shallow = 0;
d.w = orig.w/divs * size;
d.h = orig.h/divs * size;
d.X.rows = orig.X.rows;
d.X.cols = d.w*d.h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(j = 0; j < orig.X.rows; ++j){
int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
}
ds[i] = d;
}
return ds;
}
data resize_data(data orig, int w, int h)
{
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
int i;
d.X.rows = orig.X.rows;
d.X.cols = w*h*3;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.y = copy_matrix(orig.y);
#pragma omp parallel for
for(i = 0; i < orig.X.rows; ++i){
image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
d.X.vals[i] = resize_image(im, w, h).data;
}
return d;
}
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.w=size;
d.h=size;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
d.y = load_labels_paths(paths, n, labels, k, hierarchy);
if(m) free(paths);
return d;
}
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.w = size;
d.h = size;
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_tags_paths(paths, n, k);
if(m) free(paths);
return d;
}
matrix concat_matrix(matrix m1, matrix m2)
{
int i, count = 0;
matrix m;
m.cols = m1.cols;
m.rows = m1.rows+m2.rows;
m.vals = calloc(m1.rows + m2.rows, sizeof(float*));
for(i = 0; i < m1.rows; ++i){
m.vals[count++] = m1.vals[i];
}
for(i = 0; i < m2.rows; ++i){
m.vals[count++] = m2.vals[i];
}
return m;
}
data concat_data(data d1, data d2)
{
data d = {0};
d.shallow = 1;
d.X = concat_matrix(d1.X, d2.X);
d.y = concat_matrix(d1.y, d2.y);
d.w = d1.w;
d.h = d1.h;
return d;
}
data concat_datas(data *d, int n)
{
int i;
data out = {0};
for(i = 0; i < n; ++i){
data new = concat_data(d[i], out);
free_data(out);
out = new;
}
return out;
}
data load_categorical_data_csv(char *filename, int target, int k)
{
data d = {0};
d.shallow = 0;
matrix X = csv_to_matrix(filename);
float *truth_1d = pop_column(&X, target);
float **truth = one_hot_encode(truth_1d, X.rows, k);
matrix y;
y.rows = X.rows;
y.cols = k;
y.vals = truth;
d.X = X;
d.y = y;
free(truth_1d);
return d;
}
data load_cifar10_data(char *filename)
{
data d = {0};
d.shallow = 0;
long i,j;
matrix X = make_matrix(10000, 3072);
matrix y = make_matrix(10000, 10);
d.X = X;
d.y = y;
FILE *fp = fopen(filename, "rb");
if(!fp) file_error(filename);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
if(fread(bytes, 1, 3073, fp) != 3073) file_error(filename);
int class = bytes[0];
y.vals[i][class] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i][j] = (double)bytes[j+1];
}
}
scale_data_rows(d, 1./255);
//normalize_data_rows(d);
fclose(fp);
return d;
}
void get_random_batch(data d, int n, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = rand()%d.X.rows;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
int j;
for(j = 0; j < n; ++j){
int index = offset + j;
memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
}
}
void smooth_data(data d)
{
int i, j;
float scale = 1. / d.y.cols;
float eps = .1;
for(i = 0; i < d.y.rows; ++i){
for(j = 0; j < d.y.cols; ++j){
d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
}
}
}
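/* Worked numbers for the smoothing above: with d.y.cols = 10 and eps = .1,
scale = .1, so a correct-class 1 becomes .1*.1 + .9*1 = 0.91 and every 0
becomes 0.01; each row still sums to 1. */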
data load_all_cifar10()
{
data d = {0};
d.shallow = 0;
int i,j,b;
matrix X = make_matrix(50000, 3072);
matrix y = make_matrix(50000, 10);
d.X = X;
d.y = y;
for(b = 0; b < 5; ++b){
char buff[256];
sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
FILE *fp = fopen(buff, "rb");
if(!fp) file_error(buff);
for(i = 0; i < 10000; ++i){
unsigned char bytes[3073];
if(fread(bytes, 1, 3073, fp) != 3073) file_error(buff);
int class = bytes[0];
y.vals[i+b*10000][class] = 1;
for(j = 0; j < X.cols; ++j){
X.vals[i+b*10000][j] = (double)bytes[j+1];
}
}
fclose(fp);
}
//normalize_data_rows(d);
scale_data_rows(d, 1./255);
smooth_data(d);
return d;
}
data load_go(char *filename)
{
FILE *fp = fopen(filename, "rb");
matrix X = make_matrix(3363059, 361);
matrix y = make_matrix(3363059, 361);
int row, col;
if(!fp) file_error(filename);
char *label;
int count = 0;
while((label = fgetl(fp))){
int i;
if(count == X.rows){
X = resize_matrix(X, count*2);
y = resize_matrix(y, count*2);
}
sscanf(label, "%d %d", &row, &col);
char *board = fgetl(fp);
int index = row*19 + col;
y.vals[count][index] = 1;
for(i = 0; i < 19*19; ++i){
float val = 0;
if(board[i] == '1') val = 1;
else if(board[i] == '2') val = -1;
X.vals[count][i] = val;
}
++count;
free(label);
free(board);
}
X = resize_matrix(X, count);
y = resize_matrix(y, count);
data d = {0};
d.shallow = 0;
d.X = X;
d.y = y;
fclose(fp);
return d;
}
void randomize_data(data d)
{
int i;
for(i = d.X.rows-1; i > 0; --i){
int index = rand()%i;
float *swap = d.X.vals[index];
d.X.vals[index] = d.X.vals[i];
d.X.vals[i] = swap;
swap = d.y.vals[index];
d.y.vals[index] = d.y.vals[i];
d.y.vals[i] = swap;
}
}
void scale_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
scale_array(d.X.vals[i], d.X.cols, s);
}
}
void translate_data_rows(data d, float s)
{
int i;
for(i = 0; i < d.X.rows; ++i){
translate_array(d.X.vals[i], d.X.cols, s);
}
}
data copy_data(data d)
{
data c = {0};
c.w = d.w;
c.h = d.h;
c.shallow = 0;
c.num_boxes = d.num_boxes;
c.boxes = d.boxes;
c.X = copy_matrix(d.X);
c.y = copy_matrix(d.y);
return c;
}
void normalize_data_rows(data d)
{
int i;
for(i = 0; i < d.X.rows; ++i){
normalize_array(d.X.vals[i], d.X.cols);
}
}
data get_data_part(data d, int part, int total)
{
data p = {0};
p.shallow = 1;
p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
p.X.cols = d.X.cols;
p.y.cols = d.y.cols;
p.X.vals = d.X.vals + d.X.rows * part / total;
p.y.vals = d.y.vals + d.y.rows * part / total;
return p;
}
data get_random_data(data d, int num)
{
data r = {0};
r.shallow = 1;
r.X.rows = num;
r.y.rows = num;
r.X.cols = d.X.cols;
r.y.cols = d.y.cols;
r.X.vals = calloc(num, sizeof(float *));
r.y.vals = calloc(num, sizeof(float *));
int i;
for(i = 0; i < num; ++i){
int index = rand()%d.X.rows;
r.X.vals[i] = d.X.vals[index];
r.y.vals[i] = d.y.vals[index];
}
return r;
}
data *split_data(data d, int part, int total)
{
data *split = calloc(2, sizeof(data));
int i;
int start = part*d.X.rows/total;
int end = (part+1)*d.X.rows/total;
data train;
data test;
train.shallow = test.shallow = 1;
test.X.rows = test.y.rows = end-start;
train.X.rows = train.y.rows = d.X.rows - (end-start);
train.X.cols = test.X.cols = d.X.cols;
train.y.cols = test.y.cols = d.y.cols;
train.X.vals = calloc(train.X.rows, sizeof(float*));
test.X.vals = calloc(test.X.rows, sizeof(float*));
train.y.vals = calloc(train.y.rows, sizeof(float*));
test.y.vals = calloc(test.y.rows, sizeof(float*));
for(i = 0; i < start; ++i){
train.X.vals[i] = d.X.vals[i];
train.y.vals[i] = d.y.vals[i];
}
for(i = start; i < end; ++i){
test.X.vals[i-start] = d.X.vals[i];
test.y.vals[i-start] = d.y.vals[i];
}
for(i = end; i < d.X.rows; ++i){
train.X.vals[i-(end-start)] = d.X.vals[i];
train.y.vals[i-(end-start)] = d.y.vals[i];
}
split[0] = train;
split[1] = test;
return split;
}
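/* Usage sketch (fold 0 of a hypothetical 5-fold split): both halves are
shallow, so free_data() releases only the row-pointer arrays, never the
rows still owned by d.
data *parts = split_data(d, 0, 5);
data train = parts[0];
data test = parts[1];
// ... train on train, evaluate on test ...
free_data(train);
free_data(test);
free(parts);
*/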
|
GB_unaryop__identity_uint8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_uint64
// op(A') function: GB_tran__identity_uint8_uint64
// C type: uint8_t
// A type: uint64_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint8_t z = (uint8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
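// Expansion sketch: with the definitions above, GB_CAST_OP (pC, pA)
// becomes (comments elided):
// { uint64_t aij = Ax [pA] ; uint8_t x = (uint8_t) aij ; Cx [pC] = x ; }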
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint8_uint64
(
uint8_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint8_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB038-truedepseconddimension-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Only the outermost loop can be parallelized in this program.
Data race pair: b[i][j]@65:7 vs. b[i][j-1]@65:15
*/
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,j;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int n=len, m=len;
double b[n][m];
for (i=0;i<n;i++)
#pragma omp parallel for schedule(dynamic)
for (j=1;j<m;j++)
b[i][j]=b[i][j-1];
return 0;
}
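/* A race-free variant (a sketch, not part of the benchmark): the i-loop
carries no dependence across rows, so it can be parallelized instead,
keeping the j-loop with its b[i][j-1] dependence serial:
#pragma omp parallel for private(j)
for (i=0;i<n;i++)
for (j=1;j<m;j++)
b[i][j]=b[i][j-1];
*/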
|
GB_binop__bor_uint8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__bor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__bor_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint8)
// A*D function (colscale): GB (_AxD__bor_uint8)
// D*A function (rowscale): GB (_DxB__bor_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__bor_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint8)
// C=scalar+B GB (_bind1st__bor_uint8)
// C=scalar+B' GB (_bind1st_tran__bor_uint8)
// C=A+scalar GB (_bind2nd__bor_uint8)
// C=A'+scalar GB (_bind2nd_tran__bor_uint8)
// C type: uint8_t
// A type: uint8_t
// A pattern? 0
// B type: uint8_t
// B pattern? 0
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) | (y) ;
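// Expansion sketch: GB_BINOP (GB_CX (p), aij, bij, i, j) becomes
// Cx [p] = (aij) | (bij) ;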
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT8 || GxB_NO_BOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bor_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint8_t alpha_scalar ;
uint8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bor_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bor_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bor_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) | (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bor_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) | (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) | (aij) ; \
}
GrB_Info GB (_bind1st_tran__bor_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) | (y) ; \
}
GrB_Info GB (_bind2nd_tran__bor_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__ceil_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ceil_fp64_fp64)
// op(A') function: GB (_unop_tran__ceil_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = ceil (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceil (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = ceil (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__ceil_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = ceil (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = ceil (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__ceil_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ark_heat1D_omp.c | /*---------------------------------------------------------------
* Programmer(s): Shelby Lockhart @ LLNL
*---------------------------------------------------------------
* Based on the serial code ark_heat1D.c developed by
* Daniel R. Reynolds @ SMU and parallelized with OpenMP
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a simple 1D heat equation,
* u_t = k*u_xx + f
 * for t in [0, 1], x in [0, 1], with initial conditions
 *    u(0,x) = 0,
 * stationary boundary conditions, i.e.
 *    u_t(t,0) = u_t(t,1) = 0,
 * and a point-source heating term of strength 0.01 at the
 * midpoint, x = 0.5 (see the source term applied in f() below).
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
 * This program solves the problem with a DIRK method (the RHS is
 * supplied as purely implicit), using a Newton iteration with
 * the SUNPCG linear solver, and a user-supplied Jacobian-vector
* product routine.
*
 * 10 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
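/*---------------------------------------------------------------
 * Added note (hedged sketch, not part of the original example):
 * with second-order centered differences on a uniform mesh of
 * spacing dx, the semi-discrete form of u_t = k*u_xx used below
 * is, at each interior node i,
 *    du_i/dt = k*(u[i-1] - 2*u[i] + u[i+1])/dx^2 + f_i,
 * which is exactly the c1*Y[i-1] + c2*Y[i] + c1*Y[i+1] stencil
 * applied in the RHS routine f(), with c1 = k/dx^2 and
 * c2 = -2*k/dx^2.
 *---------------------------------------------------------------*/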
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmp.h> /* OpenMP N_Vector types, fcts., macros */
#include <sunlinsol/sunlinsol_pcg.h> /* access to PCG SUNLinearSolver */
#include <sundials/sundials_types.h> /* defs. of realtype, sunindextype, etc */
#ifdef _OPENMP
#include <omp.h> /* OpenMP function defs. */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* user data structure */
typedef struct {
sunindextype N; /* number of intervals */
int nthreads; /* number of OpenMP threads */
realtype dx; /* mesh spacing */
realtype k; /* diffusion coefficient */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
int main(int argc, char *argv[]) {
/* general problem parameters */
realtype T0 = RCONST(0.0); /* initial time */
realtype Tf = RCONST(1.0); /* final time */
int Nt = 10; /* total number of output times */
realtype rtol = 1.e-6; /* relative tolerance */
realtype atol = 1.e-10; /* absolute tolerance */
UserData udata = NULL;
realtype *data;
sunindextype N = 201; /* spatial mesh size */
realtype k = 0.5; /* heat conductivity */
sunindextype i;
/* general problem variables */
int flag; /* reusable error-checking flag */
N_Vector y = NULL; /* empty vector for storing solution */
SUNLinearSolver LS = NULL; /* empty linear solver object */
void *arkode_mem = NULL; /* empty ARKStep memory structure */
FILE *FID, *UFID;
realtype t, dTout, tout;
int iout, num_threads;
long int nst, nst_a, nfe, nfi, nsetups, nli, nJv, nlcf, nni, ncfn, netf;
/* set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS environment variable */
#endif
if (argc > 1) /* overwrite with command line value, if supplied */
num_threads = (int) strtol(argv[1], NULL, 0);
/* allocate and fill udata structure */
udata = (UserData) malloc(sizeof(*udata));
udata->N = N;
udata->k = k;
udata->dx = RCONST(1.0)/(N-1); /* mesh spacing */
udata->nthreads = num_threads;
/* Initial problem output */
printf("\n1D Heat PDE test problem:\n");
printf(" N = %li\n", (long int) udata->N);
printf(" diffusion coefficient: k = %"GSYM"\n", udata->k);
/* Initialize data structures */
y = N_VNew_OpenMP(N, num_threads); /* Create OpenMP vector for solution */
if (check_flag((void *) y, "N_VNew_OpenMP", 0)) return 1;
N_VConst(0.0, y); /* Set initial conditions */
arkode_mem = ARKStepCreate(NULL, f, T0, y); /* Create the solver memory */
if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1;
/* Set routines */
flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
flag = ARKStepSetMaxNumSteps(arkode_mem, 10000); /* Increase max num steps */
if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1;
flag = ARKStepSetPredictorMethod(arkode_mem, 1); /* Specify maximum-order predictor */
if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1;
flag = ARKStepSStolerances(arkode_mem, rtol, atol); /* Specify tolerances */
if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
/* Initialize PCG solver -- no preconditioning, with up to N iterations */
LS = SUNLinSol_PCG(y, 0, (int) N);
if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;
/* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */
flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); /* Attach linear solver to ARKStep */
if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); /* Set the Jacobian routine */
if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;
/* Specify linearly implicit RHS, with non-time-dependent Jacobian */
flag = ARKStepSetLinear(arkode_mem, 0);
if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1;
/* output mesh to disk */
FID=fopen("heat_mesh.txt","w");
for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i);
fclose(FID);
/* Open output stream for results, access data array */
UFID=fopen("heat1D.txt","w");
data = N_VGetArrayPointer(y);
/* output initial condition to disk */
for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM"", data[i]);
fprintf(UFID,"\n");
/* Main time-stepping loop: calls ARKStep to perform the integration, then
prints results. Stops when the final time has been reached */
t = T0;
dTout = (Tf-T0)/Nt;
tout = T0+dTout;
printf(" t ||u||_rms\n");
printf(" -------------------------\n");
printf(" %10.6"FSYM" %10.6f\n", t, sqrt(N_VDotProd(y,y)/N));
for (iout=0; iout<Nt; iout++) {
flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
if (check_flag(&flag, "ARKStepEvolve", 1)) break;
printf(" %10.6"FSYM" %10.6f\n", t, sqrt(N_VDotProd(y,y)/N)); /* print solution stats */
if (flag >= 0) { /* successful solve: update output time */
tout += dTout;
tout = (tout > Tf) ? Tf : tout;
} else { /* unsuccessful solve: break */
fprintf(stderr,"Solver failure, stopping integration\n");
break;
}
/* output results to disk */
for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM"", data[i]);
fprintf(UFID,"\n");
}
printf(" -------------------------\n");
fclose(UFID);
/* Print some final statistics */
flag = ARKStepGetNumSteps(arkode_mem, &nst);
check_flag(&flag, "ARKStepGetNumSteps", 1);
flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
flag = ARKStepGetNumLinIters(arkode_mem, &nli);
check_flag(&flag, "ARKStepGetNumLinIters", 1);
flag = ARKStepGetNumJtimesEvals(arkode_mem, &nJv);
check_flag(&flag, "ARKStepGetNumJtimesEvals", 1);
flag = ARKStepGetNumLinConvFails(arkode_mem, &nlcf);
check_flag(&flag, "ARKStepGetNumLinConvFails", 1);
printf("\nFinal Solver Statistics:\n");
printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
printf(" Total linear solver setups = %li\n", nsetups);
printf(" Total linear iterations = %li\n", nli);
printf(" Total number of Jacobian-vector products = %li\n", nJv);
printf(" Total number of linear solver convergence failures = %li\n", nlcf);
printf(" Total number of Newton iterations = %li\n", nni);
printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
printf(" Total number of error test failures = %li\n", netf);
/* Clean up and return with successful completion */
N_VDestroy(y); /* Free vectors */
free(udata); /* Free user data */
ARKStepFree(&arkode_mem); /* Free integrator memory */
SUNLinSolFree(LS); /* Free linear solver */
return 0;
}
/*--------------------------------
* Functions called by the solver
*--------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData udata = (UserData) user_data; /* access problem data */
sunindextype N = udata->N; /* set variable shortcuts */
realtype k = udata->k;
realtype dx = udata->dx;
realtype *Y=NULL, *Ydot=NULL;
realtype c1, c2;
sunindextype i, isource;
Y = N_VGetArrayPointer(y); /* access data arrays */
if (check_flag((void *) Y, "N_VGetArrayPointer", 0)) return 1;
Ydot = N_VGetArrayPointer(ydot);
if (check_flag((void *) Ydot, "N_VGetArrayPointer", 0)) return 1;
N_VConst(0.0, ydot); /* Initialize ydot to zero */
/* iterate over domain, computing all equations */
c1 = k/dx/dx;
c2 = -RCONST(2.0)*k/dx/dx;
isource = N/2;
Ydot[0] = 0.0; /* left boundary condition */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++)
Ydot[i] = c1*Y[i-1] + c2*Y[i] + c1*Y[i+1];
Ydot[N-1] = 0.0; /* right boundary condition */
Ydot[isource] += 0.01/dx; /* source term */
return 0; /* Return with success */
}
/* Jacobian routine to compute J(t,y) = df/dy. */
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp)
{
UserData udata = (UserData) user_data; /* variable shortcuts */
sunindextype N = udata->N;
realtype k = udata->k;
realtype dx = udata->dx;
realtype *V=NULL, *JV=NULL;
realtype c1, c2;
sunindextype i;
V = N_VGetArrayPointer(v); /* access data arrays */
if (check_flag((void *) V, "N_VGetArrayPointer", 0)) return 1;
JV = N_VGetArrayPointer(Jv);
if (check_flag((void *) JV, "N_VGetArrayPointer", 0)) return 1;
N_VConst(0.0, Jv); /* initialize Jv product to zero */
/* iterate over domain, computing all Jacobian-vector products */
c1 = k/dx/dx;
c2 = -RCONST(2.0)*k/dx/dx;
JV[0] = 0.0;
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++)
JV[i] = c1*V[i-1] + c2*V[i] + c1*V[i+1];
JV[N-1] = 0.0;
return 0; /* Return with success */
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return 1; }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
return 0;
}
/*---- end of file ----*/
|
mat-sum_gpu.c | /*
This program performs matrix sum on the GPU with
dynamically allocated matrices.
Author: Gleison Souza Diniz Mendonça
Date: 04-01-2015
version 2.0
Run:
ipmacc mat-sum_gpu.c -o mat
./mat matrix-size
*/
#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef RUN_TEST
#define SIZE 1100
#elif defined(RUN_BENCHMARK)
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define PERCENT_DIFF_ERROR_THRESHOLD 0.01
// Initialize matrices.
void init(float *a, float *b, float *c_cpu, float *c_gpu) {
int i, j;
for (i = 0; i < SIZE; ++i) {
for (j = 0; j < SIZE; ++j) {
a[i * SIZE + j] = (float)i + j;
b[i * SIZE + j] = (float)i + j;
c_cpu[i * SIZE + j] = 0.0f;
c_gpu[i * SIZE + j] = 0.0f;
}
}
}
/// Matrix sum on the GPU; operands are SIZE x SIZE matrices.
void sum_GPU(float *a, float *b, float *c) {
int i, j;
#pragma omp target map(to : a[0 : SIZE *SIZE], b[0 : SIZE *SIZE]) \
map(tofrom : c[0 : SIZE *SIZE]) device(DEVICE_ID)
{
// fix: j is declared outside the loop and would otherwise be shared across
// threads, causing a data race; collapse(1) was a no-op and is dropped
#pragma omp parallel for private(j)
for (i = 0; i < SIZE; ++i) {
for (j = 0; j < SIZE; ++j) {
c[i * SIZE + j] = a[i * SIZE + j] + b[i * SIZE + j];
}
}
}
}
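/* Hedged alternative (added sketch, not part of the original benchmark):
 * declaring the loop indices locally and collapsing both loops avoids any
 * shared-index hazard and exposes all SIZE*SIZE iterations to the device
 * scheduler; since c is only written here, map(from:) would also suffice:
 *
 *   #pragma omp target teams distribute parallel for collapse(2) \
 *       map(to : a[0 : SIZE * SIZE], b[0 : SIZE * SIZE]) \
 *       map(from : c[0 : SIZE * SIZE]) device(DEVICE_ID)
 *   for (int i = 0; i < SIZE; ++i)
 *     for (int j = 0; j < SIZE; ++j)
 *       c[i * SIZE + j] = a[i * SIZE + j] + b[i * SIZE + j];
 */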
/// Matrix sum on the CPU; operands are SIZE x SIZE matrices.
void sum_CPU(float *a, float *b, float *c) {
int i, j;
for (i = 0; i < SIZE; ++i) {
for (j = 0; j < SIZE; ++j) {
c[i * SIZE + j] = a[i * SIZE + j] + b[i * SIZE + j];
}
}
}
int compareResults(float *b_cpu, float *b_gpu) {
int i, j, fail;
fail = 0;
for (i = 0; i < SIZE; i++) {
for (j = 0; j < SIZE; j++) {
if (percentDiff(b_cpu[i * SIZE + j], b_gpu[i * SIZE + j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
return fail;
}
int main(int argc, char *argv[]) {
double t_start, t_end;
float *a, *b, *c_cpu, *c_gpu;
int fail = 0;
a = (float *)malloc(sizeof(float) * SIZE * SIZE);
b = (float *)malloc(sizeof(float) * SIZE * SIZE);
c_cpu = (float *)malloc(sizeof(float) * SIZE * SIZE);
c_gpu = (float *)malloc(sizeof(float) * SIZE * SIZE);
fprintf(stdout, "<< Matrix Sum >>\n");
init(a, b, c_cpu, c_gpu);
t_start = rtclock();
sum_GPU(a, b, c_gpu);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
t_start = rtclock();
sum_CPU(a, b, c_cpu);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(c_cpu, c_gpu);
#endif
free(a);
free(b);
free(c_cpu);
free(c_gpu);
return fail;
}
|
3d25pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
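/* Usage sketch (added note; the variable names here are illustrative only):
 *
 *   struct timeval t0, t1, dt;
 *   gettimeofday(&t0, 0);
 *   // ... timed region ...
 *   gettimeofday(&t1, 0);
 *   timeval_subtract(&dt, &t1, &t0);               // dt = t1 - t0
 *   double secs = dt.tv_sec + dt.tv_usec * 1.0e-6;
 *
 * The benchmark loop in main() below uses exactly this pattern around the
 * stencil sweep.
 */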
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
  /* Nx, Ny, Nz, and Nt were previously read only when supplied on the
   * command line, leaving them uninitialized otherwise; require all four. */
  if (argc <= 4) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;  /* assigned below; a second malloc here previously leaked */
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 24;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=2*Nt-2;t1++) {
lbp=ceild(t1+2,2);
ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(24*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(24*t3+Nx+11,128));t4++) {
for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
lbv=max(128*t4,4*t5+4);
ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
core_slacpy.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlacpy.c, normal z -> s, Fri Sep 28 17:38:19 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_lacpy
*
* Copies all or part of a two-dimensional matrix A to another matrix B.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaGeneral: entire A,
* - PlasmaUpper: upper triangle,
* - PlasmaLower: lower triangle.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] m
* The number of rows of the matrices A and B.
* m >= 0.
*
* @param[in] n
* The number of columns of the matrices A and B.
* n >= 0.
*
* @param[in] A
* The m-by-n matrix to copy.
*
* @param[in] lda
* The leading dimension of the array A.
* lda >= max(1,m).
*
* @param[out] B
* The m-by-n copy of the matrix A.
* On exit, B = A ONLY in the locations specified by uplo.
*
* @param[in] ldb
* The leading dimension of the array B.
* ldb >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_slacpy(plasma_enum_t uplo, plasma_enum_t transa,
int m, int n,
const float *A, int lda,
float *B, int ldb)
{
if (transa == PlasmaNoTrans) {
LAPACKE_slacpy_work(LAPACK_COL_MAJOR,
lapack_const(uplo),
m, n,
A, lda,
B, ldb);
}
else if (transa == PlasmaTrans) {
switch (uplo) {
case PlasmaUpper:
for (int i = 0; i < imin(m, n); i++)
for (int j = i; j < n; j++)
B[j + i*ldb] = A[i + j*lda];
break;
case PlasmaLower:
for (int i = 0; i < m; i++)
            for (int j = 0; j <= imin(i, n-1); j++)
B[j + i*ldb] = A[i + j*lda];
break;
case PlasmaGeneral:
for (int i = 0; i < m; i++)
for (int j = 0; j < n; j++)
B[j + i*ldb] = A[i + j*lda];
break;
}
}
else {
switch (uplo) {
case PlasmaUpper:
for (int i = 0; i < imin(m, n); i++)
for (int j = i; j < n; j++)
B[j + i*ldb] = (A[i + j*lda]);
break;
case PlasmaLower:
for (int i = 0; i < m; i++)
            for (int j = 0; j <= imin(i, n-1); j++)
B[j + i*ldb] = (A[i + j*lda]);
break;
case PlasmaGeneral:
for (int i = 0; i < m; i++)
for (int j = 0; j < n; j++)
B[j + i*ldb] = (A[i + j*lda]);
break;
}
}
}
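/* Usage sketch (added note, not part of PLASMA; the values are illustrative):
 * copy the upper triangle of a 3-by-3 column-major matrix A into B without
 * transposition:
 *
 *   float A[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};  // lda = 3
 *   float B[9] = {0};                          // ldb = 3
 *   plasma_core_slacpy(PlasmaUpper, PlasmaNoTrans, 3, 3, A, 3, B, 3);
 *   // B now holds the upper triangle of A; its strict lower part is
 *   // left untouched.
 */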
/******************************************************************************/
void plasma_core_omp_slacpy(plasma_enum_t uplo, plasma_enum_t transa,
int m, int n,
const float *A, int lda,
float *B, int ldb,
plasma_sequence_t *sequence, plasma_request_t *request)
{
#pragma omp task depend(in:A[0:lda*n]) \
depend(out:B[0:ldb*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_slacpy(uplo, transa,
m, n,
A, lda,
B, ldb);
}
}
|
GB_AxB_saxpy_parallel.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy_parallel: C<M>=A*B, C=A*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Parallel matrix-matrix multiply, A*B with optional mask M, using the saxpy
// method. This method is used by GrB_mxm, GrB_vxm, and GrB_mxv. For the
// latter two methods, B on input is an nrows-by-1 column vector.
// The strategy is to "slice" (or partition) B, as B = [B0 B1 ... B(t-1)] if
// there are t threads. Then each thread k computes C(k) = A*B(k), and then
// the result is concatenated, as C = [C0 C1 ... C(t-1)].
// Each thread k computes an independent output matrix C(k), doing both its
// analysis and numeric phases.
// This strategy works well for OpenMP, but it could also be written in a
// purely inspector+executor style, like the GB_AxB_dot* methods. Those
// methods do the analysis in parallel, and first determine the size of the
// output matrix C. Then a parallel cumulative sum is computed, and the entire
// output matrix is allocated. Then each task of the numeric phase
// computes its part of the result C, without the need for any memory
// allocation by individual threads.
// This function, and the matrices C, M, A, and B are all CSR/CSC agnostic.
// For this discussion, suppose they are CSC, with vlen = # of rows, and vdim =
// # of columns.
// A*B is being computed, and the vector dimension of A must be identical to
// the vector length of B (as if both A and B are CSC matrices, and the number
// of columns of A is the same as the number of rows of B).
// The output matrix C = *Chandle has not been allocated, so C is NULL on
// input. The mask M is optional.
// The semiring defines C=A*B. flipxy modifies how the semiring multiply
// operator is applied. If false, then fmult(aik,bkj) is computed. If true,
// then the operands are swapped, and fmult(bkj,aik) is done instead.
// AxB_method selects the method to use:
// GxB_DEFAULT: the method is selected automatically
// GxB_AxB_GUSTAVSON: Gustavson's method for A*B
// GxB_AxB_HEAP: heap method for A*B
// GxB_AxB_HASH: hash method for A*B (FUTURE)
// The dot product method does not use this function.
// AxB_method_used reports the method actually chosen. This is for
// informational purposes only, so if a parallel C=A*B splits the work into
// multiple submatrix multiplications, and uses different methods on each
// submatrix, then AxB_method_used is the method chosen by thread zero.
// FUTURE:: hash-based method, and multi-phase Gustavson and Heap methods,
// which do not do any memory allocations in parallel, but instead use an
// inspector+executor style (like GB_AxB_dot*). This should work better on the
// GPU.
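// Added illustration (hedged, not in the original source): with nthreads = 2
// and a 4-column B whose columns cost roughly equal flops, the coarse slice
// gives B = [B0 B1] with B0 = B(:,0:1) and B1 = B(:,2:3), so that
// C = [A*B0 A*B1] is a pure horizontal concatenation. A fine slice may split
// a single column of B between the two threads, in which case the adjacent
// slices of C overlap in that column and the overlapping entries must be
// summed when the slices are stitched together (see GB_hcat_fine_slice below).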
#include "GB_mxm.h"
#include "GB_Sauna.h"
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (Slice, nthreads+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (AxB_methods_used, nthreads, sizeof (GrB_Desc_Value)) ; \
GB_FREE_MEMORY (bjnz_max, nthreads, sizeof (int64_t)) ; \
GB_FREE_MEMORY (Sauna_ids, nthreads, sizeof (int)) ; \
if (Cslice != NULL) \
{ \
for (int tid = 0 ; tid < nthreads ; tid++) \
{ \
GB_MATRIX_FREE (& (Cslice [tid])) ; \
} \
} \
if (Bslice != NULL) \
{ \
for (int tid = 0 ; tid < nthreads ; tid++) \
{ \
GB_MATRIX_FREE (& (Bslice [tid])) ; \
} \
} \
GB_FREE_MEMORY (Cslice, nthreads, sizeof (GrB_Matrix)) ; \
GB_FREE_MEMORY (Bslice, nthreads, sizeof (GrB_Matrix)) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_MATRIX_FREE (Chandle) ; \
}
GrB_Info GB_AxB_saxpy_parallel // parallel matrix-matrix multiply
(
GrB_Matrix *Chandle, // output matrix, NULL on input
GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
const GrB_Desc_Value AxB_method,// for auto vs user selection of methods
GrB_Desc_Value *AxB_method_used,// method selected by thread zero
bool *mask_applied, // if true, mask was applied
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Chandle != NULL) ; // C = (*Chandle) is NULL
ASSERT (*Chandle == NULL) ;
ASSERT_OK_OR_NULL (GB_check (M, "M for parallel A*B", GB0)) ;
ASSERT_OK (GB_check (A, "A for parallel A*B", GB0)) ;
ASSERT_OK (GB_check (B, "B for parallel A*B", GB0)) ;
ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
ASSERT_OK (GB_check (semiring, "semiring for parallel A*B", GB0)) ;
ASSERT (AxB_method_used != NULL) ;
GrB_Info info ;
//--------------------------------------------------------------------------
// get A and B
//--------------------------------------------------------------------------
if (B->nvec_nonempty < 0)
{
B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ;
}
if (A->nvec_nonempty < 0)
{
A->nvec_nonempty = GB_nvec_nonempty (A, NULL) ;
}
int64_t anz = GB_NNZ (A) ;
int64_t bnvec = B->nvec ;
int64_t bnz = GB_NNZ (B) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
// nthreads may be reduced after the flopcount is computed.
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + bnz, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// initialize workspace
//--------------------------------------------------------------------------
int64_t *restrict Slice = NULL ;
int64_t *restrict Bflops = NULL ;
int64_t *restrict Bflops_per_entry = NULL ;
// workspaces each of size nthreads:
GrB_Desc_Value *restrict AxB_methods_used = NULL ;
int64_t *restrict bjnz_max = NULL ;
int *Sauna_ids = NULL ;
GrB_Matrix *restrict Cslice = NULL ;
GrB_Matrix *restrict Bslice = NULL ;
//==========================================================================
// sequential C<M>=A*B
//==========================================================================
if (nthreads == 1)
{
// select the method
int64_t bjnz1max ;
GB_AxB_select (A, B, semiring, AxB_method, AxB_method_used, &bjnz1max) ;
// acquire a Sauna if Gustavson's method is being used
int Sauna_id = -2 ;
if (*AxB_method_used == GxB_AxB_GUSTAVSON)
{
GB_OK (GB_Sauna_acquire (1, &Sauna_id, AxB_method_used, Context)) ;
}
// C<M>=A*B
GrB_Info info1 = GB_AxB_saxpy_sequential (Chandle, M, Mask_comp, A, B,
semiring, flipxy, *AxB_method_used, bjnz1max, true, mask_applied,
Sauna_id) ;
// release the Sauna for Gustavson's method
if (*AxB_method_used == GxB_AxB_GUSTAVSON)
{
// info is reset, so info1 is used above
GB_OK (GB_Sauna_release (1, &Sauna_id)) ;
}
return ((info1 == GrB_OUT_OF_MEMORY) ? GB_OUT_OF_MEMORY : info1) ;
}
//==========================================================================
// parallel C<M>=A*B
//==========================================================================
// The # of threads may later be reduced if the problem is small, even to
// nthreads=1. At this point, however, nthreads > 1.
ASSERT (nthreads > 1) ;
//--------------------------------------------------------------------------
// count the flops and determine # of threads to use
//--------------------------------------------------------------------------
int64_t total_flops ;
bool fine_slice = (nthreads > bnvec) ;
bool flopresult ;
if (!fine_slice)
{
//----------------------------------------------------------------------
// slice B by flops
//----------------------------------------------------------------------
// Slice B so that each slice has a balanced amount of flops, to
// compute its slice of C. Each thread gets enough columns of B so
// that it has roughly total_flops / nthreads work to do. Individual
// columns are not sliced, so the final step to compute C is a
// concatenation, not as summation. This should give a very good load
// balance where there are enough columns of B, but at the cost of a
// more expensive symbolic analysis, taking O(bnz) time. The analysis
// is itself fully parallel, however. This method cannot parallelize
// A*B when B is a single column (GrB_mxv or GrB_vxm).
// thread tid will do columns Slice [tid] to Slice [tid+1]-1
// note that Bflops is initialized to zero
GB_CALLOC_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ;
if (Bflops == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
// Bflops [k] = # of flops to compute A*B(:,j) where j is the kth
// vector in B
GB_OK (GB_AxB_flopcount (&flopresult, Bflops, NULL,
(Mask_comp) ? NULL : M, A, B, 0, Context)) ;
// reduce # of threads, based on flop count and the chunk size
total_flops = Bflops [bnvec] ;
}
else
{
//----------------------------------------------------------------------
// fine slice of B by flops (split columns of B)
//----------------------------------------------------------------------
// Slice B so that each slice has an almost exactly balanced amount of
// flops to compute its slice of C. Each thread gets exactly the
// number of entries so that it does total_flops/nthreads work (rounded
// to the nearest number of entries in B).
// note that Bflops_per_entry is initialized to zero
GB_CALLOC_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ;
if (Bflops_per_entry == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
// Bflops_per_entry [p] = # of flops to compute A(:,k)*B(k,j)
// where B(k,j) is in Bi [p] and Bx [p].
GB_OK (GB_AxB_flopcount (&flopresult, NULL, Bflops_per_entry,
(Mask_comp) ? NULL : M, A, B, 0, Context)) ;
// reduce # of threads, based on flop count and the chunk size
total_flops = Bflops_per_entry [bnz] ;
}
//--------------------------------------------------------------------------
// find the size of each slice
//--------------------------------------------------------------------------
nthreads = GB_nthreads (total_flops, chunk, nthreads_max) ;
bool ok_pslice ;
if (!fine_slice)
{
// slice B by the flops needed for each vector
ok_pslice = GB_pslice (&Slice, Bflops, bnvec, nthreads) ;
}
else
{
// slice B by the flops needed for each entry
ok_pslice = GB_pslice (&Slice, Bflops_per_entry, bnz, nthreads) ;
}
// free workspace
GB_FREE_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ;
GB_FREE_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ;
if (!ok_pslice)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// discard the mask if it's too costly to use
//--------------------------------------------------------------------------
if (M != NULL && total_flops < GB_NNZ (M))
{
// The mask is too dense; discard it. mask_applied will be false.
M = NULL ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_CALLOC_MEMORY (AxB_methods_used, nthreads, sizeof (GrB_Desc_Value)) ;
GB_CALLOC_MEMORY (bjnz_max, nthreads, sizeof (int64_t)) ;
GB_CALLOC_MEMORY (Sauna_ids, nthreads, sizeof (int)) ;
GB_CALLOC_MEMORY (Cslice, nthreads, sizeof (GrB_Matrix)) ;
GB_CALLOC_MEMORY (Bslice, nthreads, sizeof (GrB_Matrix)) ;
if (AxB_methods_used == NULL || bjnz_max == NULL || Sauna_ids == NULL
|| Cslice == NULL || Bslice == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// construct each slice of B
//--------------------------------------------------------------------------
// If the problem is small enough so that nthreads has been reduced to 1,
// B is not sliced.
if (nthreads > 1)
{
if (fine_slice)
{
GB_OK (GB_fine_slice (B, nthreads, Slice, Bslice, Context)) ;
}
else
{
GB_OK (GB_slice (B, nthreads, Slice, Bslice, Context)) ;
}
}
//--------------------------------------------------------------------------
// select the method for each slice
//--------------------------------------------------------------------------
bool any_Gustavson = false ;
#pragma omp parallel for num_threads(nthreads) schedule(static,1) \
reduction(||:any_Gustavson)
for (int tid = 0 ; tid < nthreads ; tid++)
{
GrB_Desc_Value thread_method_to_use ;
GB_AxB_select (A, (nthreads == 1) ? B : Bslice [tid], semiring,
AxB_method, &thread_method_to_use, &(bjnz_max [tid])) ;
AxB_methods_used [tid] = thread_method_to_use ;
// collect all thread-specific info
any_Gustavson = any_Gustavson ||
(thread_method_to_use == GxB_AxB_GUSTAVSON) ;
}
(*AxB_method_used) = AxB_methods_used [0] ;
//--------------------------------------------------------------------------
// acquire the Saunas for each thread that needs it
//--------------------------------------------------------------------------
if (any_Gustavson)
{
// at least one thread needs a Sauna
GB_OK (GB_Sauna_acquire (nthreads, Sauna_ids, AxB_methods_used,
Context)) ;
}
else
{
// no thread needs a Sauna
for (int tid = 0 ; tid < nthreads ; tid++)
{
Sauna_ids [tid] = -2 ;
}
}
//--------------------------------------------------------------------------
// compute each slice of C = A*B with optional mask M
//--------------------------------------------------------------------------
// This is the only parallel region in which each thread allocates memory.
// The memory space is not known until the thread determines the size of
// its own output, in its analysis phase. Note the "reduction(&&:ok)"
// clause. This is the only place where a clause like that appears in
// SuiteSparse:GraphBLAS. It could be removed if C=A*B were to be
// computed with an inspector+executor style of algorithm.
// B has been "sliced"; in MATLAB notation, B = [B0 B1 B2 ... B(t-1] if
// there are t threads. Then each k thread computes its own Ck = A*Bk,
// and the results are concatenated below, as C = [C0 C1 ... C(t-1)].
// If a 'fine slice' was used for B, then C = C0+C1+...+C(t-1) must be
// computed.
// for all threads in parallel, with no synchronization except for these
// boolean reductions:
bool ok = true ; // false if any thread's malloc or realloc fails
bool panic = false ; // true if any critical section fails
bool allmask = true ; // true if all threads apply the mask
#pragma omp parallel for num_threads(nthreads) schedule(static,1) \
reduction(&&:allmask) reduction(||:panic) \
reduction(&&:ok)
for (int tid = 0 ; tid < nthreads ; tid++)
{
// each thread allocates its output, using malloc and realloc
bool thread_mask_applied = false ;
GrB_Info thread_info = GB_AxB_saxpy_sequential (&(Cslice [tid]), M,
Mask_comp, A, (nthreads == 1) ? B : Bslice [tid], semiring,
flipxy, AxB_methods_used [tid], bjnz_max [tid],
false, &thread_mask_applied, Sauna_ids [tid]) ;
// collect all thread-specific info
ok = ok && (thread_info == GrB_SUCCESS) ;
allmask = allmask && (thread_mask_applied) ;
panic = panic || (thread_info == GrB_PANIC) ;
}
//--------------------------------------------------------------------------
// check error conditions
//--------------------------------------------------------------------------
// panic if a critical section fails
if (panic) return (GrB_PANIC) ;
// check the return info from all the threads
if (!ok)
{
// out of memory
if (any_Gustavson)
{
// at least one thread used a Sauna; free and release all Sauna
// workspaces
for (int tid = 0 ; tid < nthreads ; tid++)
{
int Sauna_id = Sauna_ids [tid] ;
if (Sauna_id >= 0)
{
GB_Sauna_free (Sauna_id) ;
}
}
GB_OK (GB_Sauna_release (nthreads, Sauna_ids)) ;
}
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// release the Saunas
//--------------------------------------------------------------------------
if (any_Gustavson)
{
// at least one thread used a Sauna
GB_OK (GB_Sauna_release (nthreads, Sauna_ids)) ;
}
//--------------------------------------------------------------------------
// check if all threads applied the mask
//--------------------------------------------------------------------------
// if all threads applied the mask to their slices, then GB_accum_mask does
// not need to apply it to the concatenated C in GB_AxB_meta. If just some
// of them did, then GB_accum_mask needs to apply the mask again.
(*mask_applied) = allmask ;
//--------------------------------------------------------------------------
// concatenate or sum the slices of C
//--------------------------------------------------------------------------
// Each slice Cslice [tid] has the same dimensions and type as C. C is
// stored by column.
if (nthreads == 1)
{
// one thread, so only one slice: just copy Cslice[0] to C
(*Chandle) = Cslice [0] ;
Cslice [0] = NULL ;
}
else if (fine_slice)
{
// C = sum (Cslice [0..nthreads-1]). Adjacent slices of C can share
// columns, which must be summed. Columns in the middle of each slice
// are concatenated horizontally.
GB_OK (GB_hcat_fine_slice (Chandle, nthreads, Cslice, semiring->add,
Sauna_ids, Context)) ;
}
else
{
        // C = [Cslice(0) Cslice(1) ... Cslice(nthreads-1)] concatenated
// horizontally. Each slice contains entries that appear in a unique
// and contiguous subset of the columns of C.
GB_OK (GB_hcat_slice (Chandle, nthreads, Cslice, Context)) ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
ASSERT_OK (GB_check (*Chandle, "C for parallel A*B", GB0)) ;
return (GrB_SUCCESS) ;
}
|
exercise2.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file exercise2.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief Exercise 2
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <stdio.h>
#include <omp.h>
#include "utils.h"
/**
* @brief EX 2 - Data parallelism: parallel loop ** 1 to 16 THREADS **
*
* a) Parallelize loop w/static scheduling
* b) Parallelize loop w/dynamic scheduling for chunks = NITER/NTHR (same as static) and 1 (finest granularity)
* c) Same as 2a + 2b, with 1<<20 loop iterations and work(10)
*
* @return void
*/
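/* Added note (hedged, not part of the original exercise text): with
 * schedule(static), the iteration space is split into contiguous chunks
 * fixed at loop entry, e.g. 8 iterations on 2 threads give thread 0
 * iterations 0-3 and thread 1 iterations 4-7. With schedule(dynamic, 1),
 * each thread grabs one iteration at a time from a shared queue, trading
 * scheduling overhead for load balance when iteration costs vary.
 */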
void exercise()
{
#if 0 //2a
#pragma omp parallel for schedule(static) num_threads(NTHREADS)
for(int i=0; i< 1<<10; i++)
{
DEBUG_PRINT("%hu: I am executing iteration %hu!\n", omp_get_thread_num(), i);
work(1<<10);
DEBUG_PRINT("%hu: Done with iteration %hu!\n", omp_get_thread_num(), i);
}
#endif
#if 0 //2b
int M = (1<<10) / NTHREADS;
//#pragma omp parallel for schedule(dynamic,M) num_threads(NTHREADS)
#pragma omp parallel for schedule(dynamic, 1) num_threads(NTHREADS)
for(int i=0; i< 1<<10; i++)
{
DEBUG_PRINT("%hu: I am executing iteration %hu!\n", omp_get_thread_num(), i);
work(1<<10);
DEBUG_PRINT("%hu: Done with iteration %hu!\n", omp_get_thread_num(), i);
}
#endif
#if 1 //2c
int M = (1 << 20) / NTHREADS;
//#pragma omp parallel for schedule(static) num_threads(NTHREADS)
//#pragma omp parallel for schedule(dynamic,M) num_threads(NTHREADS)
#pragma omp parallel for schedule(dynamic, 1) num_threads(NTHREADS)
for (int i = 0; i < 1 << 20; i++)
{
DEBUG_PRINT("%hu: I am executing iteration %hu!\n", omp_get_thread_num(), i);
work(10);
DEBUG_PRINT("%hu: Done with iteration %hu!\n", omp_get_thread_num(), i);
}
#endif
}
|
oracle12c_fmt_plug.c | /*
* This software is Copyright (c) 2015, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* https://www.trustwave.com/Resources/SpiderLabs-Blog/Changes-in-Oracle-Database-12c-password-hashes/
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_oracle12c;
#elif FMT_REGISTERS_H
john_register_one(&fmt_oracle12c);
#else
#include <openssl/sha.h>
#include <string.h>
#include "arch.h"
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
//#undef _OPENMP
#include "misc.h"
#include "memory.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "sha2.h"
#include "pbkdf2_hmac_sha512.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Oracle12C"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR
#endif
#define PLAINTEXT_LENGTH 125 // XXX
#define CIPHERTEXT_LENGTH 160
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(ARCH_WORD_32)
#define BINARY_SIZE 64
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define FORMAT_TAG "$oracle12c$"
#define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
static struct fmt_tests tests[] = {
{"$oracle12c$e3243b98974159cc24fd2c9a8b30ba62e0e83b6ca2fc7c55177c3a7f82602e3bdd17ceb9b9091cf9dad672b8be961a9eac4d344bdba878edc5dcb5899f689ebd8dd1be3f67bff9813a464382381ab36b", "epsilon"},
{NULL}
};
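/* Added note (hedged reading of the code below): the blob after the
 * $oracle12c$ tag is 160 hex characters; the first 128 encode the 64-byte
 * SHA-512 digest and the trailing 32 encode the 16-byte AUTH_VFR_DATA
 * value, which get_salt() extends with the constant string
 * "AUTH_PBKDF2_SPEEDY_KEY" to form the PBKDF2 salt. */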
static struct custom_salt {
int saltlen;
unsigned char salt[16 + 22 + 1];
} *cur_salt;
#ifdef SIMD_COEF_64
static char (*saved_key)[SHA_BUF_SIZ*sizeof(ARCH_WORD_64)];
#else
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
#endif
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p = ciphertext;
if (strncasecmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH))
return 0;
if (strlen(ciphertext) > (FORMAT_TAG_LENGTH + CIPHERTEXT_LENGTH))
return 0;
p = strrchr(ciphertext, '$');
if (!p)
return 0;
p = p + 1;
if (strlen(p) != (BINARY_SIZE * 2 + 32))
return 0;
if (!ishexlc(p))
goto error;
return 1;
error:
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *p;
int i;
memset(&cs, 0, sizeof(cs));
p = ciphertext + FORMAT_TAG_LENGTH + 2 * BINARY_SIZE;
	// AUTH_VFR_DATA is a per-user random value, 16 bytes in length
for(i = 0; i < 16; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])];
strncpy((char*)cs.salt + 16, "AUTH_PBKDF2_SPEEDY_KEY", 22); // add constant string to the salt
cs.saltlen = 16 + 22;
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
int i;
char *p;
p = ciphertext + FORMAT_TAG_LENGTH;
for (i = 0; i < BINARY_SIZE && *p; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static int crypt_all(int *pcount, struct db_salt *salt)
{
int index;
const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
{
SHA512_CTX ctx;
int i = 0;
#if SIMD_COEF_64
int lens[SSE_GROUP_SZ_SHA512];
unsigned char *pin[SSE_GROUP_SZ_SHA512];
union {
ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA512];
unsigned char *poutc;
} x;
for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
x.pout[i] = (ARCH_WORD_32*)(crypt_out[index+i]);
}
pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt,
cur_salt->saltlen, 4096, &(x.poutc), BINARY_SIZE, 0);
#else
pbkdf2_sha512((const unsigned char*)saved_key[index],
strlen(saved_key[index]), cur_salt->salt,
cur_salt->saltlen, 4096,
(unsigned char*)crypt_out[index], BINARY_SIZE, 0);
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (i = 0; i < MAX_KEYS_PER_CRYPT; i++)
#endif
{
SHA512_Init(&ctx);
SHA512_Update(&ctx, (unsigned char*)crypt_out[index + i], BINARY_SIZE);
SHA512_Update(&ctx, cur_salt->salt, 16); // AUTH_VFR_DATA first 16 bytes
SHA512_Final((unsigned char*)crypt_out[index + i], &ctx);
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_oracle12c = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"
namespace xgboost {
// forward declare learner.
class LearnerImpl;
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum DataType {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4
};
/*!
 * \brief Meta information about the dataset, always resident in memory.
*/
class MetaInfo {
public:
/*! \brief number of rows in the data */
uint64_t num_row_{0};
/*! \brief number of columns in the data */
uint64_t num_col_{0};
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0};
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_;
/*!
* \brief specified root index of each instance,
* can be used for multi task setting
*/
std::vector<bst_uint> root_index_;
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_uint> group_ptr_;
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_;
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_;
/*! \brief version flag, used to check version of this info */
static const int kVersion = 3;
/*! \brief version that contains qid field */
static const int kVersionWithQid = 2;
/*! \brief default constructor */
MetaInfo() = default;
/*!
   * \brief Get the weight of the i-th instance.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*!
* \brief Get the root index of i-th instance.
* \param i Instance index.
* \return The pre-defined root index of i-th instance.
*/
inline unsigned GetRoot(size_t i) const {
return !root_index_.empty() ? root_index_[i] : 0U;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
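// Illustrative sketch (not in the original header): for labels {-3.0f, 1.0f,
// -0.5f}, LabelAbsSort() returns {2, 1, 0}, since |-0.5| < |1.0| < |-3.0|.
// The result is cached and only recomputed when labels_ changes size.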
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*/
void SetInfo(const char* key, std::string const& interface_str);
private:
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_uint index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief compare feature values (ascending order) */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<size_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid{};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
/*! \brief get i-th row from the batch */
inline Inst operator[](size_t i) const {
const auto& data_vec = data.HostVector();
const auto& offset_vec = offset.HostVector();
size_t size;
// In distributed mode, some partitions may not get any instance for a feature,
// so the size is set to zero.
if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
size = 0;
} else {
size = offset_vec[i + 1] - offset_vec[i];
}
return {data_vec.data() + offset_vec[i],
static_cast<Inst::index_type>(size)};
}
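// Example of the CSR layout the accessor above assumes (illustrative values):
// offset = {0, 2, 5}, data = {e0, e1, e2, e3, e4} describes two rows, where
// (*this)[0] spans data[0..2) and (*this)[1] spans data[2..5).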
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
SparsePage GetTranspose(int num_columns) const {
SparsePage transpose;
common::ParallelGroupBuilder<Entry> builder(&transpose.offset.HostVector(),
&transpose.data.HostVector());
const int nthread = omp_get_max_threads();
builder.InitBudget(num_columns, nthread);
long batch_size = static_cast<long>(this->Size()); // NOLINT(*)
#pragma omp parallel for default(none) shared(batch_size, builder) schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (const auto& entry : inst) {
builder.AddBudget(entry.index, tid);
}
}
builder.InitStorage();
#pragma omp parallel for default(none) shared(batch_size, builder) schedule(static)
for (long i = 0; i < batch_size; ++i) { // NOLINT(*)
int tid = omp_get_thread_num();
auto inst = (*this)[i];
for (const auto& entry : inst) {
builder.Push(
entry.index,
Entry(static_cast<bst_uint>(this->base_rowid + i), entry.fvalue),
tid);
}
}
return transpose;
}
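// Note on GetTranspose() above: it uses a two-pass parallel bucketing scheme.
// Pass 1 counts the per-column entries owned by each thread (AddBudget),
// InitStorage() then sizes the CSC arrays, and pass 2 writes the entries
// (Push), storing the global row id (base_rowid + i) as the transposed index.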
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
}
}
/*!
* \brief Push row block into the page.
* \param batch the row batch.
*/
void Push(const dmlc::RowBlock<uint32_t>& batch);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
/*!
* \brief Push one instance into page
* \param inst an instance row
*/
void Push(const Inst &inst);
};
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
explicit EllpackPage(DMatrix* dmat);
~EllpackPage();
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag;
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator& rhs) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
BatchIterator<T> begin() { return begin_iter_; }
BatchIterator<T> end() { return BatchIterator<T>(nullptr); }
private:
BatchIterator<T> begin_iter_;
};
/*!
* \brief This is a data structure that the user can pass to DMatrix::Create
* to create a DMatrix for training; the user can implement this data structure
* for customized data loading on a single machine.
*
* In a distributed setting, a customized dmlc::Parser is usually needed instead.
*/
template<typename T>
class DataSource : public dmlc::DataIter<T> {
public:
/*!
* \brief Meta information about the dataset.
* The subclass needs to be able to load this correctly from data.
*/
MetaInfo info;
};
/*!
* \brief Internal data structure used by XGBoost during training.
* There are two ways to create a customized DMatrix that reads in a user-defined format.
*
* - Provide a dmlc::Parser and pass it into DMatrix::Create.
*   Alternatively, if the data can be represented by a URL, define a new dmlc::Parser and register it via DMLC_REGISTER_DATA_PARSER;
*   this works best for user-defined data input sources such as databases or filesystems.
* - Provide a DataSource that can be passed to DMatrix::Create.
*   This can be used to reuse an in-memory data structure as a DMatrix.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/**
* \brief Gets batches. Use a range-based for loop over the BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches();
// The following are column meta data; implementations should be able to answer them fast.
/*! \return Whether the data is stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief get column density */
virtual float GetColDensity(size_t cidx) = 0;
/*! \brief virtual destructor */
virtual ~DMatrix() = default;
/*!
* \brief Save DMatrix to a local file.
* The saved file only works for a non-sharded dataset (single-machine training).
* This API is deprecated and its use is discouraged.
* \param fname The file name to save to.
*/
virtual void SaveToLocalFile(const std::string& fname);
/*!
* \brief Load DMatrix from a URI.
* \param uri The URI of the input.
* \param silent Whether to print information during loading.
* \param load_row_split Flag to read in part of the rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will also recognize the local binary format.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/*!
* \brief Create a new DMatrix by wrapping a row_iterator and meta info.
* \param source The source iterator of the data; the Create function takes ownership of the source.
* \param cache_prefix The path prefix of the temporary cache files used when the DMatrix is in external memory mode.
* This can be nullptr for common cases, in which case in-memory mode is used.
* \return The created DMatrix.
*/
static DMatrix* Create(std::unique_ptr<DataSource<SparsePage>>&& source,
const std::string& cache_prefix = "");
/*!
* \brief Create a DMatrix by loading data from a parser.
* The parser can be deleted after the DMatrix is created.
* \param parser The input data parser.
* \param cache_prefix The path prefix of the temporary cache files used when the DMatrix is in external memory mode.
* This can be nullptr for common cases, in which case in-memory mode is used.
* \param page_size Page size for external memory.
* \sa dmlc::Parser
* \note dmlc-core provides an efficient distributed data parser for the libsvm format.
* Users can create and register a customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
* See "dmlc-core/include/dmlc/data.h" for details.
* \return The created DMatrix.
*/
static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/*! \brief page size 32 MB */
static const size_t kPageSize = 32UL << 20UL;
protected:
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches() = 0;
};
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches() {
return GetRowBatches();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches() {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches() {
return GetSortedColumnBatches();
}
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches() {
return GetEllpackBatches();
}
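// Usage sketch (illustrative, not part of the original header): the
// specializations above let callers iterate batches with a range-based for
// loop, e.g. counting stored entries over all row batches:
//
//   size_t CountEntries(DMatrix* dmat) {
//     size_t nnz = 0;
//     for (auto& batch : dmat->GetBatches<SparsePage>()) {
//       for (size_t i = 0; i < batch.Size(); ++i) {
//         nnz += batch[i].size();  // Inst is a Span<Entry const>
//       }
//     }
//     return nnz;
//   }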
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif // XGBOOST_DATA_H_
|
attention.c | #include "darknet.h"
#include <sys/time.h>
#include <assert.h>
void extend_data_truth(data *d, int n, float val)
{
int i, j;
for(i = 0; i < d->y.rows; ++i){
d->y.vals[i] = realloc(d->y.vals[i], (d->y.cols+n)*sizeof(float));
for(j = 0; j < n; ++j){
d->y.vals[i][d->y.cols + j] = val;
}
}
d->y.cols += n;
}
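/* Note: extend_data_truth() grows every truth row by n columns initialized to
val; train_attention() below uses it to append divs*divs per-tile slots to
each label vector. */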
matrix network_loss_data(network *net, data test)
{
int i,b;
int k = 1;
matrix pred = make_matrix(test.X.rows, k);
float *X = calloc(net->batch*test.X.cols, sizeof(float));
float *y = calloc(net->batch*test.y.cols, sizeof(float));
for(i = 0; i < test.X.rows; i += net->batch){
for(b = 0; b < net->batch; ++b){
if(i+b == test.X.rows) break;
memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
memcpy(y+b*test.y.cols, test.y.vals[i+b], test.y.cols*sizeof(float));
}
network orig = *net;
net->input = X;
net->truth = y;
net->train = 0;
net->delta = 0;
forward_network(net);
*net = orig;
float *delta = net->layers[net->n-1].output;
for(b = 0; b < net->batch; ++b){
if(i+b == test.X.rows) break;
int t = max_index(y + b*test.y.cols, 1000);
float err = sum_array(delta + b*net->outputs, net->outputs);
pred.vals[i+b][0] = -err;
//pred.vals[i+b][0] = 1-delta[b*net->outputs + t];
}
}
free(X);
free(y);
return pred;
}
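/* Note: network_loss_data() runs the net batch-by-batch in inference mode
(net->train = 0) and scores each sample with the negative sum of the final
layer's output, assuming that layer emits per-output loss terms (the
commented-out alternative scores by the true-class delta instead). The
returned single-column matrix is higher for samples with lower loss. */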
void train_attention(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
int i, j;
float avg_cls_loss = -1;
float avg_att_loss = -1;
char *base = basecfg(cfgfile);
printf("%s\n", base);
printf("%d\n", ngpus);
network **nets = calloc(ngpus, sizeof(network*));
srand(time(0));
int seed = rand();
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
list *options = read_data_cfg(datacfg);
char *backup_directory = option_find_str(options, "backup", "/backup/");
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *train_list = option_find_str(options, "train", "data/train.list");
int classes = option_find_int(options, "classes", 2);
char **labels = get_labels(label_list);
list *plist = get_paths(train_list);
char **paths = (char **)list_to_array(plist);
printf("%d\n", plist->size);
int N = plist->size;
double time;
int divs=3;
int size=2;
load_args args = {0};
args.w = divs*net->w/size;
args.h = divs*net->h/size;
args.size = divs*net->w/size;
args.threads = 32;
args.hierarchy = net->hierarchy;
args.min = net->min_ratio*args.w;
args.max = net->max_ratio*args.w;
args.angle = net->angle;
args.aspect = net->aspect;
args.exposure = net->exposure;
args.saturation = net->saturation;
args.hue = net->hue;
args.paths = paths;
args.classes = classes;
args.n = imgs;
args.m = N;
args.labels = labels;
args.type = CLASSIFICATION_DATA;
data train;
data buffer;
pthread_t load_thread;
args.d = &buffer;
load_thread = load_data(args);
int epoch = (*net->seen)/N;
while(get_current_batch(net) < net->max_batches || net->max_batches == 0){
time = what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
data resized = resize_data(train, net->w, net->h);
extend_data_truth(&resized, divs*divs, 0);
data *tiles = tile_data(train, divs, size);
printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time = what_time_is_it_now();
float aloss = 0;
float closs = 0;
int z;
for (i = 0; i < divs*divs/ngpus; ++i) {
#pragma omp parallel for
for(j = 0; j < ngpus; ++j){
int index = i*ngpus + j;
extend_data_truth(tiles+index, divs*divs, SECRET_NUM);
matrix deltas = network_loss_data(nets[j], tiles[index]);
for(z = 0; z < resized.y.rows; ++z){
resized.y.vals[z][train.y.cols + index] = deltas.vals[z][0];
}
free_matrix(deltas);
}
}
int *inds = calloc(resized.y.rows, sizeof(int));
for(z = 0; z < resized.y.rows; ++z){
int index = max_index(resized.y.vals[z] + train.y.cols, divs*divs);
inds[z] = index;
for(i = 0; i < divs*divs; ++i){
resized.y.vals[z][train.y.cols + i] = (i == index)? 1 : 0;
}
}
data best = select_data(tiles, inds);
free(inds);
#ifdef GPU
if (ngpus == 1) {
closs = train_network(net, best);
} else {
closs = train_networks(nets, ngpus, best, 4);
}
#endif
for (i = 0; i < divs*divs; ++i) {
printf("%.2f ", resized.y.vals[0][train.y.cols + i]);
if((i+1)%divs == 0) printf("\n");
free_data(tiles[i]);
}
free_data(best);
printf("\n");
image im = float_to_image(64,64,3,resized.X.vals[0]);
//show_image(im, "orig");
//cvWaitKey(100);
/*
image im1 = float_to_image(64,64,3,tiles[i].X.vals[0]);
image im2 = float_to_image(64,64,3,resized.X.vals[0]);
show_image(im1, "tile");
show_image(im2, "res");
*/
#ifdef GPU
if (ngpus == 1) {
aloss = train_network(net, resized);
} else {
aloss = train_networks(nets, ngpus, resized, 4);
}
#endif
for(i = 0; i < divs*divs; ++i){
printf("%f ", nets[0]->output[1000 + i]);
if ((i+1) % divs == 0) printf("\n");
}
printf("\n");
free_data(resized);
free_data(train);
if(avg_cls_loss == -1) avg_cls_loss = closs;
if(avg_att_loss == -1) avg_att_loss = aloss;
avg_cls_loss = avg_cls_loss*.9 + closs*.1;
avg_att_loss = avg_att_loss*.9 + aloss*.1;
printf("%ld, %.3f: Att: %f, %f avg, Class: %f, %f avg, %f rate, %lf seconds, %ld images\n", get_current_batch(net), (float)(*net->seen)/N, aloss, avg_att_loss, closs, avg_cls_loss, get_current_rate(net), what_time_is_it_now()-time, *net->seen);
if(*net->seen/N > epoch){
epoch = *net->seen/N;
char buff[256];
sprintf(buff, "%s/%s_%d.weights",backup_directory,base, epoch);
save_weights(net, buff);
}
if(get_current_batch(net)%1000 == 0){
char buff[256];
sprintf(buff, "%s/%s.backup",backup_directory,base);
save_weights(net, buff);
}
}
char buff[256];
sprintf(buff, "%s/%s.weights", backup_directory, base);
save_weights(net, buff);
pthread_join(load_thread, 0);
free_network(net);
free_ptrs((void**)labels, classes);
free_ptrs((void**)paths, plist->size);
free_list(plist);
free(base);
}
void validate_attention_single(char *datacfg, char *filename, char *weightfile)
{
int i, j;
network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *leaf_list = option_find_str(options, "leaves", 0);
if(leaf_list) change_leaves(net->hierarchy, leaf_list);
char *valid_list = option_find_str(options, "valid", "data/train.list");
int classes = option_find_int(options, "classes", 2);
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
free_list(plist);
float avg_acc = 0;
float avg_topk = 0;
int *indexes = calloc(topk, sizeof(int));
int divs = 4;
int size = 2;
int extra = 0;
float *avgs = calloc(classes, sizeof(float));
int *inds = calloc(divs*divs, sizeof(int));
for(i = 0; i < m; ++i){
int class = -1;
char *path = paths[i];
for(j = 0; j < classes; ++j){
if(strstr(path, labels[j])){
class = j;
break;
}
}
image im = load_image_color(paths[i], 0, 0);
image resized = resize_min(im, net->w*divs/size);
image crop = crop_image(resized, (resized.w - net->w*divs/size)/2, (resized.h - net->h*divs/size)/2, net->w*divs/size, net->h*divs/size);
image rcrop = resize_image(crop, net->w, net->h);
//show_image(im, "orig");
//show_image(crop, "cropped");
//cvWaitKey(0);
float *pred = network_predict(net, rcrop.data);
//pred[classes + 56] = 0;
for(j = 0; j < divs*divs; ++j){
printf("%.2f ", pred[classes + j]);
if((j+1)%divs == 0) printf("\n");
}
printf("\n");
copy_cpu(classes, pred, 1, avgs, 1);
top_k(pred + classes, divs*divs, divs*divs, inds);
show_image(crop, "crop");
for(j = 0; j < extra; ++j){
int index = inds[j];
int row = index / divs;
int col = index % divs;
int y = row * crop.h / divs - (net->h - crop.h/divs)/2;
int x = col * crop.w / divs - (net->w - crop.w/divs)/2;
printf("%d %d %d %d\n", row, col, y, x);
image tile = crop_image(crop, x, y, net->w, net->h);
float *pred = network_predict(net, tile.data);
axpy_cpu(classes, 1., pred, 1, avgs, 1);
show_image(tile, "tile");
//cvWaitKey(10);
}
if(net->hierarchy) hierarchy_predictions(pred, net->outputs, net->hierarchy, 1, 1);
if(rcrop.data != resized.data) free_image(rcrop);
if(resized.data != im.data) free_image(resized);
free_image(im);
free_image(crop);
top_k(pred, classes, topk, indexes);
if(indexes[0] == class) avg_acc += 1;
for(j = 0; j < topk; ++j){
if(indexes[j] == class) avg_topk += 1;
}
printf("%d: top 1: %f, top %d: %f\n", i, avg_acc/(i+1), topk, avg_topk/(i+1));
}
}
void validate_attention_multi(char *datacfg, char *filename, char *weightfile)
{
int i, j;
network *net = load_network(filename, weightfile, 0);
set_batch_network(net, 1);
srand(time(0));
list *options = read_data_cfg(datacfg);
char *label_list = option_find_str(options, "labels", "data/labels.list");
char *valid_list = option_find_str(options, "valid", "data/train.list");
int classes = option_find_int(options, "classes", 2);
int topk = option_find_int(options, "top", 1);
char **labels = get_labels(label_list);
list *plist = get_paths(valid_list);
int scales[] = {224, 288, 320, 352, 384};
int nscales = sizeof(scales)/sizeof(scales[0]);
char **paths = (char **)list_to_array(plist);
int m = plist->size;
free_list(plist);
float avg_acc = 0;
float avg_topk = 0;
int *indexes = calloc(topk, sizeof(int));
for(i = 0; i < m; ++i){
int class = -1;
char *path = paths[i];
for(j = 0; j < classes; ++j){
if(strstr(path, labels[j])){
class = j;
break;
}
}
float *pred = calloc(classes, sizeof(float));
image im = load_image_color(paths[i], 0, 0);
for(j = 0; j < nscales; ++j){
image r = resize_min(im, scales[j]);
resize_network(net, r.w, r.h);
float *p = network_predict(net, r.data);
if(net->hierarchy) hierarchy_predictions(p, net->outputs, net->hierarchy, 1 , 1);
axpy_cpu(classes, 1, p, 1, pred, 1);
flip_image(r);
p = network_predict(net, r.data);
axpy_cpu(classes, 1, p, 1, pred, 1);
if(r.data != im.data) free_image(r);
}
free_image(im);
top_k(pred, classes, topk, indexes);
free(pred);
if(indexes[0] == class) avg_acc += 1;
for(j = 0; j < topk; ++j){
if(indexes[j] == class) avg_topk += 1;
}
printf("%d: top 1: %f, top %d: %f\n", i, avg_acc/(i+1), topk, avg_topk/(i+1));
}
}
void predict_attention(char *datacfg, char *cfgfile, char *weightfile, char *filename, int top)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", 0);
if(!name_list) name_list = option_find_str(options, "labels", "data/labels.list");
if(top == 0) top = option_find_int(options, "top", 1);
int i = 0;
char **names = get_labels(name_list);
clock_t time;
int *indexes = calloc(top, sizeof(int));
char buff[256];
char *input = buff;
while(1){
if(filename){
strncpy(input, filename, 256);
}else{
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input, 0, 0);
image r = letterbox_image(im, net->w, net->h);
//resize_network(&net, r.w, r.h);
//printf("%d %d\n", r.w, r.h);
float *X = r.data;
time=clock();
float *predictions = network_predict(net, X);
if(net->hierarchy) hierarchy_predictions(predictions, net->outputs, net->hierarchy, 1, 1);
top_k(predictions, net->outputs, top, indexes);
fprintf(stderr, "%s: Predicted in %f seconds.\n", input, sec(clock()-time));
for(i = 0; i < top; ++i){
int index = indexes[i];
//if(net->hierarchy) printf("%d, %s: %f, parent: %s \n",index, names[index], predictions[index], (net->hierarchy->parent[index] >= 0) ? names[net->hierarchy->parent[index]] : "Root");
//else printf("%s: %f\n",names[index], predictions[index]);
printf("%5.2f%%: %s\n", predictions[index]*100, names[index]);
}
if(r.data != im.data) free_image(r);
free_image(im);
if (filename) break;
}
}
void run_attention(int argc, char **argv)
{
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
int ngpus;
int *gpus = read_intlist(gpu_list, &ngpus, gpu_index);
int top = find_int_arg(argc, argv, "-t", 0);
int clear = find_arg(argc, argv, "-clear");
char *data = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
char *layer_s = (argc > 7) ? argv[7]: 0;
if(0==strcmp(argv[2], "predict")) predict_attention(data, cfg, weights, filename, top);
else if(0==strcmp(argv[2], "train")) train_attention(data, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_attention_single(data, cfg, weights);
else if(0==strcmp(argv[2], "validmulti")) validate_attention_multi(data, cfg, weights);
}
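/* Usage sketch (hypothetical file names; assumes "attention" is wired into
darknet's command dispatch):
./darknet attention train cfg/imagenet1k.data cfg/attention.cfg -gpus 0,1
./darknet attention predict cfg/imagenet1k.data cfg/attention.cfg backup/attention.weights dog.jpg
*/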
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/fourier.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p,
*magick_restrict q;
Quantum
*magick_restrict r;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance,
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
distance=pixel*pixel;
if (distance >= fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
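/*
Usage sketch (illustrative; assumes image, reconstruct, image_info, and
exception were obtained elsewhere, e.g. via ReadImage() and
AcquireExceptionInfo()):
double distortion = 0.0;
Image *difference = CompareImages(image, reconstruct,
RootMeanSquaredErrorMetric, &distortion, exception);
if (difference != (Image *) NULL)
{
(void) WriteImages(image_info, difference, "difference.png", exception);
difference = DestroyImage(difference);
}
*/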
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
fuzz;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
/*
Compute the absolute difference in pixels between two images.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickBooleanType
difference;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance,
pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
else
pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
distance=pixel*pixel;
if (distance >= fuzz)
{
channel_distortion[i]++;
difference=MagickTrue;
}
}
if (difference != MagickFalse)
channel_distortion[CompositePixelChannel]++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
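/*
Note: the absolute error metric above counts, per channel, the pixels whose
squared channel difference meets or exceeds the squared fuzz threshold; the
composite channel counts a pixel once if any of its channels differ.
*/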
static MagickBooleanType GetFuzzDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
const Quantum
*magick_restrict p,
*magick_restrict q;
size_t
local_area = 0;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
channel,q));
else
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
local_area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
{
area+=local_area;
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
return(status);
}
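/*
Note on the fuzz metric above: each channel accumulates QuantumScale-
normalized squared differences, so after dividing by the pixel area the
composite value is sqrt(sum of per-channel MSEs / channel count), i.e. a
root-mean-squared distortion in the [0,1] range.
*/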
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
const Quantum
*magick_restrict p,
*magick_restrict q;
size_t
local_area = 0;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=QuantumScale*fabs((double) (p[i]-(double)
GetPixelChannel(reconstruct_image,channel,q)));
else
distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q)));
channel_distortion[i]+=distance;
channel_distortion[CompositePixelChannel]+=distance;
}
local_area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
{
area+=local_area;
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
return(status);
}
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
double
area,
maximum_error,
mean_error;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
area=0.0;
maximum_error=0.0;
mean_error=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=fabs((double) (p[i]-(double)
GetPixelChannel(reconstruct_image,channel,q)));
else
distance=fabs((double) (Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q)));
distortion[i]+=distance;
distortion[CompositePixelChannel]+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
area++;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
area=PerceptibleReciprocal(area);
image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(status);
}
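/*
Note: GetMeanErrorPerPixel() stores three legacy statistics on the image:
mean_error_per_pixel (the average channel distance in quantum units),
normalized_mean_error (the average squared distance scaled to [0,1]), and
normalized_maximum_error (the largest distance scaled to [0,1]).
*/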
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
double
channel_distortion[MaxPixelChannels+1];
size_t
local_area = 0;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
channel,q));
else
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
local_area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
{
area+=local_area;
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
return(status);
}
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
const Image *image,const Image *reconstruct_image,double *distortion,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*image_view,
*reconstruct_view;
ChannelStatistics
*image_statistics,
*reconstruct_statistics;
double
area;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
channels,
i;
size_t
columns,
rows;
ssize_t
y;
/*
Normalize to account for variation due to lighting and exposure conditions.
*/
image_statistics=GetImageStatistics(image,exception);
reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
if ((image_statistics == (ChannelStatistics *) NULL) ||
(reconstruct_statistics == (ChannelStatistics *) NULL))
{
if (image_statistics != (ChannelStatistics *) NULL)
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
if (reconstruct_statistics != (ChannelStatistics *) NULL)
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
area=PerceptibleReciprocal(area);
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distortion[i]+=area*QuantumScale*((double) p[i]-
image_statistics[channel].mean)*(GetPixelChannel(reconstruct_image,
channel,q)-reconstruct_statistics[channel].mean);
else
distortion[i]+=area*QuantumScale*(Sa*p[i]-
image_statistics[channel].mean)*(Da*GetPixelChannel(
reconstruct_image,channel,q)-reconstruct_statistics[channel].mean);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Divide by the standard deviation.
*/
channels=0;
distortion[CompositePixelChannel]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma;
PixelChannel channel = GetPixelChannelChannel(image,i);
gamma=image_statistics[channel].standard_deviation*
reconstruct_statistics[channel].standard_deviation;
if (fabs(gamma) >= MagickEpsilon)
{
gamma=PerceptibleReciprocal(gamma);
distortion[i]=QuantumRange*gamma*distortion[i];
distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
channels++;
}
}
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
channels);
/*
Free resources.
*/
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
return(status);
}
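/*
Note: the function above computes the normalized cross correlation
NCC = E[(S - mean(S))*(R - mean(R))] / (stddev(S)*stddev(R))
in two passes: the first counts the unmasked area, the second accumulates the
covariance, and the final loop divides by the product of the standard
deviations; the composite channel is the RMS over the contributing channels.
*/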
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
ssize_t
i;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
if (channel == AlphaPixelChannel)
distance=QuantumScale*fabs((double) (p[i]-(double)
GetPixelChannel(reconstruct_image,channel,q)));
else
distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
GetPixelChannel(reconstruct_image,channel,q)));
if (distance > channel_distortion[i])
channel_distortion[i]=distance;
if (distance > channel_distortion[CompositePixelChannel])
channel_distortion[CompositePixelChannel]=distance;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
if (channel_distortion[j] > distortion[j])
distortion[j]=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
for (i=0; i <= MaxPixelChannels; i++)
if (fabs(distortion[i]) < MagickEpsilon)
distortion[i]=INFINITY;
else
distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]);
return(status);
}
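/*
Worked example for GetPeakSignalToNoiseRatio() above: the mean squared
distortion is computed on QuantumScale-normalized values, so the peak signal
is 1.0 and PSNR = 10*log10(1.0) - 10*log10(MSE) = -10*log10(MSE). For
MSE = 1.0e-4 this yields 40 dB; identical images (MSE ~ 0) map to INFINITY.
*/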
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
ChannelPerceptualHash
*channel_phash,
*reconstruct_phash;
const char
*artifact;
MagickBooleanType
normalize;
ssize_t
channel;
/*
Compute perceptual hash in the sRGB colorspace.
*/
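/*
Editor's note: the distance accumulated below is the sum, over image
moments and colorspaces, of squared differences between the two hashes
(or a normalized root term when the "phash:normalize" artifact is set).
*/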
channel_phash=GetImagePerceptualHash(image,exception);
if (channel_phash == (ChannelPerceptualHash *) NULL)
return(MagickFalse);
reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
{
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
channel_phash);
return(MagickFalse);
}
artifact=GetImageArtifact(image,"phash:normalize");
normalize=(artifact == (const char *) NULL) ||
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (channel=0; channel < MaxPixelChannels; channel++)
{
double
difference;
ssize_t
i;
difference=0.0;
for (i=0; i < MaximumNumberOfImageMoments; i++)
{
double
alpha,
beta;
ssize_t
j;
for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
{
alpha=channel_phash[channel].phash[j][i];
beta=reconstruct_phash[channel].phash[j][i];
if (normalize == MagickFalse)
difference+=(beta-alpha)*(beta-alpha);
else
difference+=sqrt((beta-alpha)*(beta-alpha)/
channel_phash[0].number_channels);
}
}
distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
distortion[CompositePixelChannel]+=difference;
}
/*
Free resources.
*/
reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
reconstruct_phash);
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
return(MagickTrue);
}
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=sqrt(distortion[i]);
return(status);
}
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0
CacheView
*image_view,
*reconstruct_view;
char
geometry[MagickPathExtent];
const char
*artifact;
double
area,
c1,
c2,
radius,
sigma;
KernelInfo
*kernel_info;
MagickBooleanType
status;
ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
/*
Compute structural similarity index @
https://en.wikipedia.org/wiki/Structural_similarity.
*/
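/*
Editor's note: the per-window statistic accumulated below is
SSIM(x,y)=((2*mu_x*mu_y+c1)*(2*sigma_xy+c2))/
((mu_x^2+mu_y^2+c1)*(sigma_x^2+sigma_y^2+c2)),
with c1=(SSIMK1*SSIML)^2 and c2=(SSIMK2*SSIML)^2; the final distortion is
the average of this statistic over all window positions (the area
accumulator).
*/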
radius=SSIMRadius;
artifact=GetImageArtifact(image,"compare:ssim-radius");
if (artifact != (const char *) NULL)
radius=StringToDouble(artifact,(char **) NULL);
sigma=SSIMSigma;
artifact=GetImageArtifact(image,"compare:ssim-sigma");
if (artifact != (const char *) NULL)
sigma=StringToDouble(artifact,(char **) NULL);
(void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
radius,sigma);
kernel_info=AcquireKernelInfo(geometry,exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
c1=pow(SSIMK1*SSIML,2.0);
artifact=GetImageArtifact(image,"compare:ssim-k1");
if (artifact != (const char *) NULL)
c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
c2=pow(SSIMK2*SSIML,2.0);
artifact=GetImageArtifact(image,"compare:ssim-k2");
if (artifact != (const char *) NULL)
c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
status=MagickTrue;
area=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,reconstruct_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
const Quantum
*magick_restrict p,
*magick_restrict q;
size_t
local_area = 0;
ssize_t
i,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
kernel_info->height,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
kernel_info->height,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) memset(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
x_pixel_mu[MaxPixelChannels+1],
x_pixel_sigma_squared[MaxPixelChannels+1],
xy_sigma[MaxPixelChannels+1],
y_pixel_mu[MaxPixelChannels+1],
y_pixel_sigma_squared[MaxPixelChannels+1];
const Quantum
*magick_restrict reference,
*magick_restrict target;
MagickRealType
*k;
ssize_t
v;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
(void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
(void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
(void) memset(xy_sigma,0,sizeof(xy_sigma));
(void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
(void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
k=kernel_info->values;
reference=p;
target=q;
for (v=0; v < (ssize_t) kernel_info->height; v++)
{
ssize_t
u;
for (u=0; u < (ssize_t) kernel_info->width; u++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
x_pixel,
y_pixel;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(
reconstruct_image,channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
x_pixel=QuantumScale*reference[i];
x_pixel_mu[i]+=(*k)*x_pixel;
x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
y_pixel=QuantumScale*
GetPixelChannel(reconstruct_image,channel,target);
y_pixel_mu[i]+=(*k)*y_pixel;
y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
xy_sigma[i]+=(*k)*x_pixel*y_pixel;
}
k++;
reference+=GetPixelChannels(image);
target+=GetPixelChannels(reconstruct_image);
}
reference+=GetPixelChannels(image)*columns;
target+=GetPixelChannels(reconstruct_image)*columns;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
ssim,
x_pixel_mu_squared,
x_pixel_sigmas_squared,
xy_mu,
xy_sigmas,
y_pixel_mu_squared,
y_pixel_sigmas_squared;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(
reconstruct_image,channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
xy_sigmas=xy_sigma[i]-xy_mu;
x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
(x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
channel_distortion[i]+=ssim;
channel_distortion[CompositePixelChannel]+=ssim;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
local_area++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
{
area+=local_area;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]+=channel_distortion[i];
}
}
image_view=DestroyCacheView(image_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
{
PixelChannel channel = GetPixelChannelChannel(image,j);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
continue;
distortion[j]/=area;
}
distortion[CompositePixelChannel]/=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
kernel_info=DestroyKernelInfo(kernel_info);
return(status);
}
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t
i;
status=GetStructuralSimilarityDistortion(image,reconstruct_image,
distortion,exception);
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=(1.0-(distortion[i]))/2.0;
return(status);
}
MagickExport MagickBooleanType GetImageDistortion(Image *image,
const Image *reconstruct_image,const MetricType metric,double *distortion,
ExceptionInfo *exception)
{
double
*channel_distortion;
MagickBooleanType
status;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
/*
Get image distortion.
*/
length=MaxPixelChannels+1UL;
channel_distortion=(double *) AcquireQuantumMemory(length,
sizeof(*channel_distortion));
if (channel_distortion == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_distortion,0,length*
sizeof(*channel_distortion));
switch (metric)
{
case AbsoluteErrorMetric:
{
status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case FuzzErrorMetric:
{
status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanAbsoluteErrorMetric:
{
status=GetMeanAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case MeanErrorPerPixelErrorMetric:
{
status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanSquaredErrorMetric:
{
status=GetMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case NormalizedCrossCorrelationErrorMetric:
default:
{
status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakAbsoluteErrorMetric:
{
status=GetPeakAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakSignalToNoiseRatioErrorMetric:
{
status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PerceptualHashErrorMetric:
{
status=GetPerceptualHashDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case RootMeanSquaredErrorMetric:
{
status=GetRootMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralSimilarityErrorMetric:
{
status=GetStructuralSimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralDissimilarityErrorMetric:
{
status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
}
*distortion=channel_distortion[CompositePixelChannel];
channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
(void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
*distortion);
return(status);
}
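/*
Usage sketch (editor's addition, not part of MagickCore): computing a
single distortion value; `image', `reconstruct', and `exception' are
assumed to come from the caller, e.g. via ReadImage() and
AcquireExceptionInfo().

double
distortion = 0.0;

if (GetImageDistortion(image,reconstruct,RootMeanSquaredErrorMetric,
&distortion,exception) != MagickFalse)
(void) FormatLocaleFile(stdout,"RMSE: %g\n",distortion);
*/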
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
const Image *reconstruct_image,const MetricType metric,
ExceptionInfo *exception)
{
double
*channel_distortion;
MagickBooleanType
status;
size_t
length;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
/*
Get image distortion.
*/
length=MaxPixelChannels+1UL;
channel_distortion=(double *) AcquireQuantumMemory(length,
sizeof(*channel_distortion));
if (channel_distortion == (double *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(channel_distortion,0,length*
sizeof(*channel_distortion));
status=MagickTrue;
switch (metric)
{
case AbsoluteErrorMetric:
{
status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case FuzzErrorMetric:
{
status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanAbsoluteErrorMetric:
{
status=GetMeanAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case MeanErrorPerPixelErrorMetric:
{
status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
exception);
break;
}
case MeanSquaredErrorMetric:
{
status=GetMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case NormalizedCrossCorrelationErrorMetric:
default:
{
status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakAbsoluteErrorMetric:
{
status=GetPeakAbsoluteDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PeakSignalToNoiseRatioErrorMetric:
{
status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
channel_distortion,exception);
break;
}
case PerceptualHashErrorMetric:
{
status=GetPerceptualHashDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case RootMeanSquaredErrorMetric:
{
status=GetRootMeanSquaredDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralSimilarityErrorMetric:
{
status=GetStructuralSimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
case StructuralDissimilarityErrorMetric:
{
status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
channel_distortion,exception);
break;
}
}
if (status == MagickFalse)
{
channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
return((double *) NULL);
}
return(channel_distortion);
}
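/*
Usage sketch (editor's addition, not part of MagickCore): retrieving
per-channel distortions; the returned vector has MaxPixelChannels+1
entries and must be freed by the caller.

double
*channel_distortion;

channel_distortion=GetImageDistortions(image,reconstruct,
MeanSquaredErrorMetric,exception);
if (channel_distortion != (double *) NULL)
{
(void) FormatLocaleFile(stdout,"composite MSE: %g\n",
channel_distortion[CompositePixelChannel]);
channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
}
*/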
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns MagickFalse
% as soon as any pixel differs; otherwise it returns MagickTrue.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
const Image *reconstruct_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
break;
for (x=0; x < (ssize_t) columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
channel,q)));
if (distance >= MagickEpsilon)
break;
}
if (i < (ssize_t) GetPixelChannels(image))
break;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (x < (ssize_t) columns)
break;
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
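/*
Usage sketch (editor's addition, not part of MagickCore):

if (IsImagesEqual(image,reconstruct,exception) != MagickFalse)
(void) FormatLocaleFile(stdout,"the images are identical\n");
*/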
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A return value other than 0 (MagickTrue) means the
% colors match exactly. Otherwise an error measure is computed by summing,
% over all pixels in the image, the distance squared in RGB space between
% each image pixel and its corresponding pixel in the reconstruct image. The
% error measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
const Image *reconstruct_image,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area,
maximum_error,
mean_error,
mean_error_per_pixel;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
area=0.0;
maximum_error=0.0;
mean_error_per_pixel=0.0;
mean_error=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
break;
for (x=0; x < (ssize_t) columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
channel,q)));
if (distance >= MagickEpsilon)
{
mean_error_per_pixel+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
}
area++;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
mean_error/area);
image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
return(status);
}
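/*
Usage sketch (editor's addition, not part of MagickCore): the error
measures land in the image->error members documented above.

if (SetImageColorMetric(image,reconstruct,exception) == MagickFalse)
(void) FormatLocaleFile(stdout,
"mean error per pixel: %g, normalized maximum error: %g\n",
image->error.mean_error_per_pixel,
image->error.normalized_maximum_error);
*/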
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() searches the image for the region that most closely
% resembles the reference image and returns the best match offset. In
% addition, it returns a similarity image such that an exact match location
% is completely white, a location where none of the pixels match is black,
% and partial matches are gray levels in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
static Image *CrossCorrelationImage(const Image *alpha_image,
const Image *beta_image,ExceptionInfo *exception)
{
Image
*clone_image,
*complex_conjugate,
*complex_multiplication,
*cross_correlation,
*fft_images;
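/*
Editor's note: this routine applies the cross-correlation theorem,
corr(alpha,beta)=IFFT(FFT(alpha)*conj(FFT(beta))), so the sliding dot
product over every offset costs three transforms instead of a direct
per-offset scan.
*/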
/*
Take the FFT of beta image.
*/
clone_image=CloneImage(beta_image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return(clone_image);
(void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,
exception);
clone_image=DestroyImageList(clone_image);
if (fft_images == (Image *) NULL)
return(fft_images);
/*
Take the complex conjugate of beta image.
*/
complex_conjugate=ComplexImages(fft_images,ConjugateComplexOperator,
exception);
fft_images=DestroyImageList(fft_images);
if (complex_conjugate == (Image *) NULL)
return(complex_conjugate);
/*
Take the FFT of the alpha image.
*/
clone_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
{
complex_conjugate=DestroyImageList(complex_conjugate);
return(clone_image);
}
(void) SetImageArtifact(clone_image,"fourier:normalize","inverse");
fft_images=ForwardFourierTransformImage(clone_image,MagickFalse,exception);
clone_image=DestroyImageList(clone_image);
if (fft_images == (Image *) NULL)
{
complex_conjugate=DestroyImageList(complex_conjugate);
return(fft_images);
}
complex_conjugate->next->next=fft_images;
/*
Do complex multiplication.
*/
(void) SetImageArtifact(complex_conjugate,"compose:clamp","false");
complex_multiplication=ComplexImages(complex_conjugate,
MultiplyComplexOperator,exception);
complex_conjugate=DestroyImageList(complex_conjugate);
if (fft_images == (Image *) NULL)
return(fft_images);
/*
Do the IFT and return the cross-correlation result.
*/
cross_correlation=InverseFourierTransformImage(complex_multiplication,
complex_multiplication->next,MagickFalse,exception);
complex_multiplication=DestroyImageList(complex_multiplication);
return(cross_correlation);
}
static Image *NCCDivideImage(const Image *alpha_image,const Image *beta_image,
ExceptionInfo *exception)
{
CacheView
*alpha_view,
*beta_view;
Image
*divide_image;
MagickBooleanType
status;
ssize_t
y;
/*
Divide one image into another.
*/
divide_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
if (divide_image == (Image *) NULL)
return(divide_image);
status=MagickTrue;
alpha_view=AcquireAuthenticCacheView(divide_image,exception);
beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(beta_image,divide_image,divide_image->rows,1)
#endif
for (y=0; y < (ssize_t) divide_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(alpha_view,0,y,divide_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) divide_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(divide_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(divide_image,i);
PixelTrait traits = GetPixelChannelTraits(divide_image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (fabs(p[i]) >= MagickEpsilon)
q[i]*=PerceptibleReciprocal(QuantumScale*p[i]);
}
p+=GetPixelChannels(beta_image);
q+=GetPixelChannels(divide_image);
}
if (SyncCacheViewAuthenticPixels(alpha_view,exception) == MagickFalse)
status=MagickFalse;
}
beta_view=DestroyCacheView(beta_view);
alpha_view=DestroyCacheView(alpha_view);
if (status == MagickFalse)
divide_image=DestroyImage(divide_image);
return(divide_image);
}
static MagickBooleanType NCCMaximaImage(const Image *image,double *maxima,
RectangleInfo *offset,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Identify the maxima value in the image and its location.
*/
status=MagickTrue;
*maxima=0.0;
offset->x=0;
offset->y=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
sum = 0.0;
ssize_t
channels = 0,
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
sum+=p[i];
channels++;
}
if ((channels != 0) && ((sum/channels) > *maxima))
{
*maxima=sum/channels;
offset->x=x;
offset->y=y;
}
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
static MagickBooleanType NCCMultiplyImage(Image *image,const double factor,
const ChannelStatistics *channel_statistics,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Multiply each pixel by a factor.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics != (const ChannelStatistics *) NULL)
q[i]*=QuantumScale*channel_statistics[channel].standard_deviation;
q[i]*=factor;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
static Image *NCCSquareImage(const Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*square_image;
MagickBooleanType
status;
ssize_t
y;
/*
Square each pixel in the image.
*/
square_image=CloneImage(image,0,0,MagickTrue,exception);
if (square_image == (Image *) NULL)
return(square_image);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(square_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(square_image,square_image,square_image->rows,1)
#endif
for (y=0; y < (ssize_t) square_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,square_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) square_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(square_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(square_image,i);
PixelTrait traits = GetPixelChannelTraits(square_image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]*=QuantumScale*q[i];
}
q+=GetPixelChannels(square_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
square_image=DestroyImage(square_image);
return(square_image);
}
static Image *NCCSubtractImageMean(const Image *alpha_image,
const Image *beta_image,const ChannelStatistics *channel_statistics,
ExceptionInfo *exception)
{
CacheView
*beta_view,
*image_view;
Image
*gamma_image;
MagickBooleanType
status;
ssize_t
y;
/*
Subtract the image mean and pad.
*/
gamma_image=CloneImage(beta_image,alpha_image->columns,alpha_image->rows,
MagickTrue,exception);
if (gamma_image == (Image *) NULL)
return(gamma_image);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(gamma_image,exception);
beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(beta_image,gamma_image,gamma_image->rows,1)
#endif
for (y=0; y < (ssize_t) gamma_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,gamma_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) gamma_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(gamma_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(gamma_image,i);
PixelTrait traits = GetPixelChannelTraits(gamma_image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((x >= (ssize_t) beta_image->columns) ||
(y >= (ssize_t) beta_image->rows))
q[i]=(Quantum) 0;
else
q[i]=p[i]-channel_statistics[channel].mean;
}
p+=GetPixelChannels(beta_image);
q+=GetPixelChannels(gamma_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
beta_view=DestroyCacheView(beta_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
gamma_image=DestroyImage(gamma_image);
return(gamma_image);
}
static Image *NCCUnityImage(const Image *alpha_image,const Image *beta_image,
ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*unity_image;
MagickBooleanType
status;
ssize_t
y;
/*
Create a padded unity image.
*/
unity_image=CloneImage(alpha_image,alpha_image->columns,alpha_image->rows,
MagickTrue,exception);
if (unity_image == (Image *) NULL)
return(unity_image);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(unity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(unity_image,unity_image,unity_image->rows,1)
#endif
for (y=0; y < (ssize_t) unity_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,unity_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) unity_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(unity_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(unity_image,i);
PixelTrait traits = GetPixelChannelTraits(unity_image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=QuantumRange;
if ((x >= (ssize_t) beta_image->columns) ||
(y >= (ssize_t) beta_image->rows))
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(unity_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
unity_image=DestroyImage(unity_image);
return(unity_image);
}
static Image *NCCVarianceImage(Image *alpha_image,const Image *beta_image,
ExceptionInfo *exception)
{
CacheView
*beta_view,
*image_view;
Image
*variance_image;
MagickBooleanType
status;
ssize_t
y;
/*
Compute the variance of the two images.
*/
variance_image=CloneImage(alpha_image,0,0,MagickTrue,exception);
if (variance_image == (Image *) NULL)
return(variance_image);
status=MagickTrue;
image_view=AcquireAuthenticCacheView(variance_image,exception);
beta_view=AcquireVirtualCacheView(beta_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(beta_image,variance_image,variance_image->rows,1)
#endif
for (y=0; y < (ssize_t) variance_image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(beta_view,0,y,beta_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,variance_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) variance_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(variance_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(variance_image,i);
PixelTrait traits = GetPixelChannelTraits(variance_image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumRange*sqrt(fabs((double) QuantumScale*
(q[i]-p[i])))/sqrt((double) QuantumRange));
}
p+=GetPixelChannels(beta_image);
q+=GetPixelChannels(variance_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
beta_view=DestroyCacheView(beta_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
variance_image=DestroyImage(variance_image);
return(variance_image);
}
static Image *NCCSimilarityImage(const Image *image,const Image *reference,
const MetricType metric,const double similarity_threshold,
RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define DestroySimilarityResources() \
{ \
if (channel_statistics != (ChannelStatistics *) NULL) \
channel_statistics=(ChannelStatistics *) \
RelinquishMagickMemory(channel_statistics); \
if (beta_image != (Image *) NULL) \
beta_image=DestroyImage(beta_image); \
if (gamma_image != (Image *) NULL) \
gamma_image=DestroyImage(gamma_image); \
if (ncc_image != (Image *) NULL) \
ncc_image=DestroyImage(ncc_image); \
if (normalize_image != (Image *) NULL) \
normalize_image=DestroyImage(normalize_image); \
if (square_image != (Image *) NULL) \
square_image=DestroyImage(square_image); \
if (unity_image != (Image *) NULL) \
unity_image=DestroyImage(unity_image); \
}
#define ThrowSimilarityException() \
{ \
DestroySimilarityResources() \
return((Image *) NULL); \
}
ChannelStatistics
*channel_statistics = (ChannelStatistics *) NULL;
double
maxima = 0.0;
Image
*beta_image = (Image *) NULL,
*correlation_image = (Image *) NULL,
*gamma_image = (Image *) NULL,
*ncc_image = (Image *) NULL,
*normalize_image = (Image *) NULL,
*square_image = (Image *) NULL,
*unity_image = (Image *) NULL;
MagickBooleanType
status;
RectangleInfo
geometry;
/*
Accelerated correlation-based image similarity using FFT local statistics.
Contributed by Fred Weinhaus.
*/
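/*
Editor's sketch of the math, inferred from the steps below: for each offset
the score is the normalized cross correlation
NCC=sum(I*(R-mu_R))/(sigma_local(I)*sigma_R*N),
where the local mean and variance of the image under the template window
come from cross-correlating the image (and its square) with a padded unity
"box" image, so every term reduces to FFT-sized transforms.
*/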
square_image=NCCSquareImage(image,exception);
if (square_image == (Image *) NULL)
ThrowSimilarityException();
unity_image=NCCUnityImage(image,reference,exception);
if (unity_image == (Image *) NULL)
ThrowSimilarityException();
/*
Compute the cross correlation of the square and unity images.
*/
ncc_image=CrossCorrelationImage(square_image,unity_image,exception);
square_image=DestroyImage(square_image);
if (ncc_image == (Image *) NULL)
ThrowSimilarityException();
status=NCCMultiplyImage(ncc_image,(double) QuantumRange*reference->columns*
reference->rows,(const ChannelStatistics *) NULL,exception);
if (status == MagickFalse)
ThrowSimilarityException();
/*
Compute the cross correlation of the source and unity images.
*/
gamma_image=CrossCorrelationImage(image,unity_image,exception);
unity_image=DestroyImage(unity_image);
if (gamma_image == (Image *) NULL)
ThrowSimilarityException();
square_image=NCCSquareImage(gamma_image,exception);
gamma_image=DestroyImage(gamma_image);
if (square_image == (Image *) NULL)
ThrowSimilarityException();
status=NCCMultiplyImage(square_image,(double) QuantumRange,
(const ChannelStatistics *) NULL,exception);
if (status == MagickFalse)
ThrowSimilarityException();
/*
Compute the variance of the two images.
*/
gamma_image=NCCVarianceImage(ncc_image,square_image,exception);
square_image=DestroyImage(square_image);
ncc_image=DestroyImage(ncc_image);
if (gamma_image == (Image *) NULL)
ThrowSimilarityException();
channel_statistics=GetImageStatistics(reference,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
ThrowSimilarityException();
/*
Subtract the image mean.
*/
status=NCCMultiplyImage(gamma_image,1.0,channel_statistics,exception);
if (status == MagickFalse)
ThrowSimilarityException();
normalize_image=NCCSubtractImageMean(image,reference,channel_statistics,
exception);
if (normalize_image == (Image *) NULL)
ThrowSimilarityException();
ncc_image=CrossCorrelationImage(image,normalize_image,exception);
normalize_image=DestroyImage(normalize_image);
if (ncc_image == (Image *) NULL)
ThrowSimilarityException();
/*
Divide the two images.
*/
beta_image=NCCDivideImage(ncc_image,gamma_image,exception);
ncc_image=DestroyImage(ncc_image);
gamma_image=DestroyImage(gamma_image);
if (beta_image == (Image *) NULL)
ThrowSimilarityException();
(void) ResetImagePage(beta_image,"0x0+0+0");
SetGeometry(image,&geometry);
geometry.width=image->columns-reference->columns;
geometry.height=image->rows-reference->rows;
/*
Crop padding.
*/
correlation_image=CropImage(beta_image,&geometry,exception);
beta_image=DestroyImage(beta_image);
if (correlation_image == (Image *) NULL)
ThrowSimilarityException();
(void) ResetImagePage(correlation_image,"0x0+0+0");
/*
Identify the maxima value in the image and its location.
*/
status=GrayscaleImage(correlation_image,AveragePixelIntensityMethod,
exception);
if (status == MagickFalse)
ThrowSimilarityException();
status=NCCMaximaImage(correlation_image,&maxima,offset,exception);
if (status == MagickFalse)
{
correlation_image=DestroyImage(correlation_image);
ThrowSimilarityException();
}
*similarity_metric=1.0-QuantumScale*maxima;
DestroySimilarityResources();
return(correlation_image);
}
#endif
static double GetSimilarityMetric(const Image *image,const Image *reference,
const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
double
distortion;
Image
*similarity_image;
MagickBooleanType
status;
RectangleInfo
geometry;
SetGeometry(reference,&geometry);
geometry.x=x_offset;
geometry.y=y_offset;
similarity_image=CropImage(image,&geometry,exception);
if (similarity_image == (Image *) NULL)
return(0.0);
distortion=0.0;
status=GetImageDistortion(similarity_image,reference,metric,&distortion,
exception);
similarity_image=DestroyImage(similarity_image);
if (status == MagickFalse)
return(0.0);
return(distortion);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
const MetricType metric,const double similarity_threshold,
RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*similarity_view;
Image
*similarity_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
assert(offset != (RectangleInfo *) NULL);
SetGeometry(reference,offset);
*similarity_metric=MagickMaximumValue;
#if defined(MAGICKCORE_HDRI_SUPPORT) && defined(MAGICKCORE_FFTW_DELEGATE)
{
const char *artifact = GetImageArtifact(image,"compare:accelerate-ncc");
MagickBooleanType accelerate = (artifact != (const char *) NULL) &&
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
if ((accelerate != MagickFalse) &&
(metric == NormalizedCrossCorrelationErrorMetric))
{
similarity_image=NCCSimilarityImage(image,reference,metric,
similarity_threshold,offset,similarity_metric,exception);
return(similarity_image);
}
}
#endif
similarity_image=CloneImage(image,image->columns-reference->columns+1,
image->rows-reference->rows+1,MagickTrue,exception);
if (similarity_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(similarity_image,DirectClass,exception);
if (status == MagickFalse)
{
similarity_image=DestroyImage(similarity_image);
return((Image *) NULL);
}
(void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
exception);
/*
Measure similarity of reference image against image.
*/
status=MagickTrue;
progress=0;
similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) \
shared(progress,status,similarity_metric) \
magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
{
double
similarity;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
continue;
q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
{
ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp flush(similarity_metric)
#endif
if (*similarity_metric <= similarity_threshold)
break;
similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
if ((metric == NormalizedCrossCorrelationErrorMetric) ||
(metric == UndefinedErrorMetric))
similarity=1.0-similarity;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SimilarityImage)
#endif
if (similarity < *similarity_metric)
{
offset->x=x;
offset->y=y;
*similarity_metric=similarity;
}
if (metric == PerceptualHashErrorMetric)
similarity=MagickMin(0.01*similarity,1.0);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(similarity_traits == UndefinedPixelTrait) ||
((similarity_traits & UpdatePixelTrait) == 0))
continue;
SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
QuantumRange*similarity),q);
}
q+=GetPixelChannels(similarity_image);
}
if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
similarity_view=DestroyCacheView(similarity_view);
if (status == MagickFalse)
similarity_image=DestroyImage(similarity_image);
return(similarity_image);
}
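/*
Usage sketch (editor's addition, not part of MagickCore): locating a
template within an image; `image', `reference', and `exception' are
assumed to come from the caller.

RectangleInfo
offset;

double
similarity = 0.0;

Image
*match_map;

match_map=SimilarityImage(image,reference,
NormalizedCrossCorrelationErrorMetric,0.0,&offset,&similarity,exception);
if (match_map != (Image *) NULL)
{
(void) FormatLocaleFile(stdout,"best match at +%ld+%ld (%g)\n",
(long) offset.x,(long) offset.y,similarity);
match_map=DestroyImage(match_map);
}
*/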
|
extra_data.c | //
// Created by sachetto on 01/10/17.
//
#include <unistd.h>
#include "../config/extra_data_config.h"
#include "../config_helpers/config_helpers.h"
#include "../libraries_common/common_data_structures.h"
#include "../utils/file_utils.h"
real* set_commom_schemia_data(struct config *config, uint32_t num_cells, int num_par, size_t *extra_data_size) {
*extra_data_size = sizeof(real)*(num_cells + num_par);
real *extra_data = (real*)malloc(*extra_data_size);
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCaL_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCaL_multiplicator, config->config_data, "GCaL_multiplicator");
real INaCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, INaCa_multiplicator, config->config_data, "INaCa_multiplicator");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCaL_multiplicator;
extra_data[6] = INaCa_multiplicator;
return extra_data;
}
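// Editor's note: layout of the buffer returned above, as filled in by the
// callers below: extra_data[0..6] holds the global ischemia parameters
// (atpi, Ko, Ki, Vm_modifier and the GNa/GCaL/INaCa multiplicators);
// extra_data[num_par..num_par+num_cells-1] holds one fibrosis factor per
// active cell (0.0 fibrotic, 1.0 healthy, a fraction within the border
// zone).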
SET_EXTRA_DATA(set_extra_data_for_fibrosis_sphere) {
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node ** ac = the_grid->active_cells;
real *fibs = NULL;
real plain_center = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, plain_center, config->config_data, "plain_center");
real border_zone_size = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, border_zone_size, config->config_data, "border_zone_size");
real sphere_radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, sphere_radius, config->config_data, "sphere_radius");
int num_par = 7;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
OMP(parallel for)
for (uint32_t i = 0; i < num_active_cells; i++) {
if(FIBROTIC(ac[i])) {
fibs[i+num_par] = 0.0;
}
else if(BORDER_ZONE(ac[i])) {
real center_x = (real)ac[i]->center.x;
real center_y = (real)ac[i]->center.y;
//TODO: Maybe we want the distance from the Z as well
//real center_z = (real)ac[i]->center_z;
real distanceFromCenter = sqrtf((center_x - plain_center)*(center_x - plain_center) + (center_y - plain_center)*(center_y - plain_center));
distanceFromCenter = (distanceFromCenter - sphere_radius)/border_zone_size;
fibs[i+num_par] = distanceFromCenter;
}
else {
fibs[i+num_par] = 1.0;
}
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_fibrosis_plain) {
uint32_t num_active_cells = the_grid->num_active_cells;
int num_par = 7;
real *fibs = NULL;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
for(uint32_t i = num_par; i < num_active_cells + num_par; i++) {
fibs[i] = 0.0;
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_no_fibrosis) {
uint32_t num_active_cells = the_grid->num_active_cells;
int num_par = 7;
real *fibs = NULL;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
for(uint32_t i = num_par; i < num_active_cells + num_par; i++) {
fibs[i] = 1.0;
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_human_full_mesh) {
uint32_t num_active_cells = the_grid->num_active_cells;
int num_par = 7;
real *fibs = NULL;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
struct cell_node ** ac = the_grid->active_cells;
real_cpu small_scar_center_x = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_x, config->config_data, "small_scar_center_x");
real_cpu small_scar_center_y = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_y, config->config_data, "small_scar_center_y");
real_cpu small_scar_center_z = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, small_scar_center_z, config->config_data, "small_scar_center_z");
real_cpu big_scar_center_x = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_x, config->config_data, "big_scar_center_x");
real_cpu big_scar_center_y = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_y, config->config_data, "big_scar_center_y");
real_cpu big_scar_center_z = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, big_scar_center_z, config->config_data, "big_scar_center_z");
real_cpu bz_size_big = 0;
real_cpu bz_size_small = 0;
real_cpu dist_big = 0;
real_cpu dist_small = 0;
uint32_t i;
bool fibrotic, border_zone;
char scar_type;
OMP(parallel for private(dist_big, dist_small, border_zone, scar_type))
for (i = 0; i < num_active_cells; i++) {
border_zone = BORDER_ZONE(ac[i]);
scar_type = SCAR_TYPE(ac[i]);
if (ac[i]->active && border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
if(scar_type == 'b') {
dist_big = sqrt((center_x - big_scar_center_x) * (center_x - big_scar_center_x) +
(center_y - big_scar_center_y) * (center_y - big_scar_center_y) +
(center_z - big_scar_center_z) * (center_z - big_scar_center_z));
OMP(critical(big))
if (dist_big > bz_size_big) {
bz_size_big = dist_big;
}
}
else if(scar_type == 's') {
dist_small = sqrt((center_x - small_scar_center_x) * (center_x - small_scar_center_x) +
(center_y - small_scar_center_y) * (center_y - small_scar_center_y) +
(center_z - small_scar_center_z) * (center_z - small_scar_center_z));
OMP(critical(small))
if (dist_small > bz_size_small) {
bz_size_small = dist_small;
}
}
}
}
OMP(parallel for private(dist_big, dist_small, fibrotic, border_zone, scar_type))
for (i = 0; i < num_active_cells; i++) {
if (ac[i]->active) {
fibrotic = FIBROTIC(ac[i]);
border_zone = BORDER_ZONE(ac[i]);
scar_type = SCAR_TYPE(ac[i]);
if(fibrotic) {
fibs[i+num_par] = 0.0f;
}
else if (border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
if(scar_type == 'b') {
dist_big = sqrt((center_x - big_scar_center_x) * (center_x - big_scar_center_x) +
(center_y - big_scar_center_y) * (center_y - big_scar_center_y) +
(center_z - big_scar_center_z) * (center_z - big_scar_center_z));
fibs[i+num_par] = (real)(dist_big / bz_size_big);
}
else if(scar_type == 's') {
dist_small = sqrt((center_x - small_scar_center_x) * (center_x - small_scar_center_x) +
(center_y - small_scar_center_y) * (center_y - small_scar_center_y) +
(center_z - small_scar_center_z) * (center_z - small_scar_center_z));
fibs[i+num_par] = (real)(dist_small / bz_size_small);
}
else {
fibs[i+num_par] = 1.0f;
}
}
}
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_scar_wedge) {
uint32_t num_active_cells = the_grid->num_active_cells;
real *fibs = NULL;
int num_par = 7;
fibs = set_commom_schemia_data(config, num_active_cells, num_par, extra_data_size);
struct cell_node ** ac = the_grid->active_cells;
char *scar_size;
GET_PARAMETER_VALUE_CHAR_OR_REPORT_ERROR (scar_size, config->config_data, "scar_size");
uint8_t size_code;
if(strcmp(scar_size, "big") == 0) {
size_code = 0;
}
else if(strcmp(scar_size, "small") == 0) {
size_code = 1;
}
else {
printf("Function: set_extra_data_for_scar_wedge, invalid scar size %s. Valid sizes are big or small. Exiting!\n", scar_size);
exit(EXIT_FAILURE);
}
real_cpu scar_center_x;
real_cpu scar_center_y;
real_cpu scar_center_z;
// Fibrosis configuration
// Big scar
if(size_code == 0) {
scar_center_x = 95300;
scar_center_y = 81600;
scar_center_z = 36800;
}
else {
scar_center_x = 52469;
scar_center_y = 83225;
scar_center_z = 24791;
}
real_cpu bz_size = 0.0;
real_cpu dist;
uint32_t i;
bool border_zone, fibrotic;
OMP(parallel for private(dist, border_zone))
for (i = 0; i < num_active_cells; i++) {
if(ac[i]->active) {
border_zone = BORDER_ZONE(ac[i]);
if(border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
dist = sqrt((center_x - scar_center_x)*(center_x - scar_center_x) + (center_y - scar_center_y)*(center_y - scar_center_y) + (center_z - scar_center_z)*(center_z - scar_center_z) );
OMP(critical)
if(dist > bz_size) {
bz_size = dist;
}
}
}
}
OMP(parallel for private(dist, border_zone, fibrotic))
for (i = 0; i < num_active_cells; i++) {
if(ac[i]->active) {
border_zone = BORDER_ZONE(ac[i]);
fibrotic = FIBROTIC(ac[i]);
if(fibrotic) {
fibs[i+num_par] = 0.0;
}
else if(border_zone) {
real_cpu center_x = ac[i]->center.x;
real_cpu center_y = ac[i]->center.y;
real_cpu center_z = ac[i]->center.z;
dist = sqrt((center_x - scar_center_x)*(center_x - scar_center_x) + (center_y - scar_center_y)*(center_y - scar_center_y) + (center_z - scar_center_z)*(center_z - scar_center_z) );
dist = dist/bz_size;
fibs[i + num_par] = (real)dist;
}
else {
fibs[i + num_par] = 1.0f;
}
}
}
return (void*)fibs;
}
SET_EXTRA_DATA(set_extra_data_for_benchmark) {
*extra_data_size = sizeof(real)*19;
real *initial_conditions = (real*)malloc(*extra_data_size);
// Initial conditions // Var Units Initial value
initial_conditions[ 0] = -85.423f; // V; millivolt; -85.423
initial_conditions[ 1] = 0.0165; // Xr1; dimensionless; 0.0165
initial_conditions[ 2] = 0.473; // Xr2; dimensionless; 0.473
initial_conditions[ 3] = 0.0174; // Xs; dimensionless; 0.0174
initial_conditions[ 4] = 0.00165; // m; dimensionless; 0.00165
initial_conditions[ 5] = 0.749; // h; dimensionless; 0.749
initial_conditions[ 6] = 0.6788; // j; dimensionless; 0.6788
initial_conditions[ 7] = 3.288e-5; // d; dimensionless; 3.288e-5
initial_conditions[ 8] = 0.7026; // f; dimensionless; 0.7026
initial_conditions[ 9] = 0.9526; // f2; dimensionless; 0.9526
initial_conditions[10] = 0.9942; // fCass; dimensionless; 0.9942
initial_conditions[11] = 0.999998; // s; dimensionless; 0.999998
initial_conditions[12] = 2.347e-8; // r; dimensionless; 2.347e-8
initial_conditions[13] = 0.000153; // Ca_i; millimolar; 0.000153
initial_conditions[14] = 4.272; // Ca_SR; millimolar; 4.272
initial_conditions[15] = 0.00042; // Ca_ss; millimolar; 0.00042
initial_conditions[16] = 0.8978; // R_prime; dimensionless; 0.8978
initial_conditions[17] = 10.132; // Na_i; millimolar; 10.132
initial_conditions[18] = 138.52; // K_i; millimolar; 138.52
return (void*)initial_conditions;
}
SET_EXTRA_DATA(set_extra_data_for_fibrosis_sphere_atpi_changed) {
uint32_t num_active_cells = the_grid->num_active_cells;
struct cell_node ** ac = the_grid->active_cells;
real plain_center = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, plain_center, config->config_data, "plain_center");
real border_zone_size = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, border_zone_size, config->config_data, "border_zone_size");
real sphere_radius = 0.0;
GET_PARAMETER_NUMERIC_VALUE_OR_REPORT_ERROR(real, sphere_radius, config->config_data, "sphere_radius");
int num_par = 6;
int num_tt_par = 12;
// layout: num_par (6) extra parameters, then num_tt_par (12) tt3 initial conditions, then one value per active cell
*extra_data_size = sizeof(real)*(num_par+num_tt_par+num_active_cells);
real *extra_data = (real*)malloc(*extra_data_size);
real atpi = 6.8;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, atpi, config->config_data, "atpi");
real Ko = 5.4;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ko, config->config_data, "Ko");
real Ki = 138.3;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Ki, config->config_data, "Ki");
real GNa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GNa_multiplicator, config->config_data, "GNa_multiplicator");
real GCa_multiplicator = 1.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, GCa_multiplicator, config->config_data, "GCa_multiplicator");
real Vm_modifier = 0.0f;
GET_PARAMETER_NUMERIC_VALUE_OR_USE_DEFAULT(real, Vm_modifier, config->config_data, "Vm_modifier");
// Extra parameters section
extra_data[0] = atpi;
extra_data[1] = Ko;
extra_data[2] = Ki;
extra_data[3] = Vm_modifier;
extra_data[4] = GNa_multiplicator;
extra_data[5] = GCa_multiplicator;
// Extra initial conditions section
extra_data[6] = -79.5089;
extra_data[7] = 0.0056681;
extra_data[8] = 0.554756;
extra_data[9] = 0.54673;
extra_data[10] = 0.000565801;
extra_data[11] = 0.00486328;
extra_data[12] = 0.787571;
extra_data[13] = 0.998604;
extra_data[14] = 0.998842;
extra_data[15] = 7.23073e-05;
extra_data[16] = 6.2706e-08;
extra_data[17] = 0.412462;
// Fibrotic cells configuration
#pragma omp parallel for
for (uint32_t i = 0; i < num_active_cells; i++) {
if(FIBROTIC(ac[i])) {
extra_data[i+num_par+num_tt_par] = 0.0;
}
else if(BORDER_ZONE(ac[i])) {
real center_x = (real)ac[i]->center.x;
real center_y = (real)ac[i]->center.y;
//TODO: Maybe we want the distance from the Z as well
//real center_z = (real)ac[i]->center_z;
real distanceFromCenter = sqrtf((center_x - plain_center)*(center_x - plain_center) + (center_y - plain_center)*(center_y - plain_center));
distanceFromCenter = (distanceFromCenter - sphere_radius)/border_zone_size;
extra_data[i+num_par+num_tt_par] = distanceFromCenter;
}
else {
extra_data[i+num_par+num_tt_par] = 1.0;
}
}
return (void*)extra_data;
}
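/*
 * Layout sketch (restating the code above; the helper name is hypothetical):
 * extra_data = [ 6 scalar parameters | 12 tt3 initial conditions | 1 value per active cell ]
 * so the fibrosis factor of active cell i lives at index 6 + 12 + i.
 */
static inline real get_cell_fibrosis_factor(const real *extra_data, uint32_t i) {
    const int num_par = 6, num_tt_par = 12;
    return extra_data[num_par + num_tt_par + i];
}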
|
clik_parametric_addon.c | #include "clik_parametric.h"
#include "clik_helper.h"
#include "clik_parametric_addon.h"
typedef struct {
parametric *p_model;
double *rq,*wrq;
int nell,nbins,m;
double unit;
int *ell;
double *bins,*wl;
double *wbins;
int *bi,*bo;
int bn;
double *A;
int ismul;
} parametric_smica;
void comp_parametric_update(void* data,double* locpars, double* rq, error **err) {
parametric_smica *p_pay;
SmicaComp* SC;
double *wl,*wl0,one;
int inc,il,im;
double res;
int m, ndet;
//double r10[16];
char nm[3000];
SC = data;
p_pay = SC->data;
//_DEBUGHERE_("%g",locpars[0]);
parametric_compute(p_pay->p_model, locpars, p_pay->rq, NULL, err);
forwardError(*err,__LINE__,);
//sprintf(nm,"rq_%s.dat",SC->comp_name);
//write_bin_vector(p_pay->rq, nm, sizeof(double)*(p_pay->nell*p_pay->p_model->ndet*p_pay->p_model->ndet), err);
//forwardError(*err,__LINE__,);
m = p_pay->m;
ndet = p_pay->p_model->ndet;
//_DEBUGHERE_("%d %d",m,ndet);
// apply wl and binning
one=1;
int rn =0;
if (p_pay->wl==NULL) {
wl0 = &one;
inc = 0;
} else {
wl0 = p_pay->wl;
inc = 1;
rn = 1;
}
if ((rn==0) && (fabs((p_pay->unit-1))>1e-7)) {
rn = 1;
}
if (rn==0) {
for(im=0;im<ndet;im++) {
if (fabs(p_pay->A[im]-1)>1e-7) {
rn=1;
}
}
}
//_DEBUGHERE_("%d",rn);
wl = wl0; /* walk the window function, or the constant 1 when no wl is set */
if (rn==1) {
//#pragma omp parallel for private(il,ndet)
for(il=0;il<p_pay->nell;il++) {
int ip;
int im1,im2;
ip = il * ndet * ndet;
for(im1=0;im1<ndet;im1++) {
for(im2=0;im2<ndet;im2++) {
p_pay->rq[il*ndet*ndet+im1*ndet+im2] = p_pay->rq[il*ndet*ndet+im1*ndet+im2] * *wl * p_pay->unit * p_pay->A[im1]*p_pay->A[im2];
}
}
wl+=inc;
}
}
//_DEBUGHERE_("%g %g %g %g",egfs_pay->A[0],egfs_pay->A[1],egfs_pay->A[2],egfs_pay->A[3])
//_DEBUGHERE_("%g %g %g",egfs_pay->rq[0],egfs_pay->rq[2],egfs_pay->unit);
// apply binning if needed
//sprintf(nm,"rq_before_%s.dat",SC->comp_name);
//write_bin_vector(rq, nm, sizeof(double)*(p_pay->nbins*p_pay->p_model->ndet*p_pay->p_model->ndet), err);
//forwardError(*err,__LINE__,);
if (p_pay->ismul==0) {
if (p_pay->bins!=NULL) {
int nbns;
int ndim;
ndim = m*m;
nbns = p_pay->nbins;
{
int il,iq,if1,if2,bb;
double w;
bb=0;
//#pragma omp parallel for private(iq,il,if1,if2,w,bb,ndet,m)
for(iq=0;iq<nbns;iq++) {
for(il=p_pay->bi[iq];il<p_pay->bo[iq];il++) {
w = p_pay->wbins[bb];
bb++;
for(if1=0;if1<ndet;if1++) {
for(if2=0;if2<ndet;if2++) {
//_DEBUGHERE_("%d %d %g %d,%d %g %g",il,iq,w,if1,if2,rq[iq*ndim+if1*m+if2],p_pay->rq[il*ndet*ndet+if1*ndet+if2]);
rq[iq*ndim+if1*m+if2] += w * p_pay->rq[il*ndet*ndet+if1*ndet+if2];
}
}
}
}
}
} else {
int if1,if2;
//#pragma omp parallel for private(il,if1,if2,m)
for(il=0;il<p_pay->nell;il++) {
for(if1=0;if1<ndet;if1++) {
for(if2=0;if2<ndet;if2++) {
rq[il*m*m+if1*m+if2] += p_pay->rq[il*ndet*ndet+if1*ndet+if2];
}
}
}
}
} else {
//_DEBUGHERE_("MUL","");
if (p_pay->bins!=NULL) {
int nbns;
int ndim;
ndim = m*m;
nbns = p_pay->nbins;
{
int il,iq,if1,if2,bb,b0;
double w,acc;
b0=0;
//#pragma omp parallel for private(il,iq,if1,if2,b0,bb,acc,w,m,ndet)
for(iq=0;iq<nbns;iq++) {
for(if1=0;if1<ndet;if1++) {
for(if2=0;if2<ndet;if2++) {
bb=b0;
acc = 0;
for(il=p_pay->bi[iq];il<p_pay->bo[iq];il++) {
w = p_pay->wbins[bb];
bb++;
//_DEBUGHERE_("%d %d %g %d,%d %g %g",il,iq,w,if1,if2,rq[iq*ndim+if1*m+if2],p_pay->rq[il*ndet*ndet+if1*ndet+if2]);
acc += w * p_pay->rq[il*ndet*ndet+if1*ndet+if2];
//_DEBUGHERE_("%d %d %d %g %g %g",il,if1,if2,w,p_pay->rq[il*ndet*ndet+if1*ndet+if2],acc)
}
//_DEBUGHERE_("%d %d %d %g %g ",iq,if1,if2,rq[iq*ndim+if1*m+if2],acc);
rq[iq*ndim+if1*m+if2] *= acc;
}
}
b0=bb;
}
}
} else {
int if1,if2;
//#pragma omp parallel for private(il,if1,if2,m,ndet)
for(il=0;il<p_pay->nell;il++) {
for(if1=0;if1<ndet;if1++) {
for(if2=0;if2<ndet;if2++) {
rq[il*m*m+if1*m+if2] *= p_pay->rq[il*ndet*ndet+if1*ndet+if2];
}
}
}
}
}
//sprintf(nm,"rq_after_%s.dat",SC->comp_name);
//write_bin_vector(rq, nm, sizeof(double)*(p_pay->nbins*p_pay->p_model->ndet*p_pay->p_model->ndet), err);
//forwardError(*err,__LINE__,);
}
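/*
 * Binning summary (restating the loops above, no new behavior): with bin iq
 * covering multipoles bi[iq] <= il < bo[iq] and the nonzero weights w packed
 * contiguously in wbins, an additive component accumulates
 *     rq[iq,f1,f2] += sum_il w(il) * rq_model[il,f1,f2]
 * while a multiplicative one (ismul == 1) rescales what is already there:
 *     rq[iq,f1,f2] *= sum_il w(il) * rq_model[il,f1,f2]
 * Without bins, the same update is applied multipole by multipole.
 */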
void free_comp_parametric(void** data) {
SmicaComp *SC;
parametric_smica *p_pay;
SC = *data;
p_pay = SC->data;
free(p_pay->rq);
if (p_pay->nbins!=0) {
free(p_pay->bins);
free(p_pay->bi);
free(p_pay->bo);
free(p_pay->wbins);
}
if (p_pay->wl!=NULL) {
free(p_pay->wl);
}
parametric_free((void**)&(p_pay->p_model));
free(p_pay->A);
free(SC->data);
free(SC);
*data = NULL;
}
void apply_rename(int nkey, char* keys, int nrename, char* rename_from, char* rename_to) {
int i,j;
if (nrename==0) {
return;
}
for(i=0;i<nkey;i++) {
for(j=0;j<nrename;j++) {
if (strcmp(&(keys[i*256]),&(rename_from[j*256]))==0) {
memcpy(&(keys[i*256]),&(rename_to[j*256]),sizeof(char)*256);
break;
}
}
}
}
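/*
 * Note (illustrative example with hypothetical data): key tables are flat
 * buffers of fixed 256-byte slots, so renaming nkey keys against nrename
 * candidates costs O(nkey*nrename) strcmp calls.
 *
 *   char keys[2*256] = {0}, from[256] = {0}, to[256] = {0};
 *   strcpy(&keys[0],   "A_cib_217");
 *   strcpy(&keys[256], "A_sz");
 *   strcpy(from, "A_sz");
 *   strcpy(to,   "A_sz_143");
 *   apply_rename(2, keys, 1, from, to);  // keys[256..] now holds "A_sz_143"
 */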
int base_parametric_cldf_init(cldf *df,int m, double** detlist,int *ndef, char ***defkeys, char*** defvalues, int *nvar, char ***varkeys, error **err) {
int dz,i;
char *keyvartable,*deftable,*valtable;
int nrename;
char *rename_from, *rename_to;
int hk;
*nvar = cldf_readint(df,"ndim",err);
forwardError(*err,__LINE__,0);
nrename=0;
rename_from = NULL;
rename_to = NULL;
hk = cldf_haskey(df,"nrename",err);
forwardError(*err,__LINE__,0);
if (hk ==1) {
nrename = cldf_readint(df,"nrename",err);
forwardError(*err,__LINE__,0);
dz = -1;
rename_from = cldf_readstr(df,"rename_from",&dz, err);
forwardError(*err,__LINE__,0);
dz = -1;
rename_to = cldf_readstr(df,"rename_to",&dz, err);
forwardError(*err,__LINE__,0);
}
dz = -1;
keyvartable = cldf_readstr(df,"keys",&dz, err);
forwardError(*err,__LINE__,0);
if (*nvar!=0) {
*varkeys = malloc_err(sizeof(char*)**nvar,err);
forwardError(*err,__LINE__,0);
} else {
*varkeys = malloc_err(sizeof(char*)*1,err);
forwardError(*err,__LINE__,0);
(*varkeys)[0] = NULL;
}
apply_rename(*nvar,keyvartable,nrename,rename_from,rename_to);
for(i=0;i<*nvar;i++) {
(*varkeys)[i] = &(keyvartable[i*256]);
}
// get defaults
*ndef = cldf_readint(df,"ndef",err);
forwardError(*err,__LINE__,0);
dz = -1;
deftable = cldf_readstr(df,"defaults",&dz, err);
forwardError(*err,__LINE__,0);
dz = -1;
valtable = cldf_readstr(df,"values",&dz, err);
forwardError(*err,__LINE__,0);
if (*ndef!=0) {
*defkeys = malloc_err(sizeof(char*)**ndef,err);
forwardError(*err,__LINE__,0);
*defvalues = malloc_err(sizeof(char*)**ndef,err);
forwardError(*err,__LINE__,0);
} else {
*defkeys = malloc_err(sizeof(char*)*1,err);
forwardError(*err,__LINE__,0);
*defvalues = malloc_err(sizeof(char*)*1,err);
forwardError(*err,__LINE__,0);
(*defkeys)[0] = NULL;
(*defvalues)[0] = NULL;
}
apply_rename(*ndef,deftable,nrename,rename_from,rename_to);
for(i=0;i<*ndef;i++) {
(*defkeys)[i] = &(deftable[i*256]);
(*defvalues)[i] = &(valtable[i*256]);
}
dz = -1;
hk = cldf_haskey(df,"dfreq",err);
forwardError(*err,__LINE__,0);
if (hk ==1) {
*detlist = cldf_readfloatarray(df,"dfreq",&dz, err);
forwardError(*err,__LINE__,0);
} else {
int * ietlist;
ietlist = cldf_readintarray(df,"freq",&dz, err);
forwardError(*err,__LINE__,0);
*detlist = malloc_err(sizeof(double)*dz,err);
forwardError(*err,__LINE__,0);
for(i=0;i<dz;i++) {
(*detlist)[i]=ietlist[i];
}
free(ietlist);
}
free(rename_from);
free(rename_to);
return dz;
}
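/*
 * Summary of the cldf keys consumed by base_parametric_cldf_init (as read
 * above): "ndim" (number of variable parameters), "keys" (their names, one
 * 256-byte slot each), "ndef"/"defaults"/"values" (default key/value pairs),
 * optional "nrename"/"rename_from"/"rename_to" (key renaming), and "dfreq"
 * (double) or "freq" (int) for the detector frequency list. The return value
 * is the number of detectors read from that list.
 */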
SmicaComp * finalize_parametric_cldf_init(parametric* p_model,cldf *df,int nb, int m, int nell, int* ell, int* has_cl, double unit,double* wl, double *bins, int nbins,error **err) {
parametric_smica *p_pay;
int i,eb;
char **xnames;
SmicaComp *SC;
int lmin,lmax;
double *color;
int dz;
int nrename;
char *rename_from, *rename_to;
int j;
int nvoid;
int *voidlist;
int hk;
int ncl;
nrename=0;
rename_from = NULL;
rename_to = NULL;
hk = cldf_haskey(df,"nrename",err);
forwardError(*err,__LINE__,NULL);
if (hk ==1) {
nrename = cldf_readint(df,"nrename",err);
forwardError(*err,__LINE__,NULL);
dz = -1;
rename_from = cldf_readstr(df,"rename_from",&dz, err);
forwardError(*err,__LINE__,NULL);
dz = -1;
rename_to = cldf_readstr(df,"rename_to",&dz, err);
forwardError(*err,__LINE__,NULL);
}
hk = cldf_haskey(df,"color",err);
forwardError(*err,__LINE__,NULL);
if (hk ==1) {
dz = -1;
color = cldf_readfloatarray(df,"color",&dz, err);
forwardError(*err,__LINE__,NULL);
if (dz == p_model->ndet) {
double *rolor;
int ii,jj;
dz = p_model->ndet * p_model->ndet;
rolor = malloc_err(sizeof(double)*dz,err);
forwardError(*err,__LINE__,NULL);
for (ii=0;ii<p_model->ndet;ii++) {
for (jj=ii;jj<p_model->ndet;jj++) {
rolor[ii*p_model->ndet+jj] = color[ii]*color[jj];
rolor[jj*p_model->ndet+ii] = color[ii]*color[jj];
}
}
free(color);
color = rolor;
}
testErrorRet(dz != p_model->ndet*p_model->ndet,-12443243,"bad size of color array",*err,__LINE__,NULL);
parametric_set_color(p_model,color,err);
forwardError(*err,__LINE__,NULL);
free(color);
}
hk = cldf_haskey(df,"nvoid",err);
forwardError(*err,__LINE__,NULL);
if (hk ==1) {
nvoid = cldf_readint(df,"nvoid",err);
forwardError(*err,__LINE__,NULL);
dz = nvoid;
voidlist = cldf_readintarray(df,"voidlist",&dz, err);
forwardError(*err,__LINE__,NULL);
parametric_set_void(p_model,nvoid,voidlist,err);
forwardError(*err,__LINE__,NULL);
free(voidlist);
}
lmin = ell[0];
lmax = ell[nell-1];
testErrorRet(nell!=(lmax-lmin+1),-111,"SAFEGUARD: ell range must be contiguous",*err,__LINE__,NULL);
/*eb = 0;
for(i=1;i<6;i++) {
eb +=has_cl[i];
}
testErrorRet(eb!=0,-7693,"parametric does not work with polarized data yet",*err,__LINE__,NULL);
*/
p_pay = malloc_err(sizeof(parametric_smica),err);
forwardError(*err,__LINE__,NULL);
p_pay->m = m;
p_pay->p_model = p_model;
p_pay->unit = unit;
p_pay->A = cldf_readfloatarray(df,"A_cmb",&m,err);
forwardError(*err,__LINE__,NULL);
p_pay->nell = nell;
p_pay->nbins = 0;
p_pay->bins = NULL;
if (bins !=NULL) {
int li,bi,bn;
//ncl = p_model->has_TEB[0] + p_model->has_TEB[1] + p_model->has_TEB[2] + p_model->has_TEB[0] * p_model->has_TEB[1] + p_model->has_TEB[0] * p_model->has_TEB[2] +p_model->has_TEB[1] * p_model->has_TEB[2];
ncl = has_cl[0] + has_cl[1] + has_cl[2] + has_cl[3] + has_cl[4] + has_cl[5];
//_DEBUGHERE_("ncl %d",ncl);
p_pay->nbins = nbins;
p_pay->bins = malloc_err(sizeof(double)*(ncl*nell*nbins),err);
forwardError(*err,__LINE__,NULL);
memcpy(p_pay->bins,bins,sizeof(double)*nbins*ncl*nell);
p_pay->bi = malloc_err(sizeof(int)*nbins,err);
forwardError(*err,__LINE__,NULL);
p_pay->bo = malloc_err(sizeof(int)*nbins,err);
forwardError(*err,__LINE__,NULL);
p_pay->wbins = malloc_err(sizeof(double)*nell,err);
forwardError(*err,__LINE__,NULL);
bn = 0;
for(bi=0;bi<nbins;bi++) {
for(li = 0;li<nell;li++) {
p_pay->bi[bi] = 0;
if (p_pay->bins[bi*ncl*nell+li]!=0) {
p_pay->bi[bi] = li;
break;
}
}
for(li=p_pay->bi[bi];li<nell;li++) {
p_pay->bo[bi] = nell;
if (p_pay->bins[bi*ncl*nell+li]==0) {
p_pay->bo[bi] = li;
break;
}
p_pay->wbins[bn] = p_pay->bins[bi*ncl*nell+li];
bn++;
}
//memcpy(&(p_pay->wbins[bn]),&(p_pay->bins[bi*nell+ p_pay->bi[bi]]),sizeof(double)*( p_pay->bo[bi]- p_pay->bi[bi]));
//bn += p_pay->bo[bi]- p_pay->bi[bi];
}
//write_bin_vector(bins, "bins.dat", sizeof(double)*(nell*nbins), err);
//forwardError(*err,__LINE__,-1);
//write_bin_vector(p_pay->bi, "bi.dat", sizeof(int)*(nbins), err);
//forwardError(*err,__LINE__,-1);
//write_bin_vector(p_pay->bo, "bo.dat", sizeof(int)*(nbins), err);
//forwardError(*err,__LINE__,-1);
//_DEBUGHERE_("%d",bn);
//write_bin_vector(p_pay->wbins, "wb.dat", sizeof(double)*(bn), err);
//forwardError(*err,__LINE__,-1);
}
p_pay->wl = NULL;
if (wl!=NULL) {
p_pay->wl = malloc_err(sizeof(double)*(nell),err);
forwardError(*err,__LINE__,NULL);
memcpy(p_pay->wl,wl,sizeof(double)*nell);
}
p_pay->rq = malloc_err(sizeof(double)*(lmax+1-lmin)*m*m,err);
forwardError(*err,__LINE__,NULL);
memset(p_pay->rq,0,sizeof(double)*(lmax+1-lmin)*m*m);
SC = alloc_SC(p_model->nvar,nb,m,p_pay,&comp_parametric_update,&free_comp_parametric,err);
forwardError(*err,__LINE__,NULL);
p_pay->ismul =0;
hk = cldf_haskey(df,"is_multiplicative",err);
forwardError(*err,__LINE__,NULL);
if (hk ==1) {
p_pay->ismul = cldf_readint(df,"is_multiplicative",err);
forwardError(*err,__LINE__,NULL);
}
if (p_pay->ismul==0) {
SC_isfg(SC);
}
//_DEBUGHERE_("ismul %d",p_pay->ismul);
if (p_model->nvar!=0) {
xnames = malloc_err(sizeof(char*)*(p_model->nvar),err);
forwardError(*err,__LINE__,NULL);
} else{
xnames = malloc_err(sizeof(char*)*1,err);
forwardError(*err,__LINE__,NULL);
}
for(i=0;i<p_model->nvar;i++) {
char *kp;
kp = p_model->varkey[i];
for(j=0;j<nrename;j++) {
if (strcmp(kp,&(rename_to[j*256]))==0) {
kp = &(rename_from[j*256]);
break;
}
}
xnames[i] = kp;
}
SC_setnames(SC, xnames, err);
forwardError(*err,__LINE__,NULL);
free(xnames);
free(rename_from);
free(rename_to);
return SC;
}
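/*
 * Bin bookkeeping built above (restated): for each bin, bi[iq] is the first
 * multipole with a nonzero weight, bo[iq] is one past the last, and the
 * nonzero weights of all bins are packed back to back in wbins. This relies
 * on each bin's support being one contiguous run of multipoles, which is
 * what the early break in both scans assumes.
 */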
//CREATE_PARAMETRIC_FILE_INIT(powerlaw_free_emissivity,powerlaw_free_emissivity_init);
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/Availability.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class VersionTuple;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
unsigned short ParenCount, BracketCount, BraceCount;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
mutable IdentifierInfo *Ident_instancetype;
/// \brief Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// \brief Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// \brief Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// \brief Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// \brief Identifier for "message".
IdentifierInfo *Ident_message;
/// \brief Identifier for "strict".
IdentifierInfo *Ident_strict;
/// \brief Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// C++0x contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// \brief When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// \brief RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
unsigned getDepth() const { return Depth; }
};
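// Usage sketch (illustrative only):
//   TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
//   ++CurTemplateDepthTracker; // entering one template-parameter level
//   // Depth is restored automatically when the tracker goes out of scope.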
/// Factory object for creating AttributeList objects.
AttributeFactory AttrFactory;
/// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// \brief Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
bool SkipFunctionBodies;
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion
/// and balanced tokens must be handled using the specific consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
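// Example (illustrative):
//   SourceLocation CommaLoc;
//   if (TryConsumeToken(tok::comma, CommaLoc)) {
//     // CommaLoc now holds the location of the consumed ','.
//   }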
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion);
}
/// \brief Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// \brief Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed);
PP.Lex(Tok);
PP.EnterToken(Next);
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
return ConsumeToken();
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount)
--ParenCount; // Don't let unbalanced )'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount)
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount)
--BraceCount; // Don't let unbalanced }'s drive the count negative.
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched a specific position in the grammar, provide code-completion
/// results based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// \brief Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// \brief Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// \brief Initialize all pragma handlers.
void initializePragmaHandlers();
/// \brief Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// \brief Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// \brief Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// \brief Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// \brief Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// \brief Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// \brief Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// \brief Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// \brief Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// \brief Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// \brief Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// \brief Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// \brief Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// \brief Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
/// \brief Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// \brief Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken(bool EnteringContext = false,
bool NeedType = false);
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
bool NeedType,
CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind
TryAnnotateName(bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC1);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// \brief Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
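// Example (illustrative): disambiguate without committing any tokens.
//   {
//     RevertingTentativeParsingAction PA(*this);
//     // ... speculative ConsumeToken() calls ...
//   } // All consumed tokens are put back here.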
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
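// Example (illustrative): open a function-body scope for the duration of a
// parse and let RAII close it on scope exit.
//   ParseScope BodyScope(this, Scope::FnScope | Scope::DeclScope);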
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// \brief RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// \brief Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// \brief Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend LLVM_CONSTEXPR SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
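// Example (illustrative): skip to the closing ')' of the current construct
// but leave it for the caller to consume, giving up early at a ';'.
//   SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch);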
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other
/// member declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// \brief Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
CachedTokens *Toks = nullptr)
: Param(P), Toks(Toks) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
CachedTokens *Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// \brief Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// \brief The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// \brief Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// \brief Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// \brief Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// \brief Whether this class is an __interface.
bool IsInterface : 1;
/// \brief The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// \brief The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// \brief RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// \brief Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// \brief Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// \brief The kind of template we are parsing.
enum {
/// \brief We are not parsing a template at all.
NonTemplate = 0,
/// \brief We are parsing a template declaration.
Template,
/// \brief We are parsing an explicit specialization.
ExplicitSpecialization,
/// \brief We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// \brief The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// \brief The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// \brief The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// \brief Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
AttributeList *AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers& VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc, if non-NULL, is filled with the location of the last token of
// the simple-asm.
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr);
ExprResult ParseAsmStringLiteral();
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives();
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
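/// For example, in 'NSArray<NSString *>' the angle brackets carry type
/// arguments, while in 'id<NSCopying>' they carry protocol qualifiers.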
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstraintExpression();
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
void *Info,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast);
ExprResult ParseCastExpression(bool isUnaryExpression,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
std::function<void()> Completer = nullptr);
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr);
void CheckForLParenAfterColonColon();
//===--------------------------------------------------------------------===//
// C++0x 5.1.2: Lambda expressions
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro,
bool *SkippedInits = nullptr);
bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
ExprResult ParseLambdaExpressionAfterIntroducer(
LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while condition expression.
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only
/// size used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
bool AllowOpenMPStandalone = false);
enum AllowedConstructsKind {
/// \brief Allow any declarations, statements, OpenMP directives.
ACK_Any,
/// \brief Allow only statements and non-standalone OpenMP directives.
ACK_StatementsOpenMPNonStandalone,
/// \brief Allow statements and all executable OpenMP directives
ACK_StatementsOpenMPAnyExecutable
};
StmtResult
ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement();
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs);
StmtResult ParseCaseStatement(bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement();
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
AllowedConstructsKind Allowed,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// \brief Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
/// \brief Parse the block; this code is always used.
IEB_Parse,
/// \brief Skip the block entirely; this code is never used.
IEB_Skip,
/// \brief Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// \brief Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// \brief The location of the initial keyword.
SourceLocation KeywordLoc;
/// \brief Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// \brief Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// \brief The name we're looking for.
UnqualifiedId Name;
/// \brief How this __if_exists or __if_not_exists block
/// should behave.
IfExistsBehavior Behavior;
};
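// For example (Microsoft extension; sketch of the surface syntax only, with
// a hypothetical name):
//   __if_exists (SomeClass::someMember) {
//     /* parsed, skipped, or treated as dependent per IfExistsBehavior */
//   }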
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
AccessSpecifier& CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
enum DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DSC_normal:
case DSC_class:
case DSC_top_level:
case DSC_objc_method_result:
case DSC_condition:
return false;
case DSC_template_type_arg:
case DSC_type_specifier:
case DSC_trailing:
case DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
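// For example, when 'for (auto &x : xs)' is being parsed, ColonLoc marks
// the ':' and RangeExpr holds 'xs'.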
DeclGroupPtrTy ParseDeclaration(unsigned Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(unsigned Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
bool RequireSemi,
ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(unsigned Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
void ParseDeclarationSpecifiers(DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(DeclSpec &DS, AccessSpecifier AS,
DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
Declarator::TheContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// \brief Return true if we know that we are definitely looking at a
/// decl-specifier, and that it isn't part of an expression such as a
/// function-style cast. Return false if it's not a decl-specifier, or we're
/// not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified);
/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// \brief Checks if the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
Error ///< Can't be any of the above!
};
/// \brief Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// \brief Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the tricky cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *HasMissingTypename = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// \brief Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
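// A typical disambiguation helper therefore brackets them in a tentative
// parsing action (sketch only; TentativeParsingAction is the RAII-style
// helper declared elsewhere in this class):
//   TentativeParsingAction PA(*this);
//   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
//   PA.Revert(); // rewind the token stream before acting on TPR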
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
Declarator::TheContext Context
= Declarator::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
// Check for the start of a C++11 attribute-specifier-seq in a context where
// an attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!getLangOpts().CPlusPlus11 || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!getLangOpts().CPlusPlus11)
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void handleDeclspecAlignBeforeClassKey(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
if (!attrs.Range.isValid()) return;
DiagnoseProhibitedAttributes(attrs);
attrs.clear();
}
void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
// Forbid C++11 attributes that appear in certain syntactic
// locations which the standard permits but we don't support yet,
// for example, attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs);
/// \brief Skip C++11 attributes and return the end location of the last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// \brief Diagnose and skip C++11 attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax,
Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
void MaybeParseCXX11Attributes(Declarator &D) {
if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (getLangOpts().CPlusPlus11 &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++-style attribute argument list. Returns true if this
/// results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// \brief Parses the opencl_unroll_hint attribute if the language is OpenCL
/// v2.0 or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
/// \brief Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
AttributeList::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(DeclSpec &DS,
unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true,
bool IdentifierRequired = false);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
Declarator &D,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
std::vector<IdentifierInfo*>& Ident,
std::vector<SourceLocation>& NamespaceLoc,
unsigned int index, SourceLocation& InlineLoc,
ParsedAttributes& attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
Decl *ParseUsingDirectiveOrDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
Decl **OwnedType = nullptr);
Decl *ParseUsingDirective(unsigned Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
Decl *ParseUsingDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, AttributeList *Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// \brief Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// \brief Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// \brief Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// \brief Parses declarative or executable directive.
///
/// \param Allowed ACK_Any if any directives are allowed,
/// ACK_StatementsOpenMPAnyExecutable if any executable directives are
/// allowed, and ACK_StatementsOpenMPNonStandalone if only non-standalone
/// executable directives are allowed.
///
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed);
/// \brief Parses a clause of kind \a CKind for a directive of kind \a DKind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// \brief Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind);
/// \brief Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind);
/// \brief Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind);
/// \brief Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind);
/// \brief Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind);
public:
/// Parses a simple expression in parens for single-expression clauses of
/// OpenMP constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
CXXScopeSpec ReductionIdScopeSpec;
DeclarationNameInfo ReductionId;
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val;
OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown;
OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLoc;
};
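/// For example, for a clause written 'depend(in : a, b)', DepKind records
/// OMPC_DEPEND_in, ColonLoc marks the ':', and the variable list passed to
/// ParseOpenMPVarList collects 'a' and 'b'.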
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
ParsedType ObjectType,
SourceLocation& TemplateKWLoc,
UnqualifiedId &Result);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none,
AttributeList *AccessAttrs = nullptr);
Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context,
SourceLocation &DeclEnd,
AccessSpecifier AS,
AttributeList *AccessAttrs);
Decl *ParseSingleDeclarationAfterTemplate(
unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams,
SourceLocation &DeclEnd,
AccessSpecifier AS=AS_none,
AttributeList *AccessAttrs = nullptr);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<Decl*> &TemplateParams);
bool isStartOfTemplateTypeParameter();
Decl *ParseTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseTypeParameter(unsigned Depth, unsigned Position);
Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(TemplateTy Template,
SourceLocation TemplateNameLoc,
const CXXScopeSpec &SS,
bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true);
void AnnotateTemplateIdTokenAsType();
bool IsTemplateArgumentList(unsigned Skip = 0);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(unsigned Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
3921.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
#pragma scop
/* The original pragmas here carried unexpanded autotuning placeholders
   (#P9, #P11, #P12); a plain static schedule is assumed instead so the
   kernel compiles. */
#pragma omp parallel for private(i, j) schedule(static)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
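/* The kernel above applies a fixed 3x3 stencil: each B[i][j] is a weighted
   sum of the 3x3 neighborhood of A centered at (i, j), with the weights
   written literally in the expression. */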
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
pragma_example.c | /* To compile this program on Linux, try:
make CFLAGS='-std=c99 -Wall' pragma_example
To run:
./pragma_example; echo $?
It should print 0 if OK.
You can even compile it to run on multicore SMP for free with
make CFLAGS='-std=c99 -fopenmp -Wall' pragma_example
To verify there are really some clone() system calls that create the threads:
strace -f ./pragma_example ; echo $?
You can notice that the #pragma smecy are ignored (the project is
on-going :-) ) but that the program already produces correct results in
sequential execution and parallel OpenMP execution.
Enjoy!
Remi.Barrere@thalesgroup.com
Ronan.Keryell@hpc-project.com
for ARTEMIS SMECY European project.
*/
#include <stdbool.h>
/* function Gen
Example of old C89 array use-case where the size is unknown. Note that
this implies some nasty access linearization for arrays with more than
1 dimension.
*/
void Gen(int *out, int size) {
// Can be executed in parallel
#pragma omp parallel for
for (int i = 0; i < size; i++)
out [i] = 0;
}
/* function Add
Nice C99 array with dynamic size definition. Note this implies having
the array size given first.
*/
void Add(int size, int in[size], int out[size]) {
// Can be executed in parallel
#pragma omp parallel for
for (int i = 0; i < size; i++)
out [i] = in [i] + 1;
}
/* function Test */
bool Test(int size, int in[size]) {
bool ok = true;
/* Can be executed in parallel: ok is initialized from the global value,
   and at loop exit ok is the && of all the local ok
   instances: */
#pragma omp parallel for reduction(&&:ok)
for (int i = 0; i < size; i++)
/* We cannot have this simple code here:
   if (in [i] != 2)
   exit(-1) ;
   because a loop or a function with exit() cannot be executed in parallel.
   Proof: there is a parallel execution interleaving that may execute
   some computations in some threads with a greater i than the one
   executing the exit() done on another thread. So the causality is
   not respected.
   Anyway, in a heterogeneous execution, just think about how to
   implement the exit() operating system call from an
   accelerator... No hope. :-)
   So use a reduction instead and return the status for later
   inspection:
*/
ok &= (in[i] == 2);
// Return false if at least one in[i] is not equal to 2:
return ok;
}
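/* Rough hand-written equivalent of the reduction above (a sketch only,
   following the OpenMP semantics of a && reduction):
     bool ok = true;
     #pragma omp parallel
     {
       bool ok_local = true;          // private copy, reduction identity
       #pragma omp for
       for (int i = 0; i < size; i++)
         ok_local = ok_local && (in[i] == 2);
       #pragma omp critical
       ok = ok && ok_local;           // combine the partial results
     }
*/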
/* main */
int main(int argc, char* argv[]) {
int tab[6][200];
// Gen is mapped on GPP 0; it produces (out) an array written to arg 1:
#pragma smecy map(GPP, 0) arg(1, [6][200], out)
/* Note there is an array linearization here, since we give a 2D array
to Gen(), which uses it as a 1D array. This is bad programming style, but
it is just to show it can be handled in the model :-) */
Gen((int *) tab, 200*6);
// Launch different things in parallel:
#pragma omp parallel sections
{
// Do one thing in parallel...
#pragma omp section
{
/* Map this "Add" call to PE 0, arg 2 is communicated as input as an
array of "int [3][200]", and after execution arg 3 is
communicated out as an array of "int [3][200]"
Note the aliasing of the last 2 arguments. Just to show we can
handle it. :-) */
#pragma smecy map(PE, 0) arg(2, [3][200], in) arg(3, [3][200], out)
Add(200*3, (int *) tab, (int *) tab);
}
// ...with another thing
#pragma omp section
{
/* Map this "Add" call to PE 1, arg 2 is communicated as input as an
array of "int [3][200]" from address tab[3][0], that is the
second half of tab, and after execution arg 3 is communicated out
as an array of "int [3][200]", that is the second half of tab
Note the aliasing of the last 2 arguments. Just to show we can
handle it. :-) */
#pragma smecy map(PE, 1) arg(2, [3][200], in) \
arg(3, [3][200], out)
Add(200*3, &tab[3][0], &tab[3][0]);
}
}
// Launch different things in parallel:
#pragma omp parallel sections
{
#pragma omp section
{
#pragma smecy map(PE, 2) arg(2, [2][200], in) arg(3, [2][200], out)
Add(200*2, (int *) tab, (int *) tab);
}
#pragma omp section
{
#pragma smecy map(PE, 3) arg(2, [2][200], in) arg(3, [2][200], out)
Add(200*2, &tab[2][0], &tab[2][0]);
}
#pragma omp section
{
#pragma smecy map(PE, 4) arg(2, [2][200], in) arg(3, [2][200], out)
Add(200*2, &tab[4][0], &tab[4][0]);
}
}
// An example where arg 2 is just used as a whole implicitly:
#pragma smecy map(GPP, 0) arg(2, in)
bool result = Test(200*6, (int *) tab);
// Return non 0 if the computation went wrong:
return !result;
}
|
GB_binop__eq_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__eq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int8)
// A*D function (colscale): GB (_AxD__eq_int8)
// D*A function (rowscale): GB (_DxB__eq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__eq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__eq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int8)
// C=scalar+B GB (_bind1st__eq_int8)
// C=scalar+B' GB (_bind1st_tran__eq_int8)
// C=A+scalar GB (_bind2nd__eq_int8)
// C=A'+scalar GB (_bind2nd_tran__eq_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT8 || GxB_NO_EQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__eq_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__eq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__eq_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__eq_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__eq_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__eq_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__eq_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__eq_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB072-taskdep1-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two tasks with depend clauses to ensure execution order:
i is shared by the two tasks based on the implicit data-sharing attribute rules.
*/
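/*
For contrast (a hypothetical variant, not part of this benchmark):
without the depend clauses the two tasks could run in either order and
even race on i, so the assert below might fail:
  #pragma omp task
  i = 1;
  #pragma omp task
  i = 2;
The out/in dependences on i force the second task to wait for the first.
*/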
#include <assert.h>
int main()
{
int i=0;
#pragma omp parallel
#pragma omp single
{
#pragma omp task depend (out:i)
i = 1;
#pragma omp task depend (in:i)
i = 2;
}
assert (i==2);
return 0;
}
|
zSchCompUdt-2Ddynamic.c |
/*! @file
 * \brief This file contains the main loop of pzgstrf which involves the
 * rank-k update of the Schur complement.
 * Uses 2D partitioning for the scatter phase.
 *
 * <pre>
 * -- Distributed SuperLU routine (version 4.1) --
 * Lawrence Berkeley National Lab, Univ. of California Berkeley.
 * October 1, 2014
 * </pre>
 */
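/* In outline (an informal sketch, not code from this routine): for the
 * current panel k, the trailing submatrix receives the rank-k update
 *     S(I,J) = A(I,J) - L(I,k) * U(k,J),
 * where the GEMM calls below form L(I,k)*U(k,J) in a temporary buffer
 * and the zscatter_l/zscatter_u calls subtract it into the distributed
 * L and U storage. */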
#define SCHEDULE_STRATEGY guided
double tt_start;
double tt_end;
if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
int cum_nrow=0;
int temp_nbrow;
lptr = lptr0;
luptr = luptr0;
/**
* separating L blocks
*/
int lookAheadBlk=0, RemainBlk=0;
tt_start = SuperLU_timer_();
for (int i = 0; i < nlb; ++i) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
int look_up_flag=1;
for (int j = k0+1; j < SUPERLU_MIN (k0 + num_look_aheads+2, nsupers ); ++j)
{
if(ib == perm_c_supno[j]) look_up_flag=0;
}
if(!look_up_flag) {
/* ib is within look up window */
if (lookAheadBlk==0) {
lookAheadFullRow[lookAheadBlk] = temp_nbrow;
} else {
lookAheadFullRow[lookAheadBlk] = temp_nbrow+lookAheadFullRow[lookAheadBlk-1];
}
lookAheadStRow[lookAheadBlk] = cum_nrow;
lookAhead_lptr[lookAheadBlk] = lptr;
lookAhead_ib[lookAheadBlk] = ib;
lookAheadBlk++;
} else { /* ib is not in look up window */
if (RemainBlk==0) {
Remain_info[RemainBlk].FullRow = temp_nbrow;
} else {
Remain_info[RemainBlk].FullRow = temp_nbrow+Remain_info[RemainBlk-1].FullRow;
}
RemainStRow[RemainBlk] = cum_nrow;
// Remain_lptr[RemainBlk] = lptr;
Remain_info[RemainBlk].lptr = lptr;
// Remain_ib[RemainBlk] = ib;
Remain_info[RemainBlk].ib = ib;
RemainBlk++;
}
cum_nrow +=temp_nbrow;
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
lptr += temp_nbrow;
luptr += temp_nbrow;
} /* for i ... */
lptr = lptr0;
luptr = luptr0;
/* leading dimension of L buffer */
#if 0
int LDlookAhead_LBuff = lookAheadFullRow[lookAheadBlk-1]; /* may go negative.*/
#else /* Piyush fix */
int LDlookAhead_LBuff = lookAheadBlk==0? 0 :lookAheadFullRow[lookAheadBlk-1];
#endif
/* #pragma omp parallel for */
for (int i = 0; i < lookAheadBlk; ++i) {
int StRowDest = 0;
int temp_nbrow;
if (i==0) {
temp_nbrow = lookAheadFullRow[0];
} else {
StRowDest = lookAheadFullRow[i-1];
temp_nbrow = lookAheadFullRow[i]-lookAheadFullRow[i-1];
}
int StRowSource=lookAheadStRow[i];
/* Now copying the matrix*/
// #pragma omp parallel for (gives slow down)
for (int j = 0; j < knsupc; ++j) {
memcpy(&lookAhead_L_buff[StRowDest+j*LDlookAhead_LBuff],
&lusup[luptr+j*nsupr+StRowSource],
temp_nbrow * sizeof(doublecomplex) );
}
}
int LDRemain_LBuff = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < RemainBlk; ++i) {
int StRowDest = 0;
int temp_nbrow;
if (i==0) {
temp_nbrow = Remain_info[0].FullRow;
} else {
StRowDest = Remain_info[i-1].FullRow;
temp_nbrow = Remain_info[i].FullRow-Remain_info[i-1].FullRow;
}
int StRowSource=RemainStRow[i];
/* Now copying the matrix*/
// #pragma omp parallel for (gives slow down)
for (int j = 0; j < knsupc; ++j) {
// printf("StRowDest %d LDRemain_LBuff %d StRowSource %d \n", StRowDest ,LDRemain_LBuff ,StRowSource );
memcpy(&Remain_L_buff[StRowDest+j*LDRemain_LBuff],
&lusup[luptr+j*nsupr+StRowSource],
temp_nbrow * sizeof(doublecomplex) );
}
} /* parallel for i ... */
tt_end = SuperLU_timer_();
LookAheadRowSepTimer += tt_end-tt_start;
#if 0
LookAheadRowSepMOP += 2*knsupc*(lookAheadFullRow[lookAheadBlk-1]+Remain_info[RemainBlk-1].FullRow );
#else
int_t lnbrow, rnbrow;
lnbrow = lookAheadBlk==0 ? 0 : lookAheadFullRow[lookAheadBlk-1];
rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
nbrow = lnbrow + rnbrow;
LookAheadRowSepMOP += 2*knsupc*(nbrow);
#endif
ldu =0;
full =1;
/*updating lookahead rows */
tt_start = SuperLU_timer_();
#if 0
nbrow = lookAheadFullRow[lookAheadBlk-1]+Remain_info[RemainBlk-1].FullRow;
#endif
if ( nbrow>0 ) {
/*
* counting U blocks
*/
ncols=0;
ldu=0;
full=1;
int temp_ncols=0;
for (j = jj0; j < nub; ++j) {
temp_ncols=0;
arrive_at_ublock(
j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid
);
Ublock_info[j].iukp = iukp;
Ublock_info[j].rukp = rukp;
Ublock_info[j].jb = jb;
/* Prepare to call GEMM. */
jj = iukp;
for (; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
++temp_ncols;
if ( segsize != ldu ) full = 0;
if ( segsize > ldu ) ldu = segsize;
}
}
Ublock_info[j].full_u_cols = temp_ncols;
ncols += temp_ncols;
}
/* Now doing a prefix sum on full_u_cols */
for ( j = jj0+1; j < nub; ++j) {
Ublock_info[j].full_u_cols += Ublock_info[j-1].full_u_cols;
}
tempu = bigU;
#ifdef _OPENMP
#pragma omp parallel for private(j,iukp,rukp,tempu, jb, nsupc,ljb,segsize,\
lead_zero, jj, i) \
default (shared) schedule(SCHEDULE_STRATEGY)
#endif
for (j = jj0; j < nub; ++j) {
if(j==jj0) tempu = bigU;
else tempu = bigU + ldu*Ublock_info[j-1].full_u_cols;
/* == processing each of the remaining columns == */
arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid);
for (jj = iukp; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
lead_zero = ldu - segsize;
for (i = 0; i < lead_zero; ++i) tempu[i] = zero;
tempu += lead_zero;
for (i = 0; i < segsize; ++i) tempu[i] = uval[rukp+i];
rukp += segsize;
tempu += segsize;
}
}
rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
} /* parallel for j = jj0..nub */
tempu = bigU; //setting it to starting of the matrix
} /* if(nbrow>0) */
tt_end = SuperLU_timer_();
GatherTimer += tt_end-tt_start;
GatherMOP += 2*ldu*ncols;
int Lnbrow = lookAheadBlk==0 ? 0 :lookAheadFullRow[lookAheadBlk-1];
int Rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
int jj_cpu=nub; /*limit between CPU and GPU */
tempv = bigV;
if (Lnbrow>0 && ldu >0 && ncols>0) {
ncols = Ublock_info[nub-1].full_u_cols;
schur_flop_counter += 2 * (double)Lnbrow * (double)ldu * (double)ncols;
stat->ops[FACT] += 2 * (double)Lnbrow * (double)ldu * (double)ncols;
tt_start = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel for default (shared) \
private (j,i,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \
schedule(dynamic)
#endif
for (int ij = 0; ij < lookAheadBlk*(nub-jj0); ++ij) {
int j = ij/lookAheadBlk + jj0;
int lb = ij%lookAheadBlk;
#ifdef _OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
doublecomplex* tempv1 = bigV + thread_id*ldt*ldt;
/* Getting U block information */
/* unsigned long long ut_start, ut_end; */
int_t rukp = Ublock_info[j].rukp;
int_t iukp = Ublock_info[j].iukp;
int jb = Ublock_info[j].jb;
int nsupc = SuperSize(jb);
int ljb = LBj (jb, grid);
int st_col;
int ncols;
if (j>jj0) {
ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols;
st_col = Ublock_info[j-1].full_u_cols;
} else {
ncols = Ublock_info[j].full_u_cols;
st_col = 0;
}
/* Getting L block information */
int_t lptr = lookAhead_lptr[lb];
int ib = lookAhead_ib[lb];
int temp_nbrow = lsub[lptr+1];
lptr += LB_DESCRIPTOR;
int cum_nrow = (lb==0 ? 0 : lookAheadFullRow[lb-1]);
#if defined (USE_VENDOR_BLAS)
zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1);
#else
zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&lookAhead_L_buff[(knsupc-ldu)*Lnbrow+cum_nrow], &Lnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow);
#endif
if ( ib < jb ) {
zscatter_u (
ib, jb,
nsupc, iukp,xsup,
klst, temp_nbrow,
lptr, temp_nbrow,lsub,
usub, tempv1,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else {
zscatter_l (
ib, ljb, nsupc,iukp,xsup,klst,temp_nbrow,lptr,
temp_nbrow,usub,lsub,tempv1,
indirect_thread, indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
}
} /* for ij = ... */
tt_end = SuperLU_timer_();
LookAheadGEMMTimer += tt_end- tt_start;
LookAheadGEMMFlOp += 2 * (double ) Lnbrow * (double )ldu * (double )ncols;
stat->ops[FACT] += 2 * (double ) Lnbrow * (double )ldu * (double )ncols;
LookAheadScatterTimer += tt_end-tt_start;
LookAheadScatterMOP += 3*Lnbrow*ncols;
} /* if Lnbrow > 0 ... */
/***************************************************************
* Updating remaining rows and column on CPU
***************************************************************/
Rnbrow = RemainBlk==0 ? 0 : Remain_info[RemainBlk-1].FullRow;
ncols = jj_cpu==0 ? 0 : Ublock_info[jj_cpu-1].full_u_cols;
schur_flop_counter += 2 * (double)Rnbrow * (double)ldu * (double)ncols;
stat->ops[FACT] += 2 * (double)Rnbrow * (double)ldu * (double)ncols;
tt_start = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel for default (shared) \
private (j,i,lb,rukp,iukp,jb,nsupc,ljb,lptr,ib,temp_nbrow,cum_nrow) \
schedule(dynamic)
#endif
for (int ij = 0; ij < RemainBlk*(jj_cpu-jj0); ++ij) {
int j = ij / RemainBlk + jj0;
int lb = ij % RemainBlk;
#ifdef _OPENMP
int thread_id = omp_get_thread_num();
#else
int thread_id = 0;
#endif
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
doublecomplex* tempv1 = bigV + thread_id*ldt*ldt;
/* Getting U block information */
/* unsigned long long ut_start, ut_end; */
int_t rukp = Ublock_info[j].rukp;
int_t iukp = Ublock_info[j].iukp;
int jb = Ublock_info[j].jb;
int nsupc = SuperSize(jb);
int ljb = LBj (jb, grid);
int st_col;
int ncols;
if (j>jj0) {
ncols = Ublock_info[j].full_u_cols-Ublock_info[j-1].full_u_cols;
st_col = Ublock_info[j-1].full_u_cols;
} else {
ncols = Ublock_info[j].full_u_cols;
st_col = 0;
}
/* Getting L block information */
int_t lptr = Remain_info[lb].lptr;
int ib = Remain_info[lb].ib;
int temp_nbrow = lsub[lptr+1];
lptr += LB_DESCRIPTOR;
int cum_nrow = (lb==0 ? 0 : Remain_info[lb-1].FullRow);
/* calling GEMM */
#if defined (USE_VENDOR_BLAS)
zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&Remain_L_buff[(knsupc-ldu)*Rnbrow+cum_nrow], &Rnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow, 1, 1);
#else
zgemm_("N", "N", &temp_nbrow, &ncols, &ldu, &alpha,
&Remain_L_buff[(knsupc-ldu)*Rnbrow+cum_nrow], &Rnbrow,
&tempu[st_col*ldu], &ldu, &beta, tempv1, &temp_nbrow);
#endif
/* Now scattering the block */
if ( ib<jb ) {
zscatter_u (
ib, jb,
nsupc, iukp,xsup,
klst, temp_nbrow,
lptr, temp_nbrow,lsub,
usub, tempv1,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else {
zscatter_l (
ib, ljb, nsupc,iukp,xsup,klst,temp_nbrow,lptr,
temp_nbrow,usub,lsub,tempv1,
indirect_thread, indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
}
} /* for (int ij =... */
} /* if k L(:,k) and U(k,:) are not empty */
|
pr61200.c | /* PR libgomp/61200 */
/* { dg-do run } */
#include <omp.h>
#include <stdlib.h>
#include <unistd.h>
volatile int x;
void
foo ()
{
int var = 1;
int i;
for (i = 0; i < 2; i++)
{
if (i == 1)
{
#pragma omp parallel num_threads(2)
if (x)
var++;
else
{
#pragma omp single
sleep (2);
}
}
else
{
#pragma omp task shared(var)
{
sleep (1);
var = 2;
}
}
}
#pragma omp taskwait
if (var != 2)
abort ();
}
void
bar ()
{
int var = 1;
int i;
for (i = 0; i < 2; i++)
{
if (i == 0)
{
#pragma omp task shared(var)
{
sleep (1);
var = 2;
}
}
else
{
#pragma omp parallel num_threads(2)
if (x)
var++;
else
{
#pragma omp single
sleep (2);
}
}
}
#pragma omp taskwait
if (var != 2)
abort ();
}
int
main ()
{
omp_set_nested (1);
#pragma omp parallel num_threads(2)
#pragma omp single
foo ();
#pragma omp parallel num_threads(2)
#pragma omp single
bar ();
return 0;
}
|