source
stringlengths
3
92
c
stringlengths
26
2.25M
par_csr_matvec.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include <assert.h> /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *x_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local ); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex 
*x_local_data = hypre_VectorData(x_local); hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_assert( idxstride>0 ); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size || num_rows != b_size) { ierr = 12; } if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) { ierr = 13; } hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); } else { hypre_assert( num_vectors > 1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but 
then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* x_tmp */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); } hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(x_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); hypre_SeqVectorSetDataOwner(x_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE); x_tmp_data = hypre_VectorData(x_tmp); /* x_buff_data */ x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); continue; #endif } x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } /* The assert is because the following loop only works 
for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv]; HYPRE_Complex *locl_data = x_local_data + jv * vecstride; /* if on device, no need to Sync: send_data is on device memory */ #if defined(HYPRE_USING_CUDA) /* pack send data on device */ HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), locl_data, send_data ); #elif defined(HYPRE_USING_DEVICE_OPENMP) /* pack send data on device */ HYPRE_Int i; HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts) for (i = start; i < end; i++) { send_data[i] = locl_data[device_send_map_elmts[i]]; } #else HYPRE_Int i; /* pack send data on host */ #if defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } #endif } #ifdef HYPRE_PROFILE 
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication starts */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv], HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* computation offd part */ if (num_cols_offd) { hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local ); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) if (jv == 0) { continue; } #endif hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } hypre_HandleCudaComputeStreamSyncPop(hypre_handle); hypre_SyncCudaComputeStream(hypre_handle); #ifdef HYPRE_PROFILE 
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A); hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride(y_local); HYPRE_Int idxstride = hypre_VectorIndexStride(y_local); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); hypre_HandleCudaComputeStreamSyncPush(hypre_handle, 0); /*--------------------------------------------------------------------- * Check for size compatibility. 
MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_rows != x_size) { ierr = 1; } if (num_cols != y_size) { ierr = 2; } if (num_rows != x_size && num_cols != y_size) { ierr = 3; } hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { hypre_assert( num_vectors > 1 ); y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. 
hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* y_tmp */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); } hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(y_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE); y_tmp_data = hypre_VectorData(y_tmp); /* y_buf_data */ y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); continue; #endif } y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (offdT) { 
// offdT is optional. Used only if it's present hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd], HYPRE_MEMORY_DEVICE, y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ if (diagT) { // diagT is optional. Used only if it's present. hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. 
This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv]; HYPRE_Complex *locl_data = y_local_data + jv * vecstride; #if defined(HYPRE_USING_CUDA) /* unpack recv data on device */ hypreDevice_GenScatterAdd(locl_data, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), recv_data); #elif defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int i, j; /* unpack recv data on device */ for (i = 0; i < num_sends; i++) { HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts) for (j = start; j < end; j++) { locl_data[device_send_map_elmts[j]] += recv_data[j]; } } #else HYPRE_Int i; /* unpack recv data on host, TODO OMP? 
*/ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i]; } #endif } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_DEVICE_OPENMP) if (jv == 0) { continue; } #endif hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } hypre_HandleCudaComputeStreamSyncPop(hypre_handle); hypre_SyncCudaComputeStream(hypre_handle); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = 
hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); if (num_cols != x_size) ierr = 11; if (num_rows != y_size) ierr = 12; if (num_cols != x_size && num_rows != y_size) ierr = 13; if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart 
(comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
parallel_sections_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s void foo(); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel sections'}} #pragma omp parallel sections // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel sections'}} #pragma omp parallel sections foo void test_no_clause() { int i; #pragma omp parallel sections { foo(); } // expected-error@+2 {{the statement for '#pragma omp parallel sections' must be a compound statement}} #pragma omp parallel sections ++i; #pragma omp parallel sections { foo(); foo(); // expected-error {{statement in 'omp parallel sections' directive must be enclosed into a section region}} } } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel sections { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } #pragma omp section if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L3; else if (i == 8) { L3: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; goto L3; // expected-error {{use of undeclared label 'L3'}} } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}} #pragma omp parallel sections foo bar { foo(); // expected-error@+1 {{unexpected OpenMP clause 'nowait' in directive '#pragma omp section'}} #pragma omp section nowait ; } } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}} #pragma omp parallel sections; { foo(); } // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive 
'#pragma omp parallel sections'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}} #pragma omp parallel sections linear(x); { foo(); } // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}} #pragma omp parallel sections private(x); { foo(); } // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}} #pragma omp parallel sections, private(x); { foo(); } } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel sections private( { foo(); } // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel sections private(, { foo(); } // expected-error@+1 2 {{expected expression}} #pragma omp parallel sections private(, ) { foo(); } // expected-error@+1 {{expected expression}} #pragma omp parallel sections private() { foo(); } // expected-error@+1 {{expected expression}} #pragma omp parallel sections private(int) { foo(); } // expected-error@+1 {{expected variable name}} #pragma omp parallel sections private(0) { foo(); } int x, y, z; #pragma omp parallel sections private(x) { foo(); } #pragma omp parallel sections private(x, y) { foo(); } #pragma omp parallel sections private(x, y, z) { foo(); } } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel sections lastprivate( { foo(); } // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel sections lastprivate(, { foo(); } // expected-error@+1 2 {{expected expression}} #pragma omp parallel sections lastprivate(, ) { foo(); } // expected-error@+1 {{expected expression}} #pragma omp parallel 
sections lastprivate() { foo(); } // expected-error@+1 {{expected expression}} #pragma omp parallel sections lastprivate(int) { foo(); } // expected-error@+1 {{expected variable name}} #pragma omp parallel sections lastprivate(0) { foo(); } int x, y, z; #pragma omp parallel sections lastprivate(x) { foo(); } #pragma omp parallel sections lastprivate(x, y) { foo(); } #pragma omp parallel sections lastprivate(x, y, z) { foo(); } } void test_firstprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel sections firstprivate( { foo(); } // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel sections firstprivate(, { foo(); } // expected-error@+1 2 {{expected expression}} #pragma omp parallel sections firstprivate(, ) { foo(); } // expected-error@+1 {{expected expression}} #pragma omp parallel sections firstprivate() { foo(); } // expected-error@+1 {{expected expression}} #pragma omp parallel sections firstprivate(int) { foo(); } // expected-error@+1 {{expected variable name}} #pragma omp parallel sections firstprivate(0) { foo(); } int x, y, z; #pragma omp parallel sections lastprivate(x) firstprivate(x) { foo(); } #pragma omp parallel sections lastprivate(x, y) firstprivate(x, y) { foo(); } #pragma omp parallel sections lastprivate(x, y, z) firstprivate(x, y, z) { foo(); } }
random.h
#ifndef RANDOM_H_INCLUDED #define RANDOM_H_INCLUDED // See random.cpp for notes. #include <cstdint> #include <climits> namespace BS { struct RandomUintGenerator{ private: // for the Marsaglia algorithm uint32_t rngx; uint32_t rngy; uint32_t rngz; uint32_t rngc; // for the Jenkins algorithm uint32_t a, b, c, d; public: void initialize(); // must be called to seed the RNG uint32_t operator()(); unsigned operator()(unsigned min, unsigned max); }; // The globally-scoped random number generator. Declaring it // threadprivate causes each thread to instantiate a private instance. extern RandomUintGenerator randomUint; #pragma omp threadprivate(randomUint) constexpr uint32_t RANDOM_UINT_MAX = 0xffffffff; } // end namespace BS #endif // RANDOM_H_INCLUDED
convolution_3x3_pack1to8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 stride-1 convolution, "pack1to8": the input feature map stores 1 float per
// element, the output is packed 8 floats per element (one AVX register per pixel).
// top_blob is pre-filled with the per-channel bias and then accumulated into across
// all input channels (load / fmadd / store). Each kernel channel holds 9 taps x 8
// output lanes (weight offsets 0..64 in steps of 8; k advances by 9 * 8 per input
// channel). Output channels are processed two at a time in the OpenMP loop below,
// then a tail loop handles the remaining single channel. The inner column loop is
// unrolled by 4, then 2, then 1; after each row the input pointers skip the 2 extra
// columns a 3x3 stride-1 window reads (r0/r1/r2 += 2).
static void conv3x3s1_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias;
    // pair up output channels; the remainder is handled one channel at a time below
    int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); __m256 _bias1 = bias ?
    _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); __m256 _k00_1 = _mm256_loadu_ps(k1); __m256 _k01_1 = _mm256_loadu_ps(k1 + 8); __m256 _k02_1 = _mm256_loadu_ps(k1 + 16); __m256 _k10_1 = _mm256_loadu_ps(k1 + 24); __m256 _k11_1 = _mm256_loadu_ps(k1 + 32); __m256 _k12_1 = _mm256_loadu_ps(k1 + 40); __m256 _k20_1 = _mm256_loadu_ps(k1 + 48); __m256 _k21_1 = _mm256_loadu_ps(k1 + 56); __m256 _k22_1 = _mm256_loadu_ps(k1 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00);
    _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _sum01 = _mm256_comp_fmadd_ps(_r02, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r03, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r12, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r22, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r02, _k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r03, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r12, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k11_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r22, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); __m256 _sum02 = _mm256_loadu_ps(outptr0
    + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum02 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum02); _sum12 = _mm256_comp_fmadd_ps(_r03, _k00_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r15, _k12_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum12); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum03 = _mm256_comp_fmadd_ps(_r04, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r05, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r06, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r14, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r15, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r16, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r24, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r25, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r26, _k22_0, _sum03); _sum13 = _mm256_comp_fmadd_ps(_r04, _k00_1, _sum13); _sum13 =
    _mm256_comp_fmadd_ps(_r05, _k01_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r06, _k02_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r14, _k10_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r15, _k11_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r16, _k12_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r24, _k20_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r25, _k21_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r26, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); r0 += 4; r1 += 4; r2 += 4; outptr0 += 32; outptr1 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10);
    _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _sum01 = _mm256_comp_fmadd_ps(_r02, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r03, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r12, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r22, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r02, _k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r03, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r12, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k11_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r22, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); r0 += 2; r1 += 2; r2 += 2; outptr0 += 16; outptr1 += 16; } for (; j < outw; j++) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 =
    _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; outptr1 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 8; k1 += 9 * 8; } }
    // remaining (odd) output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ?
    _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = _mm256_loadu_ps(k0); __m256 _k01 = _mm256_loadu_ps(k0 + 8); __m256 _k02 = _mm256_loadu_ps(k0 + 16); __m256 _k10 = _mm256_loadu_ps(k0 + 24); __m256 _k11 = _mm256_loadu_ps(k0 + 32); __m256 _k12 = _mm256_loadu_ps(k0 + 40); __m256 _k20 = _mm256_loadu_ps(k0 + 48); __m256 _k21 = _mm256_loadu_ps(k0 + 56); __m256 _k22 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum0 = _mm256_comp_fmadd_ps(_r01, _k00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r02, _k01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r03, _k02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r11, _k10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r12, _k11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r23, _k22, _sum0); __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_r02, _k00, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r03, _k01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r04, _k02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r12,
    _k10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r13, _k11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r14, _k12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r22, _k20, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r23, _k21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r24, _k22, _sum1); __m256 _sum2 = _mm256_loadu_ps(outptr0 + 16); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_comp_fmadd_ps(_r03, _k00, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r04, _k01, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r05, _k02, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r13, _k10, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r14, _k11, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r15, _k12, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r23, _k20, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r24, _k21, _sum2); _sum2 = _mm256_comp_fmadd_ps(_r25, _k22, _sum2); __m256 _sum3 = _mm256_loadu_ps(outptr0 + 24); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_comp_fmadd_ps(_r04, _k00, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r05, _k01, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r06, _k02, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r14, _k10, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r15, _k11, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r16, _k12, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r24, _k20, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r25, _k21, _sum3); _sum3 = _mm256_comp_fmadd_ps(_r26, _k22, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 4; r1 += 4; r2 += 4; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 =
    _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum0 = _mm256_comp_fmadd_ps(_r01, _k00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r02, _k01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r03, _k02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r11, _k10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r12, _k11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r23, _k22, _sum0); __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_r02, _k00, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r03, _k01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r04, _k02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r12, _k10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r13, _k11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r14, _k12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r22, _k20, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r23, _k21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_r24, _k22, _sum1); _mm256_storeu_ps(outptr0 + 8, _sum1); r0 += 2; r1 += 2; r2 += 2; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum0 = _mm256_comp_fmadd_ps(_r01, _k00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r02, _k01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r03, _k02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r11, _k10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r12, _k11, _sum0); _sum0 =
    _mm256_comp_fmadd_ps(_r13, _k12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r21, _k20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r22, _k21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_r23, _k22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 1; r1 += 1; r2 += 1; outptr0 += 8; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9 * 8; } }
}

// 3x3 stride-2 variant of the kernel above ("pack1to8"); same weight layout
// and bias pre-fill scheme. Its definition continues beyond this excerpt.
// NOTE(review): tailstep is computed here but only used further down, past
// the visible portion -- presumably the per-row input-pointer advance for
// stride 2; confirm against the full file.
static void conv3x3s2_pack1to8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); __m256 _bias1 = bias ? _mm256_loadu_ps((const float*)bias + (p + 1) * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); __m256 _k00_1 = _mm256_loadu_ps(k1); __m256 _k01_1 = _mm256_loadu_ps(k1 + 8); __m256 _k02_1 = _mm256_loadu_ps(k1 + 16); __m256 _k10_1 = _mm256_loadu_ps(k1 + 24); __m256 _k11_1 = _mm256_loadu_ps(k1 + 32); __m256 _k12_1 =
_mm256_loadu_ps(k1 + 40); __m256 _k20_1 = _mm256_loadu_ps(k1 + 48); __m256 _k21_1 = _mm256_loadu_ps(k1 + 56); __m256 _k22_1 = _mm256_loadu_ps(k1 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = 
_mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r03, _k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _sum12 = _mm256_comp_fmadd_ps(_r05, _k00_1, _sum12); _sum12 = 
_mm256_comp_fmadd_ps(_r06, _k01_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r07, _k02_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r15, _k10_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r16, _k11_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r17, _k12_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r25, _k20_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r26, _k21_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r27, _k22_1, _sum12); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _sum13 = _mm256_comp_fmadd_ps(_r07, _k00_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r08, _k01_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r09, _k02_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r17, _k10_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r18, _k11_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r19, _k12_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r27, _k20_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r28, _k21_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r29, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); __m256 _r010 = _mm256_broadcast_ss(r0 + 9); __m256 _r110 = _mm256_broadcast_ss(r1 + 9); __m256 _r210 = 
_mm256_broadcast_ss(r2 + 9); __m256 _r011 = _mm256_broadcast_ss(r0 + 10); __m256 _r111 = _mm256_broadcast_ss(r1 + 10); __m256 _r211 = _mm256_broadcast_ss(r2 + 10); __m256 _sum04 = _mm256_loadu_ps(outptr0 + 32); __m256 _sum14 = _mm256_loadu_ps(outptr1 + 32); _sum04 = _mm256_comp_fmadd_ps(_r09, _k00_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r010, _k01_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r011, _k02_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r19, _k10_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r110, _k11_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r111, _k12_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r29, _k20_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r210, _k21_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r211, _k22_0, _sum04); _sum14 = _mm256_comp_fmadd_ps(_r09, _k00_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r010, _k01_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r011, _k02_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r19, _k10_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r110, _k11_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r111, _k12_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r29, _k20_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r210, _k21_1, _sum14); _sum14 = _mm256_comp_fmadd_ps(_r211, _k22_1, _sum14); _mm256_storeu_ps(outptr0 + 32, _sum04); _mm256_storeu_ps(outptr1 + 32, _sum14); __m256 _r012 = _mm256_broadcast_ss(r0 + 11); __m256 _r112 = _mm256_broadcast_ss(r1 + 11); __m256 _r212 = _mm256_broadcast_ss(r2 + 11); __m256 _r013 = _mm256_broadcast_ss(r0 + 12); __m256 _r113 = _mm256_broadcast_ss(r1 + 12); __m256 _r213 = _mm256_broadcast_ss(r2 + 12); __m256 _sum05 = _mm256_loadu_ps(outptr0 + 40); __m256 _sum15 = _mm256_loadu_ps(outptr1 + 40); _sum05 = _mm256_comp_fmadd_ps(_r011, _k00_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r012, _k01_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r013, _k02_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r111, _k10_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r112, _k11_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r113, _k12_0, _sum05); _sum05 
= _mm256_comp_fmadd_ps(_r211, _k20_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r212, _k21_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r213, _k22_0, _sum05); _sum15 = _mm256_comp_fmadd_ps(_r011, _k00_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r012, _k01_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r013, _k02_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r111, _k10_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r112, _k11_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r113, _k12_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r211, _k20_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r212, _k21_1, _sum15); _sum15 = _mm256_comp_fmadd_ps(_r213, _k22_1, _sum15); _mm256_storeu_ps(outptr0 + 40, _sum05); _mm256_storeu_ps(outptr1 + 40, _sum15); __m256 _r014 = _mm256_broadcast_ss(r0 + 13); __m256 _r114 = _mm256_broadcast_ss(r1 + 13); __m256 _r214 = _mm256_broadcast_ss(r2 + 13); __m256 _r015 = _mm256_broadcast_ss(r0 + 14); __m256 _r115 = _mm256_broadcast_ss(r1 + 14); __m256 _r215 = _mm256_broadcast_ss(r2 + 14); __m256 _sum06 = _mm256_loadu_ps(outptr0 + 48); __m256 _sum16 = _mm256_loadu_ps(outptr1 + 48); _sum06 = _mm256_comp_fmadd_ps(_r013, _k00_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r014, _k01_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r015, _k02_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r113, _k10_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r114, _k11_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r115, _k12_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r213, _k20_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r214, _k21_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r215, _k22_0, _sum06); _sum16 = _mm256_comp_fmadd_ps(_r013, _k00_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r014, _k01_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r015, _k02_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r113, _k10_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r114, _k11_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r115, _k12_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r213, _k20_1, _sum16); _sum16 = _mm256_comp_fmadd_ps(_r214, _k21_1, 
_sum16); _sum16 = _mm256_comp_fmadd_ps(_r215, _k22_1, _sum16); _mm256_storeu_ps(outptr0 + 48, _sum06); _mm256_storeu_ps(outptr1 + 48, _sum16); __m256 _r016 = _mm256_broadcast_ss(r0 + 15); __m256 _r116 = _mm256_broadcast_ss(r1 + 15); __m256 _r216 = _mm256_broadcast_ss(r2 + 15); __m256 _r017 = _mm256_broadcast_ss(r0 + 16); __m256 _r117 = _mm256_broadcast_ss(r1 + 16); __m256 _r217 = _mm256_broadcast_ss(r2 + 16); __m256 _sum07 = _mm256_loadu_ps(outptr0 + 56); __m256 _sum17 = _mm256_loadu_ps(outptr1 + 56); _sum07 = _mm256_comp_fmadd_ps(_r015, _k00_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r016, _k01_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r017, _k02_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r115, _k10_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r116, _k11_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r117, _k12_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r215, _k20_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r216, _k21_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r217, _k22_0, _sum07); _sum17 = _mm256_comp_fmadd_ps(_r015, _k00_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r016, _k01_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r017, _k02_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r115, _k10_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r116, _k11_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r117, _k12_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r215, _k20_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r216, _k21_1, _sum17); _sum17 = _mm256_comp_fmadd_ps(_r217, _k22_1, _sum17); _mm256_storeu_ps(outptr0 + 56, _sum07); _mm256_storeu_ps(outptr1 + 56, _sum17); r0 += 16; r1 += 16; r2 += 16; outptr0 += 64; outptr1 += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = 
_mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r03, 
_k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _sum12 = _mm256_loadu_ps(outptr1 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _sum12 = _mm256_comp_fmadd_ps(_r05, _k00_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r06, _k01_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r07, _k02_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r15, _k10_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r16, _k11_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r17, _k12_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r25, _k20_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r26, _k21_1, _sum12); _sum12 = _mm256_comp_fmadd_ps(_r27, _k22_1, _sum12); _mm256_storeu_ps(outptr0 + 16, _sum02); _mm256_storeu_ps(outptr1 + 16, _sum12); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 
_r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); __m256 _sum13 = _mm256_loadu_ps(outptr1 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _sum13 = _mm256_comp_fmadd_ps(_r07, _k00_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r08, _k01_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r09, _k02_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r17, _k10_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r18, _k11_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r19, _k12_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r27, _k20_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r28, _k21_1, _sum13); _sum13 = _mm256_comp_fmadd_ps(_r29, _k22_1, _sum13); _mm256_storeu_ps(outptr0 + 24, _sum03); _mm256_storeu_ps(outptr1 + 24, _sum13); r0 += 8; r1 += 8; r2 += 8; outptr0 += 32; outptr1 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, 
_sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _sum11 = _mm256_loadu_ps(outptr1 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _sum11 = _mm256_comp_fmadd_ps(_r03, _k00_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r04, _k01_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r05, _k02_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r13, _k10_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r14, _k11_1, _sum11); _sum11 = 
_mm256_comp_fmadd_ps(_r15, _k12_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r23, _k20_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r24, _k21_1, _sum11); _sum11 = _mm256_comp_fmadd_ps(_r25, _k22_1, _sum11); _mm256_storeu_ps(outptr0 + 8, _sum01); _mm256_storeu_ps(outptr1 + 8, _sum11); r0 += 4; r1 += 4; r2 += 4; outptr0 += 16; outptr1 += 16; } for (; j < outw; j++) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _sum10 = _mm256_loadu_ps(outptr1); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _sum10 = _mm256_comp_fmadd_ps(_r01, _k00_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r02, _k01_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r03, _k02_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r11, _k10_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r12, _k11_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r13, _k12_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r21, _k20_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r22, _k21_1, _sum10); _sum10 = _mm256_comp_fmadd_ps(_r23, _k22_1, _sum10); _mm256_storeu_ps(outptr0, _sum00); _mm256_storeu_ps(outptr1, _sum10); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; outptr1 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; k1 += 9 * 8; } } #pragma omp 
parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00_0 = _mm256_loadu_ps(k0); __m256 _k01_0 = _mm256_loadu_ps(k0 + 8); __m256 _k02_0 = _mm256_loadu_ps(k0 + 16); __m256 _k10_0 = _mm256_loadu_ps(k0 + 24); __m256 _k11_0 = _mm256_loadu_ps(k0 + 32); __m256 _k12_0 = _mm256_loadu_ps(k0 + 40); __m256 _k20_0 = _mm256_loadu_ps(k0 + 48); __m256 _k21_0 = _mm256_loadu_ps(k0 + 56); __m256 _k22_0 = _mm256_loadu_ps(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = 
_mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _mm256_storeu_ps(outptr0 + 8, _sum01); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _mm256_storeu_ps(outptr0 + 16, _sum02); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, 
_k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _mm256_storeu_ps(outptr0 + 24, _sum03); __m256 _r010 = _mm256_broadcast_ss(r0 + 9); __m256 _r110 = _mm256_broadcast_ss(r1 + 9); __m256 _r210 = _mm256_broadcast_ss(r2 + 9); __m256 _r011 = _mm256_broadcast_ss(r0 + 10); __m256 _r111 = _mm256_broadcast_ss(r1 + 10); __m256 _r211 = _mm256_broadcast_ss(r2 + 10); __m256 _sum04 = _mm256_loadu_ps(outptr0 + 32); _sum04 = _mm256_comp_fmadd_ps(_r09, _k00_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r010, _k01_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r011, _k02_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r19, _k10_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r110, _k11_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r111, _k12_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r29, _k20_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r210, _k21_0, _sum04); _sum04 = _mm256_comp_fmadd_ps(_r211, _k22_0, _sum04); _mm256_storeu_ps(outptr0 + 32, _sum04); __m256 _r012 = _mm256_broadcast_ss(r0 + 11); __m256 _r112 = _mm256_broadcast_ss(r1 + 11); __m256 _r212 = _mm256_broadcast_ss(r2 + 11); __m256 _r013 = _mm256_broadcast_ss(r0 + 12); __m256 _r113 = _mm256_broadcast_ss(r1 + 12); __m256 _r213 = _mm256_broadcast_ss(r2 + 12); __m256 _sum05 = _mm256_loadu_ps(outptr0 + 40); _sum05 = _mm256_comp_fmadd_ps(_r011, _k00_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r012, _k01_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r013, _k02_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r111, _k10_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r112, _k11_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r113, _k12_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r211, _k20_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r212, _k21_0, _sum05); _sum05 = _mm256_comp_fmadd_ps(_r213, _k22_0, _sum05); _mm256_storeu_ps(outptr0 + 40, 
_sum05); __m256 _r014 = _mm256_broadcast_ss(r0 + 13); __m256 _r114 = _mm256_broadcast_ss(r1 + 13); __m256 _r214 = _mm256_broadcast_ss(r2 + 13); __m256 _r015 = _mm256_broadcast_ss(r0 + 14); __m256 _r115 = _mm256_broadcast_ss(r1 + 14); __m256 _r215 = _mm256_broadcast_ss(r2 + 14); __m256 _sum06 = _mm256_loadu_ps(outptr0 + 48); _sum06 = _mm256_comp_fmadd_ps(_r013, _k00_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r014, _k01_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r015, _k02_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r113, _k10_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r114, _k11_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r115, _k12_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r213, _k20_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r214, _k21_0, _sum06); _sum06 = _mm256_comp_fmadd_ps(_r215, _k22_0, _sum06); _mm256_storeu_ps(outptr0 + 48, _sum06); __m256 _r016 = _mm256_broadcast_ss(r0 + 15); __m256 _r116 = _mm256_broadcast_ss(r1 + 15); __m256 _r216 = _mm256_broadcast_ss(r2 + 15); __m256 _r017 = _mm256_broadcast_ss(r0 + 16); __m256 _r117 = _mm256_broadcast_ss(r1 + 16); __m256 _r217 = _mm256_broadcast_ss(r2 + 16); __m256 _sum07 = _mm256_loadu_ps(outptr0 + 56); _sum07 = _mm256_comp_fmadd_ps(_r015, _k00_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r016, _k01_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r017, _k02_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r115, _k10_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r116, _k11_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r117, _k12_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r215, _k20_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r216, _k21_0, _sum07); _sum07 = _mm256_comp_fmadd_ps(_r217, _k22_0, _sum07); _mm256_storeu_ps(outptr0 + 56, _sum07); r0 += 16; r1 += 16; r2 += 16; outptr0 += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = 
_mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _mm256_storeu_ps(outptr0 + 8, _sum01); __m256 _sum02 = _mm256_loadu_ps(outptr0 + 16); __m256 _r06 = _mm256_broadcast_ss(r0 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 5); __m256 _r26 = _mm256_broadcast_ss(r2 + 5); __m256 _r07 = _mm256_broadcast_ss(r0 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 6); __m256 _r27 = _mm256_broadcast_ss(r2 + 6); _sum02 = _mm256_comp_fmadd_ps(_r05, _k00_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r06, _k01_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r07, _k02_0, _sum02); _sum02 
= _mm256_comp_fmadd_ps(_r15, _k10_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r16, _k11_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r17, _k12_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r25, _k20_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r26, _k21_0, _sum02); _sum02 = _mm256_comp_fmadd_ps(_r27, _k22_0, _sum02); _mm256_storeu_ps(outptr0 + 16, _sum02); __m256 _r08 = _mm256_broadcast_ss(r0 + 7); __m256 _r18 = _mm256_broadcast_ss(r1 + 7); __m256 _r28 = _mm256_broadcast_ss(r2 + 7); __m256 _r09 = _mm256_broadcast_ss(r0 + 8); __m256 _r19 = _mm256_broadcast_ss(r1 + 8); __m256 _r29 = _mm256_broadcast_ss(r2 + 8); __m256 _sum03 = _mm256_loadu_ps(outptr0 + 24); _sum03 = _mm256_comp_fmadd_ps(_r07, _k00_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r08, _k01_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r09, _k02_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r17, _k10_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r18, _k11_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r19, _k12_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r27, _k20_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r28, _k21_0, _sum03); _sum03 = _mm256_comp_fmadd_ps(_r29, _k22_0, _sum03); _mm256_storeu_ps(outptr0 + 24, _sum03); r0 += 8; r1 += 8; r2 += 8; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = 
_mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, _sum00); __m256 _sum01 = _mm256_loadu_ps(outptr0 + 8); __m256 _r04 = _mm256_broadcast_ss(r0 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 3); __m256 _r24 = _mm256_broadcast_ss(r2 + 3); __m256 _r05 = _mm256_broadcast_ss(r0 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 4); __m256 _r25 = _mm256_broadcast_ss(r2 + 4); _sum01 = _mm256_comp_fmadd_ps(_r03, _k00_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r04, _k01_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r05, _k02_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r13, _k10_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r14, _k11_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r15, _k12_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r23, _k20_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r24, _k21_0, _sum01); _sum01 = _mm256_comp_fmadd_ps(_r25, _k22_0, _sum01); _mm256_storeu_ps(outptr0 + 8, _sum01); r0 += 4; r1 += 4; r2 += 4; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum00 = _mm256_loadu_ps(outptr0); __m256 _r01 = _mm256_broadcast_ss(r0); __m256 _r02 = _mm256_broadcast_ss(r0 + 1); __m256 _r03 = _mm256_broadcast_ss(r0 + 2); __m256 _r11 = _mm256_broadcast_ss(r1); __m256 _r12 = _mm256_broadcast_ss(r1 + 1); __m256 _r13 = _mm256_broadcast_ss(r1 + 2); __m256 _r21 = _mm256_broadcast_ss(r2); __m256 _r22 = _mm256_broadcast_ss(r2 + 1); __m256 _r23 = _mm256_broadcast_ss(r2 + 2); _sum00 = _mm256_comp_fmadd_ps(_r01, _k00_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r02, _k01_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r03, _k02_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r11, _k10_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r12, _k11_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r13, _k12_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r21, _k20_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r22, _k21_0, _sum00); _sum00 = _mm256_comp_fmadd_ps(_r23, _k22_0, _sum00); _mm256_storeu_ps(outptr0, 
_sum00); r0 += 2; r1 += 2; r2 += 2; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9 * 8; } } }
/* ===== GB_subassign_02.c ===== */
//------------------------------------------------------------------------------
// GB_subassign_02: C(I,J) = A ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 02: C(I,J) = A ; using S

// M: NULL
// Mask_comp: false
// C_replace: false
// accum: NULL
// A: matrix
// S: constructed

// C: not bitmap or full: use GB_bitmap_assign instead
// A: any sparsity structure.

// NOTE(review): nearly every identifier used below (S, Si, Sp, Ap, Ai, Ab, Ax,
// Zh, Z_to_X, Z_to_S, pS, pS_end, pA, pA_end, nzombies, task_pending, ntasks,
// nthreads, taskid, kfirst, klast, Avlen, Svlen, asize, ...) is declared by
// the GB_* macros from "GB_subassign_methods.h".  This function cannot be
// read or modified in isolation from those macro definitions.

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_02
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix A,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, A)) ;   // NO ALIAS of C==A

    //--------------------------------------------------------------------------
    // S = C(I,J): the symbolic pattern of the submatrix of C being assigned
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (A) ;
    GB_GET_C ;      // C must not be bitmap
    GB_GET_A ;
    GB_GET_S ;
    // no accumulator in Method 02: entries in C are overwritten, not combined
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 02: C(I,J) = A ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  All entries in A+S must be examined, so the work is
    // Omega (nnz(A)+nnz(S)).

    // Method 02 and Method 04 are somewhat similar.  They differ on how C is
    // modified when the entry is present in S but not A.

    // TODO: phase2 of Method 02 and 04 are identical and could be
    // done in a single function.

    //--------------------------------------------------------------------------
    // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    // Slice the work into tasks.  A bitmap A requires scanning all of IxJ;
    // otherwise only the union of the patterns of A and S is traversed.

    if (A_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all A+S
        GB_SUBASSIGN_TWO_SLICE (A, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // Phase 1 never inserts new entries; it only deletes (creating zombies),
    // overwrites existing entries, and counts the tuples phase 2 will insert.
    // nzombies is summed across tasks via the OpenMP reduction.

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                // A is bitmap, so A(iA,j) lives at position j*Avlen + iA
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (Sfound && !Afound)
                    {
                        // ----[C . 1] or [X . 1]-------------------------------
                        // S (i,j) is present but A (i,j) is not
                        // [C . 1]: action: ( delete ): becomes zombie
                        // [X . 1]: action: ( X ): still a zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && Afound)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        // counted here, inserted in phase 2
                        task_pending++ ;
                    }
                    else if (Sfound && Afound)
                    {
                        // ----[C A 1] or [X A 1]-------------------------------
                        // both S (i,j) and A (i,j) present
                        // [C A 1]: action: ( =A ): copy A into C, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_C_S_LOOKUP ;
                        GB_noaccum_C_A_1_matrix ;
                        GB_NEXT (S) ;
                    }
                }
            }

            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;

                    if (iS < iA)
                    {
                        // ----[C . 1] or [X . 1]-------------------------------
                        // S (i,j) is present but A (i,j) is not
                        // [C . 1]: action: ( delete ): becomes zombie
                        // [X . 1]: action: ( X ): still a zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // ----[C A 1] or [X A 1]-------------------------------
                        // both S (i,j) and A (i,j) present
                        // [C A 1]: action: ( =A ): copy A into C, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_C_S_LOOKUP ;
                        GB_noaccum_C_A_1_matrix ;
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // while list S (:,j) has entries.  List A (:,j) exhausted.
                while (pS < pS_end)
                {
                    // ----[C . 1] or [X . 1]-----------------------------------
                    // S (i,j) is present but A (i,j) is not
                    // [C . 1]: action: ( delete ): becomes zombie
                    // [X . 1]: action: ( X ): still a zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }

                // List A (:,j) has entries.  List S (:,j) exhausted.
                // every remaining A entry becomes a pending tuple in phase 2
                task_pending += (pA_end - pA) ;
            }

            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    // cumulative sum of per-task pending counts gives each task its slot
    // in the pending-tuple list
    GB_PENDING_CUMSUM ;

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (!Sfound && Afound)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT (Ax +(pA*asize)) ;
                        GB_NEXT (A) ;
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }

            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;

                    if (iS < iA)
                    {
                        // already handled in phase 1 (deletion)
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // ----[. A 1]------------------------------------------
                        // S (i,j) is not present, A (i,j) is present
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT (Ax +(pA*asize)) ;
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // already handled in phase 1 (overwrite/undelete)
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // ignore the remainder of S (:,j)

                // while list A (:,j) has entries.  List S (:,j) exhausted.
                while (pA < pA_end)
                {
                    // ----[. A 1]----------------------------------------------
                    // S (i,j) is not present, A (i,j) is present
                    // [. A 1]: action: ( insert )
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                    GB_PENDING_INSERT (Ax +(pA*asize)) ;
                    GB_NEXT (A) ;
                }
            }

            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
66.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute parallel for schedule(dynamic) private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp target teams distribute parallel for simd schedule(dynamic, 8) dist_schedule(static, 8) for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
other-nearest_gpu.c
//<libmptogpu> Error executing kernel. Global Work Size is NULL or exceeded //valid range. #include "BenchmarksUtil.h" #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> typedef struct point { int x; int y; } point; typedef struct sel_points { int position; float value; } sel_points; #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 500 #endif #define points 250 #define var SIZE / points #define default_v 100000.00 #define ERROR_THRESHOLD 0.01 void init(int s, point *vector, sel_points *selected) { int i, j; for (i = 0; i < s; i++) { vector[i].x = i; vector[i].y = i * 2; } for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { selected[i * s + j].position = 0; selected[i * s + j].value = default_v; } } } void k_nearest_gpu(int s, point *vector, sel_points *selected) { int i, j, m, q; q = s * s; #pragma omp target map(to : vector[0 : s]) map(tofrom : selected[0 : q]) \ device(DEVICE_ID) { #pragma omp parallel for collapse(1) for (i = 0; i < s; i++) { for (j = i + 1; j < s; j++) { float distance, x, y; x = vector[i].x - vector[j].x; y = vector[i].y - vector[j].y; x = x * x; y = y * y; distance = x + y; distance = sqrt(distance); selected[i * s + j].value = distance; selected[i * s + j].position = j; selected[j * s + i].value = distance; selected[j * s + i].position = i; } } /// for each line in matrix /// order values #pragma omp parallel for collapse(1) for (i = 0; i < s; i++) { for (j = 0; j < s; j++) { for (m = j + 1; m < s; m++) { if (selected[i * s + j].value > selected[i * s + m].value) { sel_points aux; aux = selected[i * s + j]; selected[i * s + j] = selected[i * s + m]; selected[i * s + m] = aux; } } } } } } void k_nearest_cpu(int s, point *vector, sel_points *selected) { int i, j; for (i = 0; i < s; i++) { for (j = i + 1; j < s; j++) { float distance, x, y; x = vector[i].x - vector[j].x; y = vector[i].y - vector[j].y; x = x * x; y = y * y; distance = x 
+ y; distance = sqrt(distance); selected[i * s + j].value = distance; selected[i * s + j].position = j; selected[j * s + i].value = distance; selected[j * s + i].position = i; } } } void order_points(int s, point *vector, sel_points *selected) { int i; for (i = 0; i < s; i++) { /// for each line in matrix /// order values int j; for (j = 0; j < s; j++) { int m; for (m = j + 1; m < s; m++) { if (selected[i * s + j].value > selected[i * s + m].value) { sel_points aux; aux = selected[i * s + j]; selected[i * s + j] = selected[i * s + m]; selected[i * s + m] = aux; } } } } } int compareResults(sel_points *B, sel_points *B_GPU) { int i, j, fail; fail = 0; // Compare B and B_GPU for (i = 0; i < SIZE; i++) { for (j = 0; j < SIZE; j++) { // Value if (percentDiff(B[i * SIZE + j].value, B_GPU[i * SIZE + j].value) > ERROR_THRESHOLD) { fail++; } // Position if (percentDiff(B[i * SIZE + j].position, B_GPU[i * SIZE + j].position) > ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", ERROR_THRESHOLD, fail); return fail; } int main(int argc, char *argv[]) { double t_start, t_end; int fail = 0; point *vector; sel_points *selected_cpu, *selected_gpu; vector = (point *)malloc(sizeof(point) * SIZE); selected_cpu = (sel_points *)malloc(sizeof(sel_points) * SIZE * SIZE); selected_gpu = (sel_points *)malloc(sizeof(sel_points) * SIZE * SIZE); int i; fprintf(stdout, "<< Nearest >>\n"); t_start = rtclock(); for (i = (var - 1); i < SIZE; i += var) { init(i, vector, selected_cpu); k_nearest_cpu(i, vector, selected_cpu); order_points(i, vector, selected_cpu); } t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); for (i = (var - 1); i < SIZE; i += var) { init(i, vector, selected_gpu); k_nearest_gpu(i, vector, selected_gpu); } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(selected_cpu, 
selected_gpu); #endif free(selected_cpu); free(selected_gpu); free(vector); return fail; }
GB_unop__identity_fc64_uint16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc64_uint16
// op(A') function:  GB_unop_tran__identity_fc64_uint16

// C type:   GxB_FC64_t
// A type:   uint16_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

// The macros below parameterize the shared templates
// (GB_unop_transpose.c) for this specific type pair.

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: widen the uint16 value to double and use it as the real part,
// with a zero imaginary part
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx and Ax may be aliased: each iteration reads Ax [p] before writing
// Cx [p], so in-place application is safe.

GrB_Info GB_unop_apply__identity_fc64_uint16
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop lives in the shared template GB_unop_transpose.c, which
// uses the GB_* macros defined above for this type pair.

GrB_Info GB_unop_tran__identity_fc64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__plus_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__plus_uint16
// A.*B function (eWiseMult):       GB_AemultB__plus_uint16
// A*D function (colscale):         GB_AxD__plus_uint16
// D*A function (rowscale):         GB_DxB__plus_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__plus_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__plus_uint16
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__plus_uint16
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__plus_uint16
// C=scalar+B                       GB_bind1st__plus_uint16
// C=scalar+B'                      GB_bind1st_tran__plus_uint16
// C=A+scalar                       GB_bind2nd__plus_uint16
// C=A'+scalar                      GB_bind2nd_tran__plus_uint16

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij + bij)

// NOTE(review): all function bodies below are #include'd template files; the
// macros here configure those templates for the PLUS operator on uint16.
// uint16 addition wraps modulo 2^16 (unsigned overflow is well-defined).

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB)  \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x + y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PLUS || GxB_NO_UINT16 || GxB_NO_PLUS_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// NOTE(review): unlike the other kernels, this one has no GB_DISABLE guard
// (it returns void and the template runs unconditionally) — presumably the
// caller checks GB_DISABLE first; confirm against the dispatch code.

void GB_Cdense_ewise3_accum__plus_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__plus_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__plus_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__plus_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__plus_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__plus_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// frees the ek_slice workspace allocated inside the add/emult templates
#undef  GB_FREE_ALL
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ;  \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ;  \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ;  \
}

GrB_Info GB_AaddB__plus_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__plus_uint16
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__plus_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,   // bitmap of B, or NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap (GBB is true if Bb is NULL)
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = Bx [p] ;
        Cx [p] = (x + bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__plus_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,   // bitmap of A, or NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij + y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint16_t aij = Ax [pA] ;            \
    Cx [pC] = (x + aij) ;               \
}

GrB_Info GB_bind1st_tran__plus_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any later use (same value here, by construction)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint16_t aij = Ax [pA] ;            \
    Cx [pC] = (aij + y) ;               \
}

GrB_Info GB_bind2nd_tran__plus_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gear_scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // #if !defined(KRATOS_GEAR_SCHEME_H_INCLUDED ) #define KRATOS_GEAR_SCHEME_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "solving_strategies/schemes/scheme.h" #include "includes/define.h" //#include "includes/serializer.h" #include "includes/dof.h" //#include "includes/variables.h" #include "fluid_dynamics_application_variables.h" #include "includes/model_part.h" #include "processes/process.h" #include "containers/pointer_vector_set.h" #include "utilities/openmp_utils.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// A scheme for BDF2 time integration. /** */ template<class TSparseSpace,class TDenseSpace> class GearScheme : public Scheme<TSparseSpace, TDenseSpace> { public: ///@name Type Definitions ///@{ /// Pointer definition of GearScheme KRATOS_CLASS_POINTER_DEFINITION(GearScheme); typedef Scheme<TSparseSpace,TDenseSpace> BaseType; typedef typename TSparseSpace::DataType TDataType; typedef typename TSparseSpace::MatrixType TSystemMatrixType; typedef typename TSparseSpace::VectorType TSystemVectorType; typedef typename TDenseSpace::MatrixType LocalSystemMatrixType; typedef typename TDenseSpace::VectorType LocalSystemVectorType; typedef Dof<TDataType> TDofType; typedef typename BaseType::DofsArrayType DofsArrayType; ///@} ///@name Life Cycle ///@{ /// Default constructor. GearScheme() : Scheme<TSparseSpace, TDenseSpace>(), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) {} /// Constructor to use the formulation combined with a turbulence model. 
/** * The turbulence model is assumed to be implemented as a Kratos::Process. * The model's Execute() method wil be called at the start of each * non-linear iteration. * @param pTurbulenceModel pointer to the turbulence model */ GearScheme(Process::Pointer pTurbulenceModel) : Scheme<TSparseSpace, TDenseSpace>(), mpTurbulenceModel(pTurbulenceModel), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) {} /// Constructor for periodic boundary conditions. /** * @param rPeriodicVar the variable used to store periodic pair indices. */ GearScheme(const Kratos::Variable<int>& rPeriodicVar) : Scheme<TSparseSpace, TDenseSpace>(), mrPeriodicIdVar(rPeriodicVar) {} /// Destructor. ~GearScheme() override {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Check input data for errors. /** * @param rModelPart The fluid's ModelPart * @return 0 if no errors were found */ int Check(ModelPart& rModelPart) override { KRATOS_TRY int ErrorCode = BaseType::Check(rModelPart); if (ErrorCode != 0) return ErrorCode; // const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // Check buffer size if (rModelPart.GetBufferSize() < 3) KRATOS_THROW_ERROR(std::logic_error, "GearScheme error: Insufficient buffer size for BDF2, should be at least 3, got ",rModelPart.GetBufferSize()); // Check that all required variables were registered if(DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"TIME_STEP Key is 0. Check if all applications were correctly registered.",""); if(BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"BDF_COEFFICIENTS Key is 0. Check if all applications were correctly registered.",""); if(OSS_SWITCH.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"OSS_SWITCH Key is 0. Check if all applications were correctly registered.",""); if(DISPLACEMENT.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"DISPLACEMENT Key is 0. 
Check if all applications were correctly registered.",""); if(VELOCITY.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"VELOCITY Key is 0. Check if all applications were correctly registered.",""); if(MESH_VELOCITY.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"MESH_VELOCITY Key is 0. Check if all applications were correctly registered.",""); if(ACCELERATION.Key() == 0) KRATOS_THROW_ERROR(std::invalid_argument,"ACCELERATION Key is 0. Check if all applications were correctly registered.",""); // // Check that the ModelPart's ProcessInfo contains the required variables // if(rCurrentProcessInfo.Has(DELTA_TIME) != true) // KRATOS_THROW_ERROR(std::invalid_argument,"No value of DELTA_TIME defined in ProcessInfo for a model part passed to GearScheme",""); // if(rCurrentProcessInfo.Has(BDF_COEFFICIENTS) != true) // KRATOS_THROW_ERROR(std::invalid_argument,"No value of BDF_COEFFICIENTS defined in ProcessInfo for a model part passed to GearScheme",""); // if(rCurrentProcessInfo.Has(OSS_SWITCH) != true) // KRATOS_THROW_ERROR(std::invalid_argument,"No value of OSS_SWITCH defined in ProcessInfo for a model part passed to GearScheme",""); return 0; KRATOS_CATCH(""); } /// Set the time iteration coefficients void InitializeSolutionStep(ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { this->SetTimeCoefficients(rModelPart.GetProcessInfo()); // Base function initializes elements and conditions BaseType::InitializeSolutionStep(rModelPart,A,Dx,b); // Recalculate mesh velocity (to account for variable time step) const double Dt = rModelPart.GetProcessInfo()[DELTA_TIME]; const double OldDt = rModelPart.GetProcessInfo().GetPreviousSolutionStepInfo(1)[DELTA_TIME]; if(Dt != OldDt) { const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS]; // OpenMP partition int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; 
OpenMPUtils::DivideInPartitions(rModelPart.NumberOfNodes(),NumThreads,NodePartition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k+1]; for(ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { array_1d<double,3>& rMeshVel = itNode->FastGetSolutionStepValue(MESH_VELOCITY); const array_1d<double,3>& rDisp0 = itNode->FastGetSolutionStepValue(DISPLACEMENT); const array_1d<double,3>& rDisp1 = itNode->FastGetSolutionStepValue(DISPLACEMENT,1); const array_1d<double,3>& rDisp2 = itNode->FastGetSolutionStepValue(DISPLACEMENT,2); rMeshVel = BDFcoefs[0] * rDisp0 + BDFcoefs[1] * rDisp1 + BDFcoefs[2] * rDisp2; } } } } void InitializeNonLinIteration(ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY if (mpTurbulenceModel != 0) mpTurbulenceModel->Execute(); KRATOS_CATCH("") } void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { const ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //if orthogonal subscales are computed if (CurrentProcessInfo[OSS_SWITCH] == 1.0) { this->LumpedProjection(rModelPart); //this->FullProjection(rModelPart); } } /// Start the iteration by providing a first approximation to the solution. 
void Predict(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.NumberOfNodes(), NumThreads, NodePartition); const Vector& BDFcoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS]; return; #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k+1]; for(ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { array_1d<double,3>& rVel0 = itNode->FastGetSolutionStepValue(VELOCITY); const array_1d<double,3>& rVel1 = itNode->FastGetSolutionStepValue(VELOCITY,1); const array_1d<double,3>& rVel2 = itNode->FastGetSolutionStepValue(VELOCITY,2); array_1d<double,3>& rAcceleration = itNode->FastGetSolutionStepValue(ACCELERATION); // Predict velocities if(!itNode->IsFixed(VELOCITY_X)) rVel0[0] = 2.00 * rVel1[0] - rVel2[0]; if(!itNode->IsFixed(VELOCITY_Y)) rVel0[1] = 2.00 * rVel1[1] - rVel2[1]; if(!itNode->IsFixed(VELOCITY_Z)) rVel0[2] = 2.00 * rVel1[2] - rVel2[2]; // Predict acceleration rAcceleration = BDFcoefs[0] * rVel0 + BDFcoefs[1] * rVel1 + BDFcoefs[2] * rVel2; } } KRATOS_CATCH("") } /// Store the iteration results as solution step variables and update acceleration after a Newton-Raphson iteration. /** * @param rModelPart fluid ModelPart * @param rDofSet DofSet containing the Newton-Raphson system degrees of freedom. 
* @param A Newton-Raphson system matrix (unused) * @param Dx Newton-Raphson iteration solution * @param b Newton-Raphson right hand side (unused) */ void Update(ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY mpDofUpdater->UpdateDofs(rDofSet,Dx); const Vector& BDFCoefs = rModelPart.GetProcessInfo()[BDF_COEFFICIENTS]; this->UpdateAcceleration(rModelPart,BDFCoefs); KRATOS_CATCH("") } void CalculateSystemContributions(Element::Pointer rCurrentElement, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY LocalSystemMatrixType Mass; LocalSystemMatrixType Damp; // Initialize element rCurrentElement->InitializeNonLinearIteration(rCurrentProcessInfo); // Get Equation Id rCurrentElement->EquationIdVector(rEquationId,rCurrentProcessInfo); // Get matrix contributions rCurrentElement->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo); rCurrentElement->CalculateMassMatrix(Mass,rCurrentProcessInfo); rCurrentElement->CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo); // Add the dynamic contributions to the local system using BDF2 coefficients this->CombineLHSContributions(LHS_Contribution,Mass,Damp,rCurrentProcessInfo); this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement,RHS_Contribution,Mass,rCurrentProcessInfo); KRATOS_CATCH("") } void Calculate_RHS_Contribution(Element::Pointer rCurrentElement, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY LocalSystemMatrixType Mass; LocalSystemMatrixType Damp; // Initialize element rCurrentElement->InitializeNonLinearIteration(rCurrentProcessInfo); // Get Equation Id rCurrentElement->EquationIdVector(rEquationId,rCurrentProcessInfo); // Get matrix contributions 
rCurrentElement->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); rCurrentElement->CalculateMassMatrix(Mass,rCurrentProcessInfo); rCurrentElement->CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo); // Add the dynamic contributions to the local system using BDF2 coefficients this->AddDynamicRHSContribution<Kratos::Element>(rCurrentElement,RHS_Contribution,Mass,rCurrentProcessInfo); KRATOS_CATCH("") } void Condition_CalculateSystemContributions(Condition::Pointer rCurrentCondition, LocalSystemMatrixType& LHS_Contribution, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY LocalSystemMatrixType Mass; LocalSystemMatrixType Damp; // Initialize element rCurrentCondition->InitializeNonLinearIteration(rCurrentProcessInfo); // Get Equation Id rCurrentCondition->EquationIdVector(rEquationId,rCurrentProcessInfo); // Get matrix contributions rCurrentCondition->CalculateLocalSystem(LHS_Contribution,RHS_Contribution,rCurrentProcessInfo); rCurrentCondition->CalculateMassMatrix(Mass,rCurrentProcessInfo); rCurrentCondition->CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo); // Add the dynamic contributions to the local system using BDF2 coefficients this->CombineLHSContributions(LHS_Contribution,Mass,Damp,rCurrentProcessInfo); this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition,RHS_Contribution,Mass,rCurrentProcessInfo); KRATOS_CATCH("") } void Condition_Calculate_RHS_Contribution(Condition::Pointer rCurrentCondition, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& rEquationId, ProcessInfo& rCurrentProcessInfo) override { KRATOS_TRY LocalSystemMatrixType Mass; LocalSystemMatrixType Damp; // Initialize element rCurrentCondition->InitializeNonLinearIteration(rCurrentProcessInfo); // Get Equation Id rCurrentCondition->EquationIdVector(rEquationId,rCurrentProcessInfo); // Get matrix 
contributions rCurrentCondition->CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo); rCurrentCondition->CalculateMassMatrix(Mass,rCurrentProcessInfo); rCurrentCondition->CalculateLocalVelocityContribution(Damp,RHS_Contribution,rCurrentProcessInfo); // Add the dynamic contributions to the local system using BDF2 coefficients this->AddDynamicRHSContribution<Kratos::Condition>(rCurrentCondition,RHS_Contribution,Mass,rCurrentProcessInfo); KRATOS_CATCH("") } /// Free memory allocated by this object. void Clear() override { this->mpDofUpdater->Clear(); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "GearScheme"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables. 
*/ void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo) { KRATOS_TRY; //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) KRATOS_CATCH(""); } /// Update Dof values after a Newton-Raphson iteration. /** * @param rDofSet Container for the Degrees of freedom in the system * @param Dx Solution of the linear system */ virtual void UpdateDofs(DofsArrayType& rDofSet, TSystemVectorType& Dx) { KRATOS_TRY int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector DofSetPartition; OpenMPUtils::DivideInPartitions(rDofSet.size(), NumThreads, DofSetPartition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); typename DofsArrayType::iterator DofsBegin = rDofSet.begin() + DofSetPartition[k]; typename DofsArrayType::iterator DofsEnd = rDofSet.begin() + DofSetPartition[k+1]; for (typename DofsArrayType::iterator itDof = DofsBegin; itDof != DofsEnd; ++itDof) { if (itDof->IsFree()) itDof->GetSolutionStepValue() += TSparseSpace::GetValue(Dx, itDof->EquationId()); } } KRATOS_CATCH("") } /// Update Dof values after a Newton-Raphson iteration /** * @param rModelPart fluid ModelPart * @param rBDFcoefs Time stepping coefficients for this iteration. 
*/ void UpdateAcceleration(ModelPart& rModelPart, const Vector& rBDFcoefs) { KRATOS_TRY int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(rModelPart.NumberOfNodes(), NumThreads, NodePartition); const double Coef0 = rBDFcoefs[0]; const double Coef1 = rBDFcoefs[1]; const double Coef2 = rBDFcoefs[2]; #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = rModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = rModelPart.NodesBegin() + NodePartition[k+1]; for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { const array_1d<double,3>& rVel0 = itNode->FastGetSolutionStepValue(VELOCITY); const array_1d<double,3>& rVel1 = itNode->FastGetSolutionStepValue(VELOCITY,1); const array_1d<double,3>& rVel2 = itNode->FastGetSolutionStepValue(VELOCITY,2); array_1d<double,3>& rAcceleration = itNode->FastGetSolutionStepValue(ACCELERATION); rAcceleration = Coef0 * rVel0 + Coef1 * rVel1 + Coef2 * rVel2; } } KRATOS_CATCH("") } void CombineLHSContributions(LocalSystemMatrixType& rLHS, LocalSystemMatrixType& rMass, LocalSystemMatrixType& rDamp, const ProcessInfo& rCurrentProcessInfo) { const double Coef0 = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS)[0]; if (rMass.size1() != 0) noalias(rLHS) += Coef0 * rMass; if (rDamp.size1() != 0) noalias(rLHS) += rDamp; } template<class TObject> void AddDynamicRHSContribution(typename TObject::Pointer pObject, LocalSystemVectorType& rRHS, LocalSystemMatrixType& rMass, const ProcessInfo& rCurrentProcessInfo) { if (rMass.size1() != 0) { const Vector& rCoefs = rCurrentProcessInfo.GetValue(BDF_COEFFICIENTS); LocalSystemVectorType Acc; pObject->GetFirstDerivativesVector(Acc); Acc *= rCoefs[0]; for(unsigned int n = 1; n < 3; ++n) { LocalSystemVectorType rVel; pObject->GetFirstDerivativesVector(rVel,n); noalias(Acc) += rCoefs[n] * rVel; } noalias(rRHS) -= prod(rMass,Acc); } } void 
FullProjection(ModelPart& rModelPart) { const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // Initialize containers for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++) { noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); // "x" ind->FastGetSolutionStepValue(DIVPROJ) = 0.0; // "x" ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // "Ml" } // Newton-Raphson parameters const double RelTol = 1e-4 * rModelPart.NumberOfNodes(); const double AbsTol = 1e-6 * rModelPart.NumberOfNodes(); const unsigned int MaxIter = 100; // iteration variables unsigned int iter = 0; array_1d<double,3> dMomProj = ZeroVector(3); double dMassProj = 0.0; double RelMomErr = 1000.0 * RelTol; double RelMassErr = 1000.0 * RelTol; double AbsMomErr = 1000.0 * AbsTol; double AbsMassErr = 1000.0 * AbsTol; while( ( (AbsMomErr > AbsTol && RelMomErr > RelTol) || (AbsMassErr > AbsTol && RelMassErr > RelTol) ) && iter < MaxIter) { // Reinitialize RHS for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++) { noalias(ind->GetValue(ADVPROJ)) = ZeroVector(3); // "b" ind->GetValue(DIVPROJ) = 0.0; // "b" ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0; // Reset because Calculate will overwrite it } // Reinitialize errors RelMomErr = 0.0; RelMassErr = 0.0; AbsMomErr = 0.0; AbsMassErr = 0.0; // Compute new values array_1d<double, 3 > output; for (typename ModelPart::ElementsContainerType::iterator elem = rModelPart.ElementsBegin(); elem != rModelPart.ElementsEnd(); elem++) { elem->Calculate(SUBSCALE_VELOCITY, output, rCurrentProcessInfo); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ); // 
Update iteration variables for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++) { const double Area = ind->FastGetSolutionStepValue(NODAL_AREA); // Ml dx = b - Mc x dMomProj = ind->GetValue(ADVPROJ) / Area; dMassProj = ind->GetValue(DIVPROJ) / Area; RelMomErr += sqrt( dMomProj[0]*dMomProj[0] + dMomProj[1]*dMomProj[1] + dMomProj[2]*dMomProj[2]); RelMassErr += fabs(dMassProj); array_1d<double,3>& rMomRHS = ind->FastGetSolutionStepValue(ADVPROJ); double& rMassRHS = ind->FastGetSolutionStepValue(DIVPROJ); rMomRHS += dMomProj; rMassRHS += dMassProj; AbsMomErr += sqrt( rMomRHS[0]*rMomRHS[0] + rMomRHS[1]*rMomRHS[1] + rMomRHS[2]*rMomRHS[2]); AbsMassErr += fabs(rMassRHS); } if(AbsMomErr > 1e-10) RelMomErr /= AbsMomErr; else // If residual is close to zero, force absolute convergence to avoid division by zero errors RelMomErr = 1000.0; if(AbsMassErr > 1e-10) RelMassErr /= AbsMassErr; else RelMassErr = 1000.0; iter++; } KRATOS_INFO_IF("GearScheme", rModelPart.GetCommunicator().MyPID() == 0) << "Performed OSS Projection in " << iter << " iterations" << std::endl; } void LumpedProjection(ModelPart& rModelPart) { const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (typename ModelPart::NodesContainerType::iterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++) { noalias(itNode->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3); itNode->FastGetSolutionStepValue(DIVPROJ) = 0.0; itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } array_1d<double, 3 > Out; for (typename ModelPart::ElementsContainerType::iterator itElem = rModelPart.ElementsBegin(); itElem != rModelPart.ElementsEnd(); itElem++) { itElem->Calculate(ADVPROJ, Out, rCurrentProcessInfo); } rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ); // Correction for periodic 
conditions if (mrPeriodicIdVar.Key() != 0) { this->PeriodicConditionProjectionCorrection(rModelPart); } for (typename ModelPart::NodesContainerType::iterator iNode = rModelPart.NodesBegin(); iNode != rModelPart.NodesEnd(); iNode++) { if (iNode->FastGetSolutionStepValue(NODAL_AREA) == 0.0) { iNode->FastGetSolutionStepValue(NODAL_AREA) = 1.0; } const double Area = iNode->FastGetSolutionStepValue(NODAL_AREA); iNode->FastGetSolutionStepValue(ADVPROJ) /= Area; iNode->FastGetSolutionStepValue(DIVPROJ) /= Area; } KRATOS_INFO_IF("GearScheme", rModelPart.GetCommunicator().MyPID() == 0) << "Computing OSS projections" << std::endl; } /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on * both sides of the boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n * 2- The non-historical containers are added across processes, communicating the right value from the condition owner to all partitions.\n * 3- The value on all periodic nodes is replaced by the one received in step 2. 
*/ void PeriodicConditionProjectionCorrection(ModelPart& rModelPart) { const int num_nodes = rModelPart.NumberOfNodes(); const int num_conditions = rModelPart.NumberOfConditions(); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; it_node->SetValue(NODAL_AREA,0.0); it_node->SetValue(ADVPROJ,ZeroVector(3)); it_node->SetValue(DIVPROJ,0.0); } #pragma omp parallel for for (int i = 0; i < num_conditions; i++) { auto it_cond = rModelPart.ConditionsBegin() + i; if(it_cond->Is(PERIODIC)) { this->AssemblePeriodicContributionToProjections(it_cond->GetGeometry()); } } rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA); rModelPart.GetCommunicator().AssembleNonHistoricalData(ADVPROJ); rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ); #pragma omp parallel for for (int i = 0; i < num_nodes; i++) { auto it_node = rModelPart.NodesBegin() + i; this->CorrectContributionsOnPeriodicNode(*it_node); } } void AssemblePeriodicContributionToProjections(Geometry< Node<3> >& rGeometry) { unsigned int nodes_in_cond = rGeometry.PointsNumber(); double nodal_area = 0.0; array_1d<double,3> momentum_projection = ZeroVector(3); double mass_projection = 0.0; for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; nodal_area += r_node.FastGetSolutionStepValue(NODAL_AREA); noalias(momentum_projection) += r_node.FastGetSolutionStepValue(ADVPROJ); mass_projection += r_node.FastGetSolutionStepValue(DIVPROJ); } for ( unsigned int i = 0; i < nodes_in_cond; i++ ) { auto& r_node = rGeometry[i]; /* Note that this loop is expected to be threadsafe in normal conditions, * since each node should belong to a single periodic link. However, I am * setting the locks for openmp in case that we try more complicated things * in the future (like having different periodic conditions for different * coordinate directions). 
*/ r_node.SetLock(); r_node.GetValue(NODAL_AREA) = nodal_area; noalias(r_node.GetValue(ADVPROJ)) = momentum_projection; r_node.GetValue(DIVPROJ) = mass_projection; r_node.UnSetLock(); } } void CorrectContributionsOnPeriodicNode(Node<3>& rNode) { if (rNode.GetValue(NODAL_AREA) != 0.0) // Only periodic nodes will have a non-historical NODAL_AREA set. { rNode.FastGetSolutionStepValue(NODAL_AREA) = rNode.GetValue(NODAL_AREA); noalias(rNode.FastGetSolutionStepValue(ADVPROJ)) = rNode.GetValue(ADVPROJ); rNode.FastGetSolutionStepValue(DIVPROJ) = rNode.GetValue(DIVPROJ); } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ /// Poiner to a turbulence model Process::Pointer mpTurbulenceModel = nullptr; typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater(); const Kratos::Variable<int>& mrPeriodicIdVar; // ///@} // ///@name Serialization // ///@{ // // friend class Serializer; // // virtual void save(Serializer& rSerializer) const // { // KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, BaseType ); // rSerializer.save("mpTurbulenceModel",mpTurbulenceModel); // } // // virtual void load(Serializer& rSerializer) // { // KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, BaseType ); // rSerializer.load("mpTurbulenceModel",mpTurbulenceModel); // } ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. GearScheme & operator=(GearScheme const& rOther) {} /// Copy constructor. 
GearScheme(GearScheme const& rOther) {} ///@} }; // Class GearScheme ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TSparseSpace,class TDenseSpace> inline std::istream& operator >>(std::istream& rIStream,GearScheme<TSparseSpace,TDenseSpace>& rThis) { return rIStream; } /// output stream function template<class TSparseSpace,class TDenseSpace> inline std::ostream& operator <<(std::ostream& rOStream,const GearScheme<TSparseSpace,TDenseSpace>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } // namespace Kratos. #endif // KRATOS_GEAR_SCHEME_H_INCLUDED defined
rose_c99loop.c
/* Contributed by Jeff Keasler Liao, 10/22/2009 */ #include "omp.h" int main(int argc,char *argv[]) { double a[20UL][20UL]; for (int i = 0; i <= 18; i += 1) { #pragma omp parallel for for (int j = 0; j <= 19; j += 1) { a[i][j] += a[i + 1][j]; } } return 0; } // with shadow i and j void foo(int i,int j) { double a[20][20]; for (int i = 0; i <= 18; i += 1) { #pragma omp parallel for for (int j = 0; j <= 19; j += 1) { a[i][j] += a[i + 1][j]; } } }
fci_4pdm.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
 
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
 
       http://www.apache.org/licenses/LICENSE-2.0
 
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"
#include "np_helper/np_helper.h"

#define BLK     48
#define BUFBASE 96

double FCI_t1ci_sf(double *ci0, double *t1, int bcount,
                   int stra_id, int strb_id,
                   int norb, int na, int nb, int nlinka, int nlinkb,
                   _LinkT *clink_indexa, _LinkT *clink_indexb);

/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0>
 */
/* Beta-excitation half of t2: applies the beta excitation table
 * (clink_indexb) on top of the t1 intermediate.  Allocates a scratch t1 of
 * nb*nnorb doubles.  NOTE(review): malloc result is used unchecked --
 * presumably callers guarantee modest sizes; confirm project policy. */
static void rdm4_0b_t2(double *ci0, double *t2,
                       int bcount, int stra_id, int strb_id,
                       int norb, int na, int nb, int nlinka, int nlinkb,
                       _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        const size_t n4 = nnorb * nnorb;
        int i, j, k, l, a, sign, str1;
        double *t1 = malloc(sizeof(double) * nb * nnorb);
        double *pt1, *pt2;
        _LinkT *tab;

        // form t1 which has beta^+ beta |t1> => target stra_id
        FCI_t1ci_sf(ci0, t1, nb, stra_id, 0,
                    norb, na, nb, nlinka, nlinkb,
                    clink_indexa, clink_indexb);

#pragma omp parallel private(i, j, k, l, a, str1, sign, pt1, pt2, tab)
{
#pragma omp for schedule(static, 1) nowait
        for (k = 0; k < bcount; k++) {
                /* zero the destination slice, then accumulate +/- t1 rows
                 * selected by the excitation table entries */
                NPdset0(t2+k*n4, n4);
                tab = clink_indexb + (strb_id+k) * nlinkb;
                for (j = 0; j < nlinkb; j++) {
                        a    = EXTRACT_CRE (tab[j]);
                        i    = EXTRACT_DES (tab[j]);
                        str1 = EXTRACT_ADDR(tab[j]);
                        sign = EXTRACT_SIGN(tab[j]);
                        pt1 = t1 + str1 * nnorb;
                        pt2 = t2 + k * n4 + (i*norb+a)*nnorb;
                        if (sign > 0) {
                                for (l = 0; l < nnorb; l++) {
                                        pt2[l] += pt1[l];
                                }
                        } else {
                                for (l = 0; l < nnorb; l++) {
                                        pt2[l] -= pt1[l];
                                }
                        }
                }
        }
}
        free(t1);
}

/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0>
 */
/* Alpha-excitation half of t2: accumulates into the t2 produced by
 * rdm4_0b_t2.  Each thread owns a private t1 scratch buffer (allocated and
 * freed inside the parallel region); concurrent += on shared t2 slices is
 * avoided by distributing the j loop. */
static void rdm4_a_t2(double *ci0, double *t2,
                      int bcount, int stra_id, int strb_id,
                      int norb, int na, int nb, int nlinka, int nlinkb,
                      _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        const size_t n4 = nnorb * nnorb;
        int i, j, k, l, a, sign, str1;
        double *pt1, *pt2;
        _LinkT *tab = clink_indexa + stra_id * nlinka;

#pragma omp parallel private(i, j, k, l, a, str1, sign, pt1, pt2)
{
        double *t1 = malloc(sizeof(double) * bcount * nnorb);
#pragma omp for schedule(static, 40)
        for (j = 0; j < nlinka; j++) {
                a    = EXTRACT_CRE (tab[j]);
                i    = EXTRACT_DES (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                // form t1 which has alpha^+ alpha |t1> => target stra_id (through str1)
                FCI_t1ci_sf(ci0, t1, bcount, str1, strb_id,
                            norb, na, nb, nlinka, nlinkb,
                            clink_indexa, clink_indexb);
                for (k = 0; k < bcount; k++) {
                        pt1 = t1 + k * nnorb;
                        pt2 = t2 + k * n4 + (i*norb+a)*nnorb;
                        if (sign > 0) {
                                for (l = 0; l < nnorb; l++) {
                                        pt2[l] += pt1[l];
                                }
                        } else {
                                for (l = 0; l < nnorb; l++) {
                                        pt2[l] -= pt1[l];
                                }
                        }
                }
        }
        free(t1);
}
}

/* Build the full double-excitation intermediate t2 = E^i_j E^k_l |ci0>:
 * rdm4_0b_t2 initializes t2 (beta part), rdm4_a_t2 adds the alpha part. */
void FCI_t2ci_sf(double *ci0, double *t2,
                 int bcount, int stra_id, int strb_id,
                 int norb, int na, int nb, int nlinka, int nlinkb,
                 _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        rdm4_0b_t2(ci0, t2, bcount, stra_id, strb_id,
                   norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
        rdm4_a_t2 (ci0, t2, bcount, stra_id, strb_id,
                   norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
}

/* Accumulate the "upper triangle" part of the 3-pdm via blocked dgemm calls
 * (only indices k <= i <= j are produced; full symmetrization happens in the
 * Python layer).  Requires norb <= BLK so a block row fits. */
static void tril3pdm_particle_symm(double *rdm3, double *tbra, double *t2ket,
                                   int bcount, int ncre, int norb)
{
        assert(norb <= BLK);
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        int nnorb = norb * norb;
        int n4 = nnorb * nnorb;
        int i, j, k, m, n, blk1;
        int iblk = MIN(BLK/norb, norb);
        int blk = iblk * norb;

        //dgemm_(&TRANS_N, &TRANS_T, &n4, &nncre, &bcount,
        //       &D1, t2ket, &n4, tbra, &nnorb, &D1, rdm3, &n4);

        // "upper triangle" F-array[k,j,i], k<=i<=j
        for (j = 0; j < ncre; j++) {
        for (n = 0; n < norb; n++) {
                /* full blocks of iblk rows, then the remainder block */
                for (k = 0; k < j+1-iblk; k+=iblk) {
                        m = k * norb;
                        i = m + blk;
                        dgemm_(&TRANS_N, &TRANS_T, &i, &blk, &bcount,
                               &D1, t2ket, &n4, tbra+m, &nnorb,
                               &D1, rdm3+m*n4, &n4);
                }
                m = k * norb;
                i = (j+1) * norb;
                blk1 = i - m;
                dgemm_(&TRANS_N, &TRANS_T, &i, &blk1, &bcount,
                       &D1, t2ket, &n4, tbra+m, &nnorb,
                       &D1, rdm3+m*n4, &n4);
                t2ket += nnorb;
                rdm3 += nnorb;
        } }
}

/* Accumulate the upper-triangle part of the 2-pdm via blocked dgemm calls. */
static void tril2pdm_particle_symm(double *rdm2, double *tbra, double *tket,
                                   int bcount, int ncre, int norb)
{
        assert(norb <= BLK);
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        int nnorb = norb * norb;
        int nncre = norb * ncre;
        int m, n;
        int blk = MIN(BLK/norb, norb) * norb;

        //dgemm_(&TRANS_N, &TRANS_T, &nncre, &nncre, &bcount,
        //       &D1, tket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb);

        // upper triangle part of F-array
        for (m = 0; m < nncre-blk; m+=blk) {
                n = m + blk;
                dgemm_(&TRANS_N, &TRANS_T, &n, &blk, &bcount,
                       &D1, tket, &nnorb, tbra+m, &nnorb,
                       &D1, rdm2+m*nnorb, &nnorb);
        }
        n = nncre - m;
        dgemm_(&TRANS_N, &TRANS_T, &nncre, &n, &bcount,
               &D1, tket, &nnorb, tbra+m, &nnorb,
               &D1, rdm2+m*nnorb, &nnorb);
}

/* Contract the t1 intermediates into rdm1/rdm2 (spin-free case).  tbra holds
 * the transpose of each nnorb block of t1bra; the CI vector is addressed as
 * an na-by-nb matrix (bra + stra_id*nb + strb_id). */
static void make_rdm12_sf(double *rdm1, double *rdm2,
                          double *bra, double *ket,
                          double *t1bra, double *t1ket,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const int INC1 = 1;
        const double D1 = 1;
        const int nnorb = norb * norb;
        int k, l;
        size_t n;
        double *tbra = malloc(sizeof(double) * nnorb * bcount);
        double *pbra, *pt1;

        for (n = 0; n < bcount; n++) {
                pbra = tbra + n * nnorb;
                pt1 = t1bra + n * nnorb;
                for (k = 0; k < norb; k++) {
                for (l = 0; l < norb; l++) {
                        pbra[k*norb+l] = pt1[l*norb+k];
                } }
        }
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
               &D1, t1ket, &nnorb, tbra, &nnorb,
               &D1, rdm2, &nnorb);
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, t1ket, &nnorb,
               bra+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
        free(tbra);
}

/* Same contraction for the spin-0 (symmetric, na == dimension) case: rows
 * off the diagonal carry a factor of 2, and the CI vector is addressed as an
 * na-by-na matrix.  Note rdm1 is contracted against tbra here (it carries
 * the factor), unlike make_rdm12_sf which uses t1ket. */
static void make_rdm12_spin0(double *rdm1, double *rdm2,
                             double *bra, double *ket,
                             double *t1bra, double *t1ket,
                             int bcount, int stra_id, int strb_id,
                             int norb, int na, int nb)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const int INC1 = 1;
        const double D1 = 1;
        const int nnorb = norb * norb;
        int k, l;
        size_t n;
        double *tbra = malloc(sizeof(double) * nnorb * bcount);
        double *pbra, *pt1;
        double factor;

        for (n = 0; n < bcount; n++) {
                // diagonal block (strb == stra) counts once, others twice
                if (n+strb_id == stra_id) {
                        factor = 1;
                } else {
                        factor = 2;
                }
                pbra = tbra + n * nnorb;
                pt1 = t1bra + n * nnorb;
                for (k = 0; k < norb; k++) {
                for (l = 0; l < norb; l++) {
                        pbra[k*norb+l] = pt1[l*norb+k] * factor;
                } }
        }
        dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
               &D1, t1ket, &nnorb, tbra, &nnorb,
               &D1, rdm2, &nnorb);
        dgemv_(&TRANS_N, &nnorb, &bcount, &D1, tbra, &nnorb,
               bra+stra_id*na+strb_id, &INC1, &D1, rdm1, &INC1);
        free(tbra);
}

/* Kernel for the 4-pdm accumulation (definition continues past this chunk).
 * When bra == ket the bra-side intermediates are reused for the ket side. */
void FCI4pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *rdm4,
                     double *bra, double *ket,
                     int bcount, int stra_id, int strb_id,
                     int norb, int na, int nb, int nlinka, int nlinkb,
                     _LinkT *clink_indexa, _LinkT *clink_indexb)
{
        const int nnorb = norb * norb;
        const size_t n4 = nnorb * nnorb;
        const size_t n3 = nnorb * norb;
        const size_t n6 = nnorb * nnorb * nnorb;
        int i, j, k, l, ij;
        size_t n;
        double *tbra;
        double *t1bra = malloc(sizeof(double) * nnorb * bcount * 2);
        double *t2bra = malloc(sizeof(double) * n4 * bcount * 2);
        double *t1ket = t1bra + nnorb * bcount;
        double *t2ket = t2bra + n4 * bcount;
        double *pbra, *pt2;

        // t2[:,i,j,k,l] = E^i_j E^k_l|ket>
        FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id,
                    norb, na, nb, nlinka, nlinkb,
                    clink_indexa, clink_indexb);
        FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id,
                    norb, na, nb, nlinka, nlinkb,
                    clink_indexa, clink_indexb);
        if (bra == ket) {
                t1ket = t1bra;
                t2ket = t2bra;
        } else {
                FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id,
                            norb, na, nb, nlinka, nlinkb,
                            clink_indexa, clink_indexb);
                FCI_t2ci_sf(ket, t2ket, bcount,
stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2) { tbra = malloc(sizeof(double) * nnorb * bcount); #pragma omp for schedule(static, 1) nowait for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) for (n = 0; n < bcount; n++) { for (k = 0; k < norb; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3]; } } } i = ij / norb; j = ij - i * norb; // contract <bra-of-Eij| with |E^k_l E^m_n ket> tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket, bcount, j+1, norb); // rdm3 tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, bcount, j+1, norb); } free(tbra); } make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket, bcount, stra_id, strb_id, norb, na, nb); free(t1bra); free(t2bra); } /* * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...] */ void FCI4pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { int fill1; if (strb_id+bcount <= stra_id) { fill1 = bcount; } else if (stra_id >= strb_id) { fill1 = stra_id - strb_id + 1; } else { return; } const int nnorb = norb * norb; const size_t n4 = nnorb * nnorb; const size_t n3 = nnorb * norb; const size_t n6 = nnorb * nnorb * nnorb; int i, j, k, l, ij; size_t n; double factor; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * fill1 * 2); double *t2bra = malloc(sizeof(double) * n4 * fill1 * 2); double *t1ket = t1bra + nnorb * fill1; double *t2ket = t2bra + n4 * fill1; double *pbra, *pt2; FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); if (bra == ket) { t1ket = t1bra; t2ket = t2bra; } 
else { FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(ket, t2ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2, factor) { tbra = malloc(sizeof(double) * nnorb * fill1); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) i = ij / norb; j = ij - i * norb; for (n = 0; n < fill1; n++) { if (n+strb_id == stra_id) { factor = 1; } else { factor = 2; } for (k = 0; k <= j; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3] * factor; } } } // contract <bra-of-Eij| with |E^k_l E^m_n ket> tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket, fill1, j+1, norb); // rdm3 tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, fill1, j+1, norb); } free(tbra); } make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket, fill1, stra_id, strb_id, norb, na, nb); free(t1bra); free(t2bra); } /* * This function returns incomplete rdm3, rdm4, in which, particle * permutation symmetry is assumed. 
* kernel can be FCI4pdm_kern_sf, FCI4pdm_kern_spin0 */ void FCIrdm4_drv(void (*kernel)(), double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { const int nnorb = norb * norb; const size_t n4 = nnorb * nnorb; int ib, strk, bcount; _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); NPdset0(rdm1, nnorb); NPdset0(rdm2, n4); NPdset0(rdm3, n4 * nnorb); NPdset0(rdm4, n4 * n4); for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { bcount = MIN(BUFBASE, nb-ib); (*kernel)(rdm1, rdm2, rdm3, rdm4, bra, ket, bcount, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } } free(clinka); free(clinkb); } void FCI3pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const int nnorb = norb * norb; const size_t n4 = nnorb * nnorb; const size_t n3 = nnorb * norb; int i, j, k, l, ij; size_t n; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * bcount); double *t1ket = malloc(sizeof(double) * nnorb * bcount); double *t2bra = malloc(sizeof(double) * n4 * bcount); double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2) { tbra = malloc(sizeof(double) * nnorb * bcount); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < 
nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) for (n = 0; n < bcount; n++) { pbra = tbra + n * nnorb; pt2 = t2bra + n * n4 + ij; for (k = 0; k < norb; k++) { for (l = 0; l < norb; l++) { pbra[k*norb+l] = pt2[l*n3+k*nnorb]; } } } i = ij / norb; j = ij - i * norb; tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, bcount, j+1, norb); } free(tbra); } make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket, bcount, stra_id, strb_id, norb, na, nb); free(t1bra); free(t1ket); free(t2bra); } /* * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...] */ void FCI3pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { int fill1; if (strb_id+bcount <= stra_id) { fill1 = bcount; } else if (stra_id >= strb_id) { fill1 = stra_id - strb_id + 1; } else { return; } const int nnorb = norb * norb; const size_t n4 = nnorb * nnorb; const size_t n3 = nnorb * norb; int i, j, k, l, ij; size_t n; double factor; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * fill1); double *t1ket = malloc(sizeof(double) * nnorb * fill1); double *t2bra = malloc(sizeof(double) * n4 * fill1); double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2, factor) { tbra = malloc(sizeof(double) * nnorb * fill1); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) i = ij / norb; j = ij - i * norb; for (n = 0; n < fill1; n++) { if (n+strb_id == stra_id) { factor = 1; } else { factor = 2; } for (k = 0; k 
<= j; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3] * factor; } } } tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, fill1, j+1, norb); } free(tbra); } make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket, fill1, stra_id, strb_id, norb, na, nb); free(t1bra); free(t1ket); free(t2bra); } /* * This function returns incomplete rdm3, in which, particle * permutation symmetry is assumed. * kernel can be FCI3pdm_kern_ms0, FCI3pdm_kern_spin0 */ void FCIrdm3_drv(void (*kernel)(), double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { const int nnorb = norb * norb; const size_t n4 = nnorb * nnorb; int ib, strk, bcount; _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); NPdset0(rdm1, nnorb); NPdset0(rdm2, n4); NPdset0(rdm3, n4 * nnorb); for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { bcount = MIN(BUFBASE, nb-ib); (*kernel)(rdm1, rdm2, rdm3, bra, ket, bcount, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } } free(clinka); free(clinkb); }
quick_sort.h
#ifndef SORT_QUICK_SORT_H
#define SORT_QUICK_SORT_H

#include <algorithm>
#include <omp.h>

/*
 * Sequential Hoare-partition quicksort over [first, last).
 *
 * Fixes over the previous revision:
 *  - The pivot is copied by value.  The old code compared against *mid
 *    (an iterator into the range); once the element under `mid` was
 *    swapped away, later comparisons used a different pivot value, which
 *    can break the partition invariant and run the scan iterators out of
 *    the range.
 *  - Ranges with fewer than two elements return immediately.  Previously
 *    an empty range decremented `end` below `first` and dereferenced it
 *    (undefined behavior).
 */
template<class RandIter>
void quick_sort(RandIter first, RandIter last) {
    if (last - first < 2) {
        return;                                   // already sorted
    }
    auto start = first;
    auto end = last - 1;
    const auto pivot = *(first + (last - first) / 2);  // value copy, see note above
    do {
        while (*start < pivot) {
            ++start;
        }
        while (*end > pivot) {
            --end;
        }
        if (start <= end) {
            std::swap(*start, *end);
            ++start;
            --end;
        }
    } while (start <= end);
    if (end > first) {
        quick_sort(first, end + 1);               // left partition [first, end]
    }
    if (start < last) {
        quick_sort(start, last);                  // right partition [start, last)
    }
}

/*
 * Task-parallel quicksort worker; must execute inside an OpenMP parallel
 * region (see quick_sort_par_omp).  Partitions exactly like quick_sort
 * (same pivot-copy and short-range fixes), then spawns the left half as
 * an OpenMP task while the current thread descends into the right half.
 * Sub-ranges of <= 150 elements fall back to the sequential sort to keep
 * task overhead bounded.
 */
template<class RandIter>
void quick_sort_par_omp_impl(RandIter first, RandIter last) {
    if (last - first < 2) {
        return;
    }
    auto start = first;
    auto end = last - 1;
    const auto pivot = *(first + (last - first) / 2);  // value copy (same fix as quick_sort)
    do {
        while (*start < pivot) {
            ++start;
        }
        while (*end > pivot) {
            --end;
        }
        if (start <= end) {
            std::swap(*start, *end);
            ++start;
            --end;
        }
    } while (start <= end);
    if (end > first) {
#pragma omp task
        {
            if (end - first <= 150) {
                quick_sort(first, end + 1);
            } else {
                quick_sort_par_omp_impl(first, end + 1);
            }
        }
    }
    if (start < last) {
        if (last - start <= 150) {
            quick_sort(start, last);
        } else {
            quick_sort_par_omp_impl(start, last);
        }
    }
#pragma omp taskwait
}

/*
 * Public entry point for the parallel sort: opens a parallel region and
 * lets a single thread seed the task tree; the other threads pick up the
 * spawned tasks.
 */
template<class RandIter>
void quick_sort_par_omp(RandIter first, RandIter last) {
#pragma omp parallel
    {
#pragma omp single
        {
            quick_sort_par_omp_impl(first, last);
        }
    }
}

#endif // SORT_QUICK_SORT_H
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
/// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. 
This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. /// /// FIXME: Change to be a polymorphic matcher that works on any syntactic /// node. There's nothing `Stmt`-specific about it. AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. 
/// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. /// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicAllOfMatcher<DecompositionDecl> decompositionDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. 
/// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. 
/// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. 
extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches template template parameter declarations. /// /// Given /// \code /// template <template <typename> class Z, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'Z', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl> templateTemplateParmDecl; /// Matches public C++ declarations and C++ base specifers that specify public /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; // fieldDecl(isPublic()) matches 'a' /// protected: int b; /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived1 : public Base {}; // matches 'Base' /// struct Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPublic, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_public; } /// Matches protected C++ declarations and C++ base specifers that specify /// protected inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; // fieldDecl(isProtected()) matches 'b' /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived : protected Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isProtected, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_protected; } /// Matches private C++ declarations and C++ base specifers that specify private /// inheritance. 
/// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. 
/// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder); } /// Causes all nested matchers to be matched with the specified traversal kind. 
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreImplicitCastsAndParentheses, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> 
&InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. 
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. 
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. /// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. 
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. 
/// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. 
/// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. 
/// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. 
/// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. 
/// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. 
/// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type;

/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;

/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;

/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;

/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;

/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information that may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
///   class Foo {
///     int bar;
///   }
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(
///       optionally(has(
///           fieldDecl(hasName("bar")).bind("var")
///       ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(ofKind(UETT_SizeOf), InnerMatcher))); } /// Matches NamedDecl nodes that have the specified name. /// /// Supports specifying enclosing namespaces or classes by prefixing the name /// with '<enclosing>::'. /// Does not match typedefs of an underlying type with the given name. /// /// Example matches X (Name == "X") /// \code /// class X; /// \endcode /// /// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X") /// \code /// namespace a { namespace b { class X; } } /// \endcode inline internal::Matcher<NamedDecl> hasName(StringRef Name) { return internal::Matcher<NamedDecl>( new internal::HasNameMatcher({std::string(Name)})); } /// Matches NamedDecl nodes that have any of the specified names. /// /// This matcher is only provided as a performance optimization of hasName. /// \code /// hasAnyName(a, b, c) /// \endcode /// is equivalent to, but faster than /// \code /// anyOf(hasName(a), hasName(b), hasName(c)) /// \endcode extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef, internal::hasAnyNameFunc> hasAnyName; /// Matches NamedDecl nodes whose fully qualified names contain /// a substring matched by the given RegExp. /// /// Supports specifying enclosing namespaces or classes by /// prefixing the name with '<enclosing>::'. Does not match typedefs /// of an underlying type with the given name. /// /// Example matches X (regexp == "::X") /// \code /// class X; /// \endcode /// /// Example matches X (regexp is one of "::X", "^foo::.*X", among others) /// \code /// namespace foo { namespace bar { class X; } } /// \endcode AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) { std::string FullNameString = "::" + Node.getQualifiedNameAsString(); return RegExp->match(FullNameString); } /// Matches overloaded operator names. 
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>( {std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadesOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. 
/// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/true); } /// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)). 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDirectlyDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, Builder); } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). 
extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. 
/// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. 
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. 
/// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. 
/// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. 
/// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) { std::string SelectorString = Node.getSelector().getAsString(); return RegExp->match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>, InnerMatcher) { if (Node.isNull()) return false; return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder); } /// Overloaded to match the referenced type's declaration. AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>, InnerMatcher, 1) { return references(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches on the implicit object argument of a member call expression. Unlike /// `on`, matches the argument directly without stripping away anything. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y { void g(); }; /// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); } /// \endcode /// cxxMemberCallExpr(onImplicitObjectArgument(hasType( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`. /// cxxMemberCallExpr(on(callExpr())) /// does not match `(g()).m()`, because the parens are not ignored. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the type of the expression's implicit object argument either /// matches the InnerMatcher, or is a pointer to a type that matches the /// InnerMatcher. /// /// Given /// \code /// class Y { public: void m(); }; /// class X : public Y { void g(); }; /// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); } /// \endcode /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `p->m()` and `x.m()`. /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("X"))))) /// matches `x.g()`. 
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. 
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
/// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. 
BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, ParamMatcher)))), callExpr(callee(functionDecl( hasParameter(ParamIndex, ParamMatcher)))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; } } ++ParamIndex; } *Builder = std::move(Result); return Matched; } /// Matches all arguments and their respective types for a \c CallExpr or /// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but /// it works on calls through function pointers as well. /// /// The difference is, that function pointers do not provide access to a /// \c ParmVarDecl, but only the \c QualType for each argument. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// void (*f_ptr)(int) = f; /// f_ptr(y); /// \endcode /// callExpr( /// forEachArgumentWithParamType( /// declRefExpr(to(varDecl(hasName("y")))), /// qualType(isInteger()).bind("type) /// )) /// matches f(y) and f_ptr(y) /// with declRefExpr(...) /// matching int y /// and qualType(...) /// matching int AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<QualType>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. 
BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; const FunctionProtoType *FProto = nullptr; if (const auto *Call = dyn_cast<CallExpr>(&Node)) { if (const auto *Value = dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) { QualType QT = Value->getType().getCanonicalType(); // This does not necessarily lead to a `FunctionProtoType`, // e.g. K&R functions do not have a function prototype. if (QT->isFunctionPointerType()) FProto = QT->getPointeeType()->getAs<FunctionProtoType>(); if (QT->isMemberFunctionPointerType()) { const auto *MP = QT->getAs<MemberPointerType>(); assert(MP && "Must be member-pointer if its a memberfunctionpointer"); FProto = MP->getPointeeType()->getAs<FunctionProtoType>(); assert(FProto && "The call must have happened through a member function " "pointer"); } } } int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); // This test is cheaper compared to the big matcher in the next if. // Therefore, please keep this order. if (FProto) { QualType ParamType = FProto->getParamType(ParamIndex); if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))), callExpr(callee(functionDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } } *Builder = std::move(Result); return Matched; } /// Matches the ParmVarDecl nodes that are at the N'th position in the parameter /// list. The parameter list could be that of either a block, function, or /// objc-method. 
/// /// /// Given /// /// \code /// void f(int a, int b, int c) { /// } /// \endcode /// /// ``parmVarDecl(isAtPosition(0))`` matches ``int a``. /// /// ``parmVarDecl(isAtPosition(1))`` matches ``int b``. AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) { const clang::DeclContext *Context = Node.getParentFunctionOrMethod(); if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; return false; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. /// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. 
/// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. 
/// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. 
/// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. 
AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. 
/// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. Note that in case of functions /// this matcher only matches the definition itself and not the other /// declarations of the same function. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. 
/// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. /// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// hasAnyBody(functionDecl()) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. 
/// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). 
/// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P(hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator), std::string, Name) { return Name == Node.getOpcodeStr(Node.getOpcode()); } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasAnyOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches comparison operators. /// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. 
/// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if both matchers match with opposite sides of the binary operator. /// /// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1), /// integerLiteral(equals(2))) /// \code /// 1 + 2 // Match /// 2 + 1 // Match /// 1 + 1 // No match /// 2 + 2 // No match /// \endcode inline internal::Matcher<BinaryOperator> hasOperands(const internal::Matcher<Expr> &Matcher1, const internal::Matcher<Expr> &Matcher2) { return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)), allOf(hasLHS(Matcher2), hasRHS(Matcher1))); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}

/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  return Node.isStruct();
}

/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}

/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}

/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}

/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ? a : b
/// \endcode
///
/// Example 2 (conditional binary operator): matches opaqueValueExpr(condition)
/// \code
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getTrueExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches the false branch expression of a conditional operator
/// (binary or ternary).
///
/// Example matches b
/// \code
///   condition ? a : b
///   condition ?: b
/// \endcode
AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *Expression = Node.getFalseExpr();
  return (Expression != nullptr &&
          InnerMatcher.matches(*Expression, Finder, Builder));
}

/// Matches if a declaration has a body attached.
///
/// Example matches A, va, fa
/// \code
///   class A {};
///   class B;  // Doesn't match, as it has no body.
///   int va;
///   extern int vb;  // Doesn't match, as it doesn't define the variable.
///   void fa() {}
///   void fb();  // Doesn't match, as it has no body.
///   @interface X
///   - (void)ma; // Doesn't match, interface is declaration.
///   @end
///   @implementation X
///   - (void)ma {}
///   @end
/// \endcode
///
/// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>,
///   Matcher<ObjCMethodDecl>
AST_POLYMORPHIC_MATCHER(isDefinition,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl,
                                                        ObjCMethodDecl,
                                                        FunctionDecl)) {
  return Node.isThisDeclarationADefinition();
}

/// Matches if a function declaration is variadic.
///
/// Example matches f, but not g or h. The function i will not match, even when
/// compiled in C mode.
/// \code
///   void f(...);
///   void g(int);
///   template <typename... Ts> void h(Ts...);
///   void i();
/// \endcode
AST_MATCHER(FunctionDecl, isVariadic) {
  return Node.isVariadic();
}

/// Matches the class declaration that the given method declaration
/// belongs to.
///
/// FIXME: Generalize this for other kinds of declarations.
/// FIXME: What other kind of declarations would we need to generalize
/// this to?
///
/// Example matches A() in the last line
///     (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl(
///         ofClass(hasName("A"))))))
/// \code
///   class A {
///    public:
///     A();
///   };
///   A a = A();
/// \endcode
AST_MATCHER_P(CXXMethodDecl, ofClass,
              internal::Matcher<CXXRecordDecl>, InnerMatcher) {
  const CXXRecordDecl *Parent = Node.getParent();
  return (Parent != nullptr &&
          InnerMatcher.matches(*Parent, Finder, Builder));
}

/// Matches each method overridden by the given method. This matcher may
/// produce multiple matches.
///
/// Given
/// \code
///   class A { virtual void f(); };
///   class B : public A { void f(); };
///   class C : public B { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches once, with "b" binding "A::f" and "d" binding "C::f" (Note
///   that B::f is not overridden by C::f).
///
/// The check can produce multiple matches in case of multiple inheritance, e.g.
/// \code
///   class A1 { virtual void f(); };
///   class A2 { virtual void f(); };
///   class C : public A1, public A2 { void f(); };
/// \endcode
/// cxxMethodDecl(ofClass(hasName("C")),
///               forEachOverridden(cxxMethodDecl().bind("b"))).bind("d")
///   matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and
///   once with "b" binding "A2::f" and "d" binding "C::f".
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Match each overridden method against a private copy of the builder so
    // that non-matching candidates do not pollute the shared bindings.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  *Builder = std::move(Result);
  return Matched;
}

/// Matches declarations of virtual methods and C++ base specifiers that specify
/// virtual inheritance.
///
/// Example:
/// \code
///   class A {
///    public:
///     virtual void x(); // matches x
///   };
/// \endcode
///
/// Example:
/// \code
///   class Base {};
///   class DirectlyDerived : virtual Base {}; // matches Base
///   class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  return Node.isVirtual();
}

/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}

/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  return Node.template hasAttr<FinalAttr>();
}

/// Matches if the given method declaration is pure.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x() = 0;
///   };
/// \endcode
///   matches A::x
AST_MATCHER(CXXMethodDecl, isPure) {
  return Node.isPure();
}

/// Matches if the given method declaration is const.
///
/// Given
/// \code
///   struct A {
///     void foo() const;
///     void bar();
///   };
/// \endcode
///
/// cxxMethodDecl(isConst()) matches A::foo() but not A::bar()
AST_MATCHER(CXXMethodDecl, isConst) {
  return Node.isConst();
}

/// Matches if the given method declaration declares a copy assignment
/// operator.
///
/// Given
/// \code
///   struct A {
///     A &operator=(const A &);
///     A &operator=(A &&);
///   };
/// \endcode
///
/// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not
/// the second one.
AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) {
  return Node.isCopyAssignmentOperator();
}

/// Matches if the given method declaration declares a move assignment
/// operator.
///
/// Given
/// \code
///   struct A {
///     A &operator=(const A &);
///     A &operator=(A &&);
///   };
/// \endcode
///
/// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not
/// the first one.
AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) {
  return Node.isMoveAssignmentOperator();
}

/// Matches if the given method declaration overrides another method.
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     virtual void x();
///   };
/// \endcode
///   matches B::x
AST_MATCHER(CXXMethodDecl, isOverride) {
  return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>();
}

/// Matches method declarations that are user-provided.
///
/// Given
/// \code
///   struct S {
///     S(); // #1
///     S(const S &) = default; // #2
///     S(S &&) = delete; // #3
///   };
/// \endcode
/// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3.
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}

/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}

/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
  return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
  return Node->isUnsignedIntegerType();
}

/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
  return Node->isSignedIntegerType();
}

/// Matches QualType nodes that are of character type.
///
/// Given
/// \code
///   void a(char);
///   void b(wchar_t);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isAnyCharacter())))
/// matches "a(char)", "b(wchar_t)", but not "c(double)".
AST_MATCHER(QualType, isAnyCharacter) {
  return Node->isAnyCharacterType();
}

/// Matches QualType nodes that are of any pointer type; this includes
/// the Objective-C object pointer type, which is different despite being
/// syntactically similar.
///
/// Given
/// \code
///   int *i = nullptr;
///
///   @interface Foo
///   @end
///   Foo *f;
///
///   int j;
/// \endcode
/// varDecl(hasType(isAnyPointer()))
///   matches "int *i" and "Foo *f", but not "int j".
AST_MATCHER(QualType, isAnyPointer) {
  return Node->isAnyPointerType();
}

/// Matches QualType nodes that are const-qualified, i.e., that
/// include "top-level" const.
///
/// Given
/// \code
///   void a(int);
///   void b(int const);
///   void c(const int);
///   void d(const int*);
///   void e(int const) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isConstQualified())))
///   matches "void b(int const)", "void c(const int)" and
///   "void e(int const) {}". It does not match d as there
///   is no top-level const on the parameter type "const int *".
AST_MATCHER(QualType, isConstQualified) {
  return Node.isConstQualified();
}

/// Matches QualType nodes that are volatile-qualified, i.e., that
/// include "top-level" volatile.
///
/// Given
/// \code
///   void a(int);
///   void b(int volatile);
///   void c(volatile int);
///   void d(volatile int*);
///   void e(int volatile) {};
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isVolatileQualified())))
///   matches "void b(int volatile)", "void c(volatile int)" and
///   "void e(int volatile) {}". It does not match d as there
///   is no top-level volatile on the parameter type "volatile int *".
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}

/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
//      cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // Implicit accesses have no spelled object expression to match against.
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}

/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
/// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. 
/// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. 
/// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. 
/// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. 
/// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. 
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int *array_of_ptrs[4];
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not
/// \c array_of_ptrs.
extern const AstTypeMatcher<ParenType> parenType;

/// Matches \c ParenType nodes where the inner type is a specific type.
///
/// Given
/// \code
///   int (*ptr_to_array)[4];
///   int (*ptr_to_func)(int);
/// \endcode
///
/// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches
/// \c ptr_to_func but not \c ptr_to_array.
///
/// Usable as: Matcher<ParenType>
AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType));

/// Matches block pointer types, i.e. types syntactically represented as
/// "void (^)(int)".
///
/// The \c pointee is always required to be a \c FunctionType.
extern const AstTypeMatcher<BlockPointerType> blockPointerType;

/// Matches member pointer types.
/// Given
/// \code
///   struct A { int i; };
///   int A::* ptr = &A::i;
/// \endcode
/// memberPointerType()
///   matches "int A::* ptr"
extern const AstTypeMatcher<MemberPointerType> memberPointerType;

/// Matches pointer types, but does not match Objective-C object pointer
/// types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int c = 5;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "int *a", but does not match "Foo *f".
extern const AstTypeMatcher<PointerType> pointerType;

/// Matches an Objective-C object pointer type, which is different from
/// a pointer type, despite being syntactically similar.
///
/// Given
/// \code
///   int *a;
///
///   @interface Foo
///   @end
///   Foo *f;
/// \endcode
/// pointerType()
///   matches "Foo *f", but does not match "int *a".
extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType;

/// Matches both lvalue and rvalue reference types.
///
/// Given
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f.
extern const AstTypeMatcher<ReferenceType> referenceType;

/// Matches lvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is
/// matched since the type is deduced as int& by reference collapsing rules.
extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType;

/// Matches rvalue reference types.
///
/// Given:
/// \code
///   int *a;
///   int &b = *a;
///   int &&c = 1;
///   auto &d = b;
///   auto &&e = c;
///   auto &&f = 2;
///   int g = 5;
/// \endcode
///
/// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not
/// matched as it is deduced to int& by reference collapsing rules.
extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType;

/// Narrows PointerType (and similar) matchers to those where the
/// \c pointee matches a given matcher.
///
/// Given
/// \code
///   int *a;
///   int const *b;
///   float const *f;
/// \endcode
/// pointerType(pointee(isConstQualified(), isInteger()))
///   matches "int const *b"
///
/// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>,
///   Matcher<PointerType>, Matcher<ReferenceType>
AST_TYPELOC_TRAVERSE_MATCHER_DECL(
    pointee, getPointee,
    AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType,
                                    PointerType, ReferenceType));

/// Matches typedef types.
///
/// Given
/// \code
///   typedef int X;
/// \endcode
/// typedefType()
///   matches "typedef int X"
extern const AstTypeMatcher<TypedefType> typedefType;

/// Matches enum types.
///
/// Given
/// \code
///   enum C { Green };
///   enum class S { Red };
///
///   C c;
///   S s;
/// \endcode
///
/// \c enumType() matches the type of the variable declarations of both \c c and
/// \c s.
extern const AstTypeMatcher<EnumType> enumType;

/// Matches template specialization types.
///
/// Given
/// \code
///   template <typename T>
///   class C { };
///
///   template class C<int>;  // A
///   C<char> var;            // B
/// \endcode
///
/// \c templateSpecializationType() matches the type of the explicit
/// instantiation in \c A and the type of the variable declaration in \c B.
extern const AstTypeMatcher<TemplateSpecializationType>
    templateSpecializationType;

/// Matches C++17 deduced template specialization types, e.g. deduced class
/// template types.
///
/// Given
/// \code
///   template <typename T>
///   class C { public: C(T); };
///
///   C c(123);
/// \endcode
/// \c deducedTemplateSpecializationType() matches the type in the declaration
/// of the variable \c c.
extern const AstTypeMatcher<DeducedTemplateSpecializationType>
    deducedTemplateSpecializationType;

/// Matches types nodes representing unary type transformations.
///
/// Given:
/// \code
///   typedef __underlying_type(T) type;
/// \endcode
/// unaryTransformType()
///   matches "__underlying_type(T)"
extern const AstTypeMatcher<UnaryTransformType> unaryTransformType;

/// Matches record types (e.g. structs, classes).
///
/// Given
/// \code
///   class C {};
///   struct S {};
///
///   C c;
///   S s;
/// \endcode
///
/// \c recordType() matches the type of the variable declarations of both \c c
/// and \c s.
extern const AstTypeMatcher<RecordType> recordType;

/// Matches tag types (record and enum types).
///
/// Given
/// \code
///   enum E {};
///   class C {};
///
///   E e;
///   C c;
/// \endcode
///
/// \c tagType() matches the type of the variable declarations of both \c e
/// and \c c.
extern const AstTypeMatcher<TagType> tagType;

/// Matches types specified with an elaborated type keyword or with a
/// qualified name.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   class C {};
///
///   class C c;
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType() matches the type of the variable declarations of both
/// \c c and \c d.
extern const AstTypeMatcher<ElaboratedType> elaboratedType;

/// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier,
/// matches \c InnerMatcher if the qualifier exists.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N"))))
/// matches the type of the variable declaration of \c d.
AST_MATCHER_P(ElaboratedType, hasQualifier,
              internal::Matcher<NestedNameSpecifier>, InnerMatcher) {
  // Unqualified elaborated types (e.g. plain "struct S") have no qualifier.
  if (const NestedNameSpecifier *Qualifier = Node.getQualifier())
    return InnerMatcher.matches(*Qualifier, Finder, Builder);

  return false;
}

/// Matches ElaboratedTypes whose named type matches \c InnerMatcher.
///
/// Given
/// \code
///   namespace N {
///     namespace M {
///       class D {};
///     }
///   }
///   N::M::D d;
/// \endcode
///
/// \c elaboratedType(namesType(recordType(
/// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable
/// declaration of \c d.
AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>,
              InnerMatcher) {
  return InnerMatcher.matches(Node.getNamedType(), Finder, Builder);
}

/// Matches types that represent the result of substituting a type for a
/// template type parameter.
/// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. /// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whoes decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. 
/// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. 
/// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. 
/// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. 
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. 
/// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER(Expr, nullPointerConstant) { return Node.isNullPointerConstant(Finder->getASTContext(), Expr::NPC_ValueDependentIsNull); } /// Matches declaration of the function the statement belongs to /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. 
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. 
/// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. 
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. 
/// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. 
e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
Tanh.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Tanh.c"
#else

/* Lua C binding: forward pass of the Tanh module.
 * Stack: arg 1 = the module table (provides the "output" field),
 *        arg 2 = the input tensor.
 * Computes output[i] = tanh(input[i]) element-wise after resizing
 * output to match input.  Returns 1 (nn binding convention).
 */
static int nn_(Tanh_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  THTensor_(resizeAs)(output, input);

  /* Slow path: 1-D or non-contiguous tensors go through the generic
   * element-wise iterator macro, which handles arbitrary strides. */
  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
  {
    TH_TENSOR_APPLY2(real, output, real, input, \
                     *output_data = tanh(*input_data););
  }
  else
  {
    /* Fast path: both tensors are contiguous, so raw pointer arithmetic
     * is valid.  For a contiguous tensor, stride[0] is the number of
     * elements per slice along dim 0, so size[0]*stride[0] covers every
     * element.  Parallelize over the leading dimension. */
    real* output_data = THTensor_(data)(output);
    real* input_data = THTensor_(data)(input);
    long k;

#pragma omp parallel for private(k)
    for (k = 0; k < input->size[0]; k++)
    {
      real* ptr_output = output_data + k*input->stride[0];
      real* ptr_input = input_data + k*input->stride[0];
      long i;
      for (i = 0; i < input->stride[0]; i++)
        ptr_output[i] = tanh(ptr_input[i]);
    }
  }
  return 1;
}

/* Lua C binding: backward pass of the Tanh module.
 * Stack: arg 1 = the module table (provides "output" and "gradInput"),
 *        arg 3 = gradOutput tensor (gradient w.r.t. the module output).
 * Uses d/dx tanh(x) = 1 - tanh(x)^2, reading the cached forward output:
 *   gradInput[i] = gradOutput[i] * (1 - output[i]^2).
 * Returns 1 (nn binding convention).
 */
static int nn_(Tanh_updateGradInput)(lua_State *L)
{
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor);

  THTensor_(resizeAs)(gradInput, output);

  /* Slow path: any non-contiguous operand falls back to the generic
   * three-tensor iterator macro. */
  if (output->nDimension == 1 || !THTensor_(isContiguous)(output) || !THTensor_(isContiguous)(gradOutput) || !THTensor_(isContiguous)(gradInput))
  {
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, output, \
                     real z = *output_data; \
                     *gradInput_data = *gradOutput_data * (1. - z*z););
  }
  else
  {
    /* Fast path: contiguous tensors, parallelized over the leading
     * dimension exactly as in updateOutput above. */
    real* gradOutput_data = THTensor_(data)(gradOutput);
    real* gradInput_data = THTensor_(data)(gradInput);
    real* output_data = THTensor_(data)(output);
    long k;

#pragma omp parallel for private(k)
    for (k = 0; k < output->size[0]; k++)
    {
      real* ptr_gradOutput = gradOutput_data + k*output->stride[0];
      real* ptr_gradInput = gradInput_data + k*output->stride[0];
      real* ptr_output = output_data + k*output->stride[0];
      long i;
      for (i = 0; i < output->stride[0]; i++)
      {
        real z = ptr_output[i];
        ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z);
      }
    }
  }
  return 1;
}

/* Method table registered under the "nn" name on the tensor metatable. */
static const struct luaL_Reg nn_(Tanh__) [] = {
  {"Tanh_updateOutput", nn_(Tanh_updateOutput)},
  {"Tanh_updateGradInput", nn_(Tanh_updateGradInput)},
  {NULL, NULL}
};

/* Registers the Tanh methods on the torch_Tensor metatable and restores
 * the Lua stack. */
static void nn_(Tanh_init)(lua_State *L)
{
  luaT_pushmetatable(L, torch_Tensor);
  luaT_registeratname(L, nn_(Tanh__), "nn");
  lua_pop(L,1);
}

#endif
GrB_Semiring_wait.c
//------------------------------------------------------------------------------ // GrB_Semiring_wait: wait for a user-defined GrB_Semiring to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_Semiring has no pending // operations to wait for. All this method does is verify that the semiring is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_Semiring_wait // no work, just check if the GrB_Semiring is valid ( #if (GxB_IMPLEMENTATION_MAJOR <= 5) GrB_Semiring *semiring #else GrB_Semiring semiring, GrB_WaitMode waitmode #endif ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #if (GxB_IMPLEMENTATION_MAJOR <= 5) GB_WHERE1 ("GrB_Semiring_wait (&semiring)") ; GB_RETURN_IF_NULL (semiring) ; GB_RETURN_IF_NULL_OR_FAULTY (*semiring) ; #else GB_WHERE1 ("GrB_Semiring_wait (semiring, mode)") ; GB_RETURN_IF_NULL_OR_FAULTY (semiring) ; #endif //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
GB_unop__ainv_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__ainv_fc32_fc32)
// op(A') function:  GB (_unop_tran__ainv_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_FC32_ainv (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_FC32_ainv (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC32_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ;            \
    Cx [pC] = GB_FC32_ainv (z) ;    \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__ainv_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entry slots to process
    int nthreads                // # of OpenMP threads for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // Ab == NULL: A is not bitmap, so all anz slots hold live entries;
        // apply the operator to every one of them
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_ainv (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // Ab [p] == 0 marks an absent entry, which is skipped
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_FC32_ainv (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__ainv_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel is textually included here; it expands
    // using the type-specific GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__identity_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_fp32
// op(A') function:  GB_tran__identity_bool_fp32

// C type:   bool
// A type:   float
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij   (identity; the work is in the float->bool cast)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;               \
    GB_OP (GB_CX (pC), x) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_bool_fp32
(
    bool *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is the shared template GB_unaryop_transpose.c, specialized by the
// macros defined above.

GrB_Info GB_tran__identity_bool_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
6806.c
/* * Compile using the command: * `cc 27Stencil.c -o oa -fopenmp -lm` */ #include <math.h> #include <omp.h> #include <stdint.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #ifdef _OPENACC #include <openacc.h> #endif #define DEFAULT_DATASIZE 1048576 /* Default datasize. */ #define DEFAULT_REPS 10 /* Default repetitions. */ #define CONF95 1.96 #define ITERATIONS 10 #define FAC (1./26) #define TOLERANCE 1.0e-15 extern int reps; /* Repetitions. */ extern double *times; /* Array to store results in. */ extern int flag; /* Flag to set CPU or GPU invocation. */ extern unsigned int datasize; /* Datasize passed to benchmark functions. */ unsigned int datasize = -1; /* Datasize for tests in bytes. */ int reps = -1; /* Repetitions. */ double *times; /* Array of doubles storing the benchmark times in microseconds. */ double testtime; /* The average test time in microseconds for reps runs. */ double testsd; /* The standard deviation in the test time in microseconds for reps runs. */ int flag = 0; /* 0 indicates CPU. */ /* * Function prototypes for common functions. */ void init(int argc, char **argv); void finalisetest(char *); void finalise(void); void benchmark(char *, double (*test)(void)); void print_results(char *, double, double); /* Forward Declarations of utility functions*/ double max_diff(double *, double *, int); void wul(); void usage(char *argv[]) { printf("Usage: %s \n" "\t--reps <repetitions> (default %d)\n" "\t--datasize <datasize> (default %d bytes)\n", argv[0], DEFAULT_REPS, DEFAULT_DATASIZE); } /* * This function parses the parameters from the command line. 
*/ void parse_args(int argc, char *argv[]) { int arg; for (arg = 1; arg < argc; arg++) { if (strcmp(argv[arg], "--reps") == 0) { reps = atoi(argv[++arg]); if (reps == 0) { printf("Invalid integer:--reps: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "--datasize") == 0) { datasize = atoi(argv[++arg]); if (datasize == 0) { printf("Invalid integer:--datasize: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } else if (strcmp(argv[arg], "-h") == 0) { usage(argv); exit(EXIT_SUCCESS); } else { printf("Invalid parameters: %s\n", argv[arg]); usage(argv); exit(EXIT_FAILURE); } } } void stats(double *mtp, double *sdp) { double meantime, totaltime, sumsq, mintime, maxtime, sd; int i, good_reps; mintime = 1.0e10; maxtime = 0.; totaltime = 0.; good_reps = 0; for (i = 0; i < reps; i++) { /* Skip entries where times is 0, this indicates an error occured */ if (times[i] != 0){ mintime = (mintime < times[i]) ? mintime : times[i]; maxtime = (maxtime > times[i]) ? maxtime : times[i]; totaltime += times[i]; good_reps++; } } meantime = totaltime / good_reps; sumsq = 0; for (i = 0; i < reps; i++) { if (times[i] != 0){ sumsq += (times[i] - meantime) * (times[i] - meantime); } } sd = sqrt(sumsq / good_reps); *mtp = meantime; *sdp = sd; } /* * This function prints the results of the tests. * If you use a compiler which sets a different preprocessor flag * you may wish to add it here. */ void print_results(char *name, double testtime, double testsd) { char compiler[20]; /* Set default compiler idetifier. */ sprintf(compiler, "COMPILER"); /* Set compiler identifier based on known preprocessor flags. */ #ifdef __PGI sprintf(compiler, "PGI"); #endif #ifdef __HMPP sprintf(compiler, "CAPS"); #endif //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); printf("%f\n", testtime*1e6); } /* * This function initialises the storage for the test results and set the defaults. 
*/
void init(int argc, char **argv)
{
    parse_args(argc, argv);

    /* Fall back to the defaults for anything not given on the command line. */
    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }

    /* NOTE(review): malloc result is not checked — TODO confirm callers
       tolerate times == NULL on allocation failure. */
    times = (double *)malloc((reps) * sizeof(double));
}

/* Release the per-repetition results storage allocated by init(). */
void finalise(void)
{
    free(times);
}

/*
 * Run the given benchmark `reps` times, recording each run's return value in
 * the global times[] array, then compute and print summary statistics.
 * A return of -10000 marks an allocation failure and -11000 a host/device
 * mismatch; both are recorded as 0 so stats() skips them.
 */
void benchmark(char *name, double (*test)(void))
{
    int i = 0;
    double tmp = 0;

    for (i = 0; i < reps; i++) {
        tmp = test();
        if (tmp == -10000) {
            /* Sentinel: the benchmark could not allocate its buffers. */
            printf("Memory allocation failure in %s\n", name);
            times[i] = 0;
        }
        else if (tmp == -11000) {
            /* Sentinel: device result did not match the host reference. */
            printf("CPU/GPU mismatch in %s\n", name);
            times[i] = 0;
        }
        else {
            times[i] = tmp;
        }
    }

    stats(&testtime, &testsd);
    print_results(name, testtime, testsd);
}

/*
 * 27-point stencil benchmark.
 *
 * Runs ITERATIONS sweeps of a 27-point (26-neighbour average) stencil on a
 * cubic grid sized from the global `datasize`, first serially on the host to
 * produce a reference, then again in a timed OpenACC data region, and checks
 * the two results agree to within TOLERANCE.
 *
 * Returns the elapsed time of the timed section (omp_get_wtime delta, i.e.
 * seconds), or the sentinels -10000 (allocation failure) / -11000 (mismatch)
 * understood by benchmark().
 */
double stencil()
{
    extern unsigned int datasize;
    /* Grid edge: two arrays of sz^3 doubles must fit in datasize bytes. */
    int sz = cbrt((datasize/sizeof(double))/2);
    int i, j, k, iter;
    int n = sz-2;              /* interior extent; indices 1..n, halos at 0 and sz-1 */
    double fac = FAC;
    double t1, t2;
    double md;                 /* max |host - device| over the interior */

    /* Work buffers, with halos. */
    double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
    double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);

    if (a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL) {
        /* Something went wrong in the memory allocation here, fail gracefully. */
        return(-10000);
    }

    /* Initialize input array a0: zero all of it (including halos). */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = 0.0;
            }
        }
    }

    /* Use random numbers in (0,1] to fill the interior. */
    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
            }
        }
    }

    /* Save the initial input array for the later GPU run. */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    /* Run the main computation on the host to build the reference result. */
    for (iter = 0; iter < ITERATIONS; iter++) {
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    /* Average of the 26 neighbours (centre excluded). */
                    a1[i*sz*sz+j*sz+k] = (
                        a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                        a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                        a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                        a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                        a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                        a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                        a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                        a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                    ) * fac;
                }
            }
        }
        /* Copy the interior of a1 back into a0 for the next sweep. */
        for (i = 1; i < n+1; i++) {
            for (j = 1; j < n+1; j++) {
                for (k = 1; k < n+1; k++) {
                    a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                }
            }
        }
    } /* end iteration loop */

    /* Save the host result. */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
            }
        }
    }

    /* Copy the initial array back to a0 for the timed run. */
    for (i = 0; i < sz; i++) {
        for (j = 0; j < sz; j++) {
            for (k = 0; k < sz; k++) {
                a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
            }
        }
    }

    /* Timed run inside an OpenACC data region. */
    t1 = omp_get_wtime();
    #pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
    {
        for (iter = 0; iter < ITERATIONS; iter++) {
            /* NOTE(review): this first sweep is an OpenMP host loop while the
               copy-back below is an acc parallel loop — it operates on the
               host copies of a0/a1 even though both are mapped into the data
               region.  TODO confirm this mixing is intentional. */
            #pragma omp parallel for schedule(static, 16) num_threads(8)
            for (i = 1; i < n+1; i++) {
                for (j = 1; j < n+1; j++) {
                    #pragma omp simd
                    for (k = 1; k < n+1; k++) {
                        a1[i*sz*sz+j*sz+k] = (
                            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
                            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
                            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
                            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
                            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
                            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
                            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
                            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
                        ) * fac;
                    }
                }
            }
            #pragma acc parallel loop
            for (i = 1; i < n+1; i++) {
                #pragma acc loop
                for (j = 1; j < n+1; j++) {
                    #pragma acc loop
                    for (k = 1; k < n+1; k++) {
                        a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
                    }
                }
            }
        } /* end iteration loop */
    } /* end data region */
    #pragma acc wait
    t2 = omp_get_wtime();

    memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);

    md = max_diff(&host_result[0],&device_result[0], sz);

    /* Free malloc'd memory to prevent leaks. */
    free(a0);
    free(a0_init);
    free(a1);
    free(host_result);
    free(device_result);

    if (md < TOLERANCE ) {
        return(t2 - t1);
    }
    else {
        return(-11000);   /* GPU result outside tolerance: signal a mismatch. */
    }
}

/* Utility functions */

/*
 * Return the maximum absolute elementwise difference between the interiors
 * (indices 1..sz-2 in each dimension) of two sz^3 arrays.
 */
double max_diff(double *array1,double *array2, int sz)
{
    double tmpdiff, diff;
    int i,j,k;
    int n = sz-2;
    diff=0.0;

    for (i = 1; i < n+1; i++) {
        for (j = 1; j < n+1; j++) {
            for (k = 1; k < n+1; k++) {
                tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]);
                if (tmpdiff > diff) diff = tmpdiff;
            }
        }
    }
    return diff;
}

/*
 * This function ensures the device is awake.
 * It is more portable than acc_init().
 */
void wul(){
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;

    if (arr_a==NULL||arr_b==NULL) {
        printf("Unable to allocate memory in wul.\n");
    }

    for (i=0;i<data;i++){
        arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
    }

    #pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
        #pragma acc parallel loop
        for (i=0;i<data;i++){
            arr_b[i] = arr_a[i] * 2;
        }
    }

    if (arr_a[0] < 0){
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }

    free(arr_a);
    free(arr_b);
}

int main(int argc, char **argv)
{
    char testName[32];

    /* Initialise storage for test results & parse input arguments. */
    init(argc, argv);

    /* Ensure device is awake. */
    wul();

    sprintf(testName, "27S");
    benchmark(testName, &stencil);

    /* Print results & free results storage. */
    finalise();

    return EXIT_SUCCESS;
}
transpose_conj_c_csc.c
#include "alphasparse/format.h"
#include <stdlib.h>
#include <alphasparse/opt.h>
#include <alphasparse/util.h>
#include <alphasparse/compute.h>
#include <memory.h>

/*
 * Conjugate transpose of a CSC matrix: *B = conj(A^T), allocated here.
 *
 * Works in two phases:
 *   1. Serial: count entries per row of A to build B's column pointers
 *      (a row of A becomes a column of B).
 *   2. Parallel: scatter A's entries into B; each thread owns a disjoint
 *      range [lcs, lch) of B's columns (from balanced_partition_row_by_nnz),
 *      so the shared col_offset[] write cursors are never raced.
 *
 * Scanning A in column order within each thread keeps B's row indices sorted
 * per column.  Caller owns the returned matrix *B.
 *
 * NOTE(review): row_counter/col_offset/partition are VLAs — for large rowA
 * this may overflow the stack; TODO confirm expected matrix sizes.
 * NOTE(review): nnz = A->cols_end[colA - 1] reads out of bounds if colA == 0;
 * presumably callers never pass an empty matrix — verify.
 */
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSC *A, ALPHA_SPMAT_CSC **B)
{
    ALPHA_SPMAT_CSC *mat = alpha_malloc(sizeof(ALPHA_SPMAT_CSC));
    *B = mat;
    ALPHA_INT rowA = A->rows;
    ALPHA_INT colA = A->cols;
    /* Transposed dimensions. */
    mat->rows = colA;
    mat->cols = rowA;
    ALPHA_INT nnz = A->cols_end[colA - 1];
    /* cols_start and cols_end share one (cols+1)-element buffer, offset by 1. */
    ALPHA_INT *cols_offset = alpha_memalign((mat->cols + 1) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    mat->cols_start = cols_offset;
    mat->cols_end = cols_offset + 1;
    mat->row_indx = alpha_memalign(nnz * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    mat->values = alpha_memalign(nnz * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);

    ALPHA_INT row_counter[rowA];       /* entries per row of A == per column of B */
    ALPHA_INT col_offset[mat->cols];   /* per-column write cursor into B */
    memset(row_counter, '\0', rowA * sizeof(ALPHA_INT));

    /* Phase 1: histogram of A's row indices. */
    for (ALPHA_INT i = 0; i < nnz; ++i) {
        row_counter[A->row_indx[i]] += 1;
    }

    /* Exclusive prefix sum -> B's column pointers and initial cursors. */
    col_offset[0] = 0;
    mat->cols_start[0] = 0;
    for (ALPHA_INT i = 1; i < mat->cols; ++i) {
        col_offset[i] = col_offset[i - 1] + row_counter[i - 1];
        mat->cols_end[i - 1] = col_offset[i];
    }
    mat->cols_end[mat->cols - 1] = nnz;

    ALPHA_INT num_threads = alpha_get_thread_num();
    ALPHA_INT partition[num_threads + 1];
    balanced_partition_row_by_nnz(mat->cols_end, mat->cols, num_threads, partition);

#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT lcs = partition[tid];        /* first B-column this thread owns */
        ALPHA_INT lch = partition[tid + 1];    /* one past the last */
        /* Phase 2: every thread scans all of A but only scatters entries
           whose destination column bc falls in its own range, so the
           increments of col_offset[bc] are race-free. */
        for (ALPHA_INT ac = 0; ac < colA; ++ac) {
            for (ALPHA_INT ai = A->cols_start[ac]; ai < A->cols_end[ac]; ++ai) {
                ALPHA_INT bc = A->row_indx[ai];
                if (bc < lcs || bc >= lch) continue;
                ALPHA_INT index = col_offset[bc];
                mat->row_indx[index] = ac;
                /* Conjugate the value while scattering. */
                alpha_conj(mat->values[index], A->values[ai]);
                col_offset[bc] += 1;
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
GB_unaryop__lnot_uint64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_fp64
// op(A') function:  GB_tran__lnot_uint64_fp64

// C type:   uint64_t
// A type:   double
// cast:     uint64_t cij ; GB_CAST_UNSIGNED(cij,aij,64)
// unaryop:  cij = !(aij != 0)   (logical not)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical not)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (double -> uint64_t, saturating via GB_CAST_UNSIGNED)
#define GB_CASTING(z, x) \
    uint64_t z ; GB_CAST_UNSIGNED(z,x,64) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)               \
{                                       \
    /* aij = Ax [pA] */                 \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;               \
    GB_OP (GB_CX (pC), x) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint64_fp64
(
    uint64_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is the shared template GB_unaryop_transpose.c, specialized by the
// macros defined above.

GrB_Info GB_tran__lnot_uint64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
collision_matrix.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#include "phonoc_array.h"
#include "phonoc_utils.h"

#include "collision_matrix.h"

static void get_collision_matrix(double *collision_matrix,
                                 const double *fc3_normal_squared,
                                 const long num_band0, const long num_band,
                                 const double *frequencies,
                                 const long (*triplets)[3],
                                 const long *triplets_map, const long num_gp,
                                 const long *map_q, const long *rot_grid_points,
                                 const long num_ir_gp, const long num_rot,
                                 const double *rotations_cartesian,
                                 const double *g, const double temperature,
                                 const double unit_conversion_factor,
                                 const double cutoff_frequency);
static void get_reducible_collision_matrix(
    double *collision_matrix, const double *fc3_normal_squared,
    const long num_band0, const long num_band, const double *frequencies,
    const long (*triplets)[3], const long *triplets_map, const long num_gp,
    const long *map_q, const double *g, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency);
static void get_inv_sinh(double *inv_sinh, const long gp,
                         const double temperature, const double *frequencies,
                         const long triplet[3], const long *triplets_map,
                         const long *map_q, const long num_band,
                         const double cutoff_frequency);
static long *create_gp2tp_map(const long *triplets_map, const long num_gp);

/*
 * Public entry: accumulate the symmetry-reduced collision matrix.
 * Extracts the array dimensions from fc3_normal_squared and forwards to the
 * static worker.  g is advanced by 2*num_triplets*num_band0*num_band^2 to
 * select its third component block.
 */
void col_get_collision_matrix(double *collision_matrix,
                              const Darray *fc3_normal_squared,
                              const double *frequencies,
                              const long (*triplets)[3],
                              const long *triplets_map, const long *map_q,
                              const long *rot_grid_points,
                              const double *rotations_cartesian,
                              const double *g, const long num_ir_gp,
                              const long num_gp, const long num_rot,
                              const double temperature,
                              const double unit_conversion_factor,
                              const double cutoff_frequency) {
    long num_triplets, num_band0, num_band;

    num_triplets = fc3_normal_squared->dims[0];
    num_band0 = fc3_normal_squared->dims[1];
    num_band = fc3_normal_squared->dims[2];

    get_collision_matrix(
        collision_matrix, fc3_normal_squared->data, num_band0, num_band,
        frequencies, triplets, triplets_map, num_gp, map_q, rot_grid_points,
        num_ir_gp, num_rot, rotations_cartesian,
        g + 2 * num_triplets * num_band0 * num_band * num_band, temperature,
        unit_conversion_factor, cutoff_frequency);
}

/*
 * Public entry: accumulate the full (reducible) collision matrix, i.e. one
 * row block per grid point with no rotational reduction.
 */
void col_get_reducible_collision_matrix(double *collision_matrix,
                                        const Darray *fc3_normal_squared,
                                        const double *frequencies,
                                        const long (*triplets)[3],
                                        const long *triplets_map,
                                        const long *map_q, const double *g,
                                        const long num_gp,
                                        const double temperature,
                                        const double unit_conversion_factor,
                                        const double cutoff_frequency) {
    long num_triplets, num_band, num_band0;

    num_triplets = fc3_normal_squared->dims[0];
    num_band0 = fc3_normal_squared->dims[1];
    num_band = fc3_normal_squared->dims[2];

    get_reducible_collision_matrix(
        collision_matrix, fc3_normal_squared->data, num_band0, num_band,
        frequencies, triplets, triplets_map, num_gp, map_q,
        g + 2 * num_triplets * num_band0 * num_band * num_band, temperature,
        unit_conversion_factor, cutoff_frequency);
}

/*
 * Worker for the symmetry-reduced case.  For each irreducible grid point i
 * and each rotation j, the contribution at the rotated grid point is summed
 * over bands (weighted by the phase-space function g and 1/sinh occupation)
 * and scattered into collision_matrix through the Cartesian rotation matrix.
 * Parallelized over irreducible grid points; each thread writes a disjoint
 * i-slice of collision_matrix and owns its own inv_sinh scratch buffer.
 */
static void get_collision_matrix(double *collision_matrix,
                                 const double *fc3_normal_squared,
                                 const long num_band0, const long num_band,
                                 const double *frequencies,
                                 const long (*triplets)[3],
                                 const long *triplets_map, const long num_gp,
                                 const long *map_q, const long *rot_grid_points,
                                 const long num_ir_gp, const long num_rot,
                                 const double *rotations_cartesian,
                                 const double *g, const double temperature,
                                 const double unit_conversion_factor,
                                 const double cutoff_frequency) {
    long i, j, k, l, m, n, ti, r_gp;
    long *gp2tp_map;
    double collision;
    double *inv_sinh;

    gp2tp_map = create_gp2tp_map(triplets_map, num_gp);

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, k, l, m, n, ti, r_gp, collision, inv_sinh)
#endif
    for (i = 0; i < num_ir_gp; i++) {
        inv_sinh = (double *)malloc(sizeof(double) * num_band);
        for (j = 0; j < num_rot; j++) {
            r_gp = rot_grid_points[i * num_rot + j];
            /* Triplet index of the rotated grid point. */
            ti = gp2tp_map[triplets_map[r_gp]];
            get_inv_sinh(inv_sinh, r_gp, temperature, frequencies, triplets[ti],
                         triplets_map, map_q, num_band, cutoff_frequency);

            for (k = 0; k < num_band0; k++) {
                for (l = 0; l < num_band; l++) {
                    /* Band-summed scattering strength for (k, l). */
                    collision = 0;
                    for (m = 0; m < num_band; m++) {
                        collision +=
                            fc3_normal_squared[ti * num_band0 * num_band *
                                                   num_band +
                                               k * num_band * num_band +
                                               l * num_band + m] *
                            g[ti * num_band0 * num_band * num_band +
                              k * num_band * num_band + l * num_band + m] *
                            inv_sinh[m] * unit_conversion_factor;
                    }
                    /* Scatter through the 3x3 Cartesian rotation. */
                    for (m = 0; m < 3; m++) {
                        for (n = 0; n < 3; n++) {
                            collision_matrix[k * 3 * num_ir_gp * num_band * 3 +
                                             m * num_ir_gp * num_band * 3 +
                                             i * num_band * 3 + l * 3 + n] +=
                                collision *
                                rotations_cartesian[j * 9 + m * 3 + n];
                        }
                    }
                }
            }
        }
        free(inv_sinh);
        inv_sinh = NULL;
    }

    free(gp2tp_map);
    gp2tp_map = NULL;
}

/*
 * Worker for the reducible case: same band summation as above, but one
 * contribution per grid point i with no rotation scatter.  Parallelized over
 * grid points; each thread writes a disjoint i-slice.
 */
static void get_reducible_collision_matrix(
    double *collision_matrix, const double *fc3_normal_squared,
    const long num_band0, const long num_band, const double *frequencies,
    const long (*triplets)[3], const long *triplets_map, const long num_gp,
    const long *map_q, const double *g, const double temperature,
    const double unit_conversion_factor, const double cutoff_frequency) {
    long i, j, k, l, ti;
    long *gp2tp_map;
    double collision;
    double *inv_sinh;

    gp2tp_map = create_gp2tp_map(triplets_map, num_gp);

#ifdef PHPYOPENMP
#pragma omp parallel for private(j, k, l, ti, collision, inv_sinh)
#endif
    for (i = 0; i < num_gp; i++) {
        inv_sinh = (double *)malloc(sizeof(double) * num_band);
        ti = gp2tp_map[triplets_map[i]];
        get_inv_sinh(inv_sinh, i, temperature, frequencies, triplets[ti],
                     triplets_map, map_q, num_band, cutoff_frequency);

        for (j = 0; j < num_band0; j++) {
            for (k = 0; k < num_band; k++) {
                collision = 0;
                for (l = 0; l < num_band; l++) {
                    collision +=
                        fc3_normal_squared[ti * num_band0 * num_band *
                                               num_band +
                                           j * num_band * num_band +
                                           k * num_band + l] *
                        g[ti * num_band0 * num_band * num_band +
                          j * num_band * num_band + k * num_band + l] *
                        inv_sinh[l] * unit_conversion_factor;
                }
                collision_matrix[j * num_gp * num_band + i * num_band + k] +=
                    collision;
            }
        }
        free(inv_sinh);
        inv_sinh = NULL;
    }

    free(gp2tp_map);
    gp2tp_map = NULL;
}

/*
 * Fill inv_sinh[0..num_band-1] with 1/sinh occupation factors for the third
 * (or, when q1/q2 are permuted, the second) member of the triplet.  Bands at
 * or below cutoff_frequency get 0.
 */
static void get_inv_sinh(double *inv_sinh, const long gp,
                         const double temperature, const double *frequencies,
                         const long triplet[3], const long *triplets_map,
                         const long *map_q, const long num_band,
                         const double cutoff_frequency) {
    long i, gp2;
    double f;

    /* This assumes the algorithm of get_ir_triplets_at_q_perm_q1q2, */
    /* where defined triplets_map[gp] == triplets_map[map_q[gp]]. */
    /* If triplets_map[map_q[gp]] != map_q[gp], q1 and q2 are permuted. */
    if (triplets_map[gp] == map_q[gp]) {
        gp2 = triplet[2];
    } else {
        gp2 = triplet[1];
    }

    for (i = 0; i < num_band; i++) {
        f = frequencies[gp2 * num_band + i];
        if (f > cutoff_frequency) {
            inv_sinh[i] = phonoc_inv_sinh_occupation(f, temperature);
        } else {
            inv_sinh[i] = 0;
        }
    }
}

/* Symmetrically independent triplets are indexed. */
/* Inverse definition of ir_grid_points in get_BZ_triplets_at_q */
/* in triplet_grid.c.  Caller frees the returned array. */
static long *create_gp2tp_map(const long *triplets_map, const long num_gp) {
    long i, num_ir;
    long *gp2tp_map;

    gp2tp_map = (long *)malloc(sizeof(long) * num_gp);
    num_ir = 0;
    for (i = 0; i < num_gp; i++) {
        if (triplets_map[i] == i) {
            /* Grid point is its own representative: assign next ir index. */
            gp2tp_map[i] = num_ir;
            num_ir++;
        } else {
            /* This should not be used. */
            gp2tp_map[i] = -1;
        }
    }

    return gp2tp_map;
}
edep.c
// Utility to parse lost particle data into angle-resolved energy deposition // data // // Usage: // ./edep [species name] // // // icc -fopenmp -o edep edep.c // // Requires OpenMP 4.5 (gcc 6.1 or Intel 17.0), or you may run slowrially // // Note the energy cutoff Ecut below. // // This program must match how the lost particle data are written as defined in // your deck. TODO: Update this all to hdf5 or similar, so you don't have to // know the data layout bit for bit. // // polarbinplot.py will make a nice plot for you using the output of this // program. // // First release written by Scott V. Luedtke, XCP-6, October 4, 2019. #include <stdio.h> #include <stdlib.h> /* for atoi */ #include <stdarg.h> /* for va_list, va_start, va_end */ #include <errno.h> #include <string.h> /* for strcspn */ #include <math.h> /* for sqrt */ #include <sys/stat.h> /* for mkdir */ #include <stdint.h> /* for uint32_t, uint64_t */ #include <inttypes.h> /* to print uint64_t */ #include <glob.h> /* to get list of filenames */ #define BEGIN_PRIMITIVE do #define END_PRIMITIVE while (0) void print_log( const char *fmt, ... ); #define ERROR(args) BEGIN_PRIMITIVE { \ print_log( "Error at %s(%i):\n\t", __FILE__, __LINE__ ); \ print_log args; \ print_log( "\n" ); \ } END_PRIMITIVE //--------------------------------------------------------------------- // General purpose memory allocation macro #define ALLOCATE(A,LEN,TYPE) \ if ( !((A)=(TYPE *)malloc((size_t)(LEN)*sizeof(TYPE))) ) \ ERROR(("Cannot allocate.")); // Construct an index for the particle data. This must match how the data are // output in the lost particle processor in your VPIC deck. // Sorry this is so ad hoc at the moment. 
#define ux 0 #define uy 1 #define uz 2 #define w 6 #define x 3 #define y 4 #define z 5 #define numvars 7 int main( int argc, char *argv[] ) { fprintf(stderr,"Ham.\n"); int num_tracers_total, nprocs; int nvar; int itmax; char temp[256]; char usage_msg[] = "Usage: ./edep [species name]\n\n"; if ( argc != 2 ) { fprintf( stderr, "%s", usage_msg ); exit(0); } // Read some numbers from params.txt char buffer[1024]; //int interval, nstep_total, i; FILE *params; params = fopen("../params.txt", "r"); if (!params) ERROR(("Cannot open params.txt. (The location is probably wrong.)")); double timeToSI, lengthToSI, massToSI, chargeToSI; fgets(buffer, 1024, params); fscanf(params, "%lf %[^\n]\n", &timeToSI, buffer); fscanf(params, "%lf %[^\n]\n", &lengthToSI, buffer); fscanf(params, "%lf %[^\n]\n", &massToSI, buffer); fscanf(params, "%lf %[^\n]\n", &chargeToSI, buffer); fclose(params); char *particle = argv[1]; float Ecut = 1.; // MeV double tmax = M_PI; double tmin = 0; double pmax = 2.*M_PI; double pmin = 0; // Must define these to use in the reduction clause #define nbinsp 100 #define nbinst 100 double dp = pmax/(double)nbinsp; //double dt = tmax/(double)nbinst; // When you have 10^13 particles at 10^8 precision, you might need extended // precision for the sums. long double hist[nbinst][nbinsp] = {0}; // The code uses normalized momentum, so don't use the conversion factors // from params.txt. If you want to use different units than here, change // this, or manually adjust them in your plotter. 
#define e_SI (1.602176634e-19) /* C */ #define c_SI (2.99792458e8) /* m / s */ #define m_e_SI (9.1093837015e-31) /* kg */ #define mp_me 1836.15267343 double ekMeVconst; double elecekMeVconst = m_e_SI*c_SI*c_SI*1e-6/e_SI; double carbekMeVconst = 12.*mp_me*elecekMeVconst; double protonekMeVconst = mp_me*elecekMeVconst; if (strcmp(particle, "I2")==0) ekMeVconst = carbekMeVconst; else if (strcmp(particle, "proton")==0){ ekMeVconst = protonekMeVconst; particle = "I2"; } else ekMeVconst = elecekMeVconst; //TODO: Race this (untested!) bit of code against glob on a large VPIC run on //Lustre //// Count how many files there are in the lostparts directory //char dirname[256] = "../../lostparts"; //struct dirent *dp; //DIR *dir = opendir(dirname); //if(!dir) ERROR("Directory %s not found", dirname); //int count=0; //while (dp = readdir(dir)) ++count; //closedir(dir); //// Construct the (massive) list of files //char **filelist; //ALLOCATE(filelist, count, char*); //dir = opendir(dirname); //for(int i=0;i<count;i++){ // ALLOCATE(filelist[i], 64, char); // filelist[i] = readdir(dir)->d_name; //} char filepath[256]; // Better be big enough sprintf(filepath, "../pb_diagnostic/%s.*", particle); glob_t globbuf; glob(filepath, GLOB_NOSORT, NULL, &globbuf); size_t count = globbuf.gl_pathc; fprintf(stderr, "count is %zu\n", count); // Consider only particles within a certain box // Check if the line that does this is commented below! double xmin = 20e-6/lengthToSI; double xmax = 80e-6/lengthToSI; double ymin = -15e-6/lengthToSI; double ymax = 15e-6/lengthToSI; double zmin = -15e-6/lengthToSI; double zmax = 15e-6/lengthToSI; long double Etot=0; unsigned long long int ntot=0; // There can be a \emph{lot} of particles. 
#pragma omp parallel { // Implicitly private variables declared outside of the loop char filename[256]; float partf[numvars]; double part[numvars]; float u2,ek,thet, phi; int tbin, pbin, j; size_t i; unsigned long int counter; FILE *data; #pragma omp for schedule(guided) reduction(+:hist, Etot, ntot) for(i=0;i<count;i++){ // Check if the filename is one we want //if (!strncmp(filelist[i], "boundary", 8)) continue; sprintf(filename, "%s", globbuf.gl_pathv[i]); //fprintf(stderr, "Working on file %s.\n", filename); counter=0; data = fopen(filename, "rb"); if (data == NULL) ERROR(("Cannot open file %s\n", filename)); while(1){ if (fread(partf, sizeof(float), numvars, data) != numvars){ //printf("fread failed !!!! \n\nEOF was not set!!!!\n\n"); } if (feof(data)){ //printf("breaking\n"); break; } // Cast to double precission to avoid "wierd" floting point edge cases for (j=0;j<numvars;j++) part[j] = partf[j]; // If not in box, ignore //if (part[x]<xmin || part[x]>xmax || part[z]<zmin || part[z]>zmax) continue; //fprintf(stderr, "starting counter %lu\n", counter); // Get energy index u2 = part[ux]*part[ux] + part[uy]*part[uy] + part[uz]*part[uz]; ek = ekMeVconst * u2/ (1. + sqrt(1.+ u2)); if (ek<Ecut) continue; Etot += ek*part[w]; // Increment Etot before putting it in the hist phi = atan2f(part[uy], part[uz]) + M_PI; pbin = (int)(phi/dp); // Get angle index thet = acosf(part[ux]/sqrtf(u2)); //tbin = (int)(thet/dt); tbin = (int) nbinst*.5*(1. - cosf(thet));//TODO: cos of acos // Especially in single precision, the argument of acos above can be // negative unity, necessitating this edge case. 
if (tbin==nbinst) tbin--; // Similarly for atan2 if (pbin==nbinsp) pbin--; if(pbin >= nbinsp || tbin >= nbinst || tbin < 0 || pbin < 0){ fprintf(stderr, "pbin is %d tbin is %d count is %lu\n", pbin, tbin, counter); fprintf(stderr, "ux is %.18e uy is %.18e uz is %.18e\n", part[ux], part[uy], part[uz]); fprintf(stderr, "The ux is %.18e and the fraction is %.18e\n", part[ux], part[ux]/sqrtf(u2)); fprintf(stderr, "is equal evaluates to %d\n", -1.*part[ux]==sqrtf(u2)); fprintf(stderr, "ux/sqrtf(u2) is %.18e\nsqrtf(u2)/ux is %.18e\n", sqrtf(u2)/part[ux], part[ux]/sqrtf(u2)); ERROR(("You're probably about to segfault")); } hist[tbin][pbin] += part[w]*ek; ntot++; counter++; //fprintf(stderr, "Done with this one\n"); } fclose(data); fprintf(stderr, "File %zu had %lu entries\n", i, counter); } } globfree(&globbuf); fprintf(stdout, "The total number of simulation particles used is %lld\n", ntot); fprintf(stdout, "The total energy in the particles is %Lg MeV, or %Lg J.\n", Etot, Etot*1e6*e_SI); // Write the hist params for the Python plotter to read FILE * out; sprintf(temp, "%s%s", particle, "edepparams.txt"); out = fopen(temp, "w"); fprintf(out, "# Parameter file used for the Python 2D hist plotter.\n"); fprintf(out, "%.14e Theta minimum.\n", tmin); fprintf(out, "%.14e Theta maximum\n", tmax); fprintf(out, "%.14e Phi minimum.\n", pmin); fprintf(out, "%.14e Phi maximum\n", pmax); fprintf(out, "%d Number of bins in theta\n", nbinst); fprintf(out, "%d Number of bins in phi\n", nbinsp); fprintf(out, "%s Particle species\n", particle); fprintf(out, "%.14e ekMeVconst\n", ekMeVconst); fprintf(out, "%llu Number of particles used\n", ntot); fprintf(out, "%.14e Total Energy in histogram (MeV*weight)\n", Etot); fclose(out); int i,j; // Store the histogram for Python plotting // Cast to double so numpy can understand double histD[nbinst][nbinsp]; for(i=0;i<nbinst;i++) for(j=0;j<nbinsp;j++) histD[i][j] = hist[i][j]; sprintf(temp, "%s%s", particle, "edep.bin"); out = fopen(temp, "w"); 
for(i=0;i<nbinst;i++) fwrite(histD[i], sizeof(double), nbinsp, out); fclose(out); return 0; } // main //--------------------------------------------------------------------- // For ERROR macro // void print_log( const char *fmt, ... ) { va_list ap; va_start( ap, fmt ); vfprintf( stderr, fmt, ap ); va_end( ap ); fflush( stderr ); } // print_log
omp-taskgroup.c
#include <omp.h> #include <unistd.h> #include <stdio.h> #define THREADS 2 #define LEN 2 int main(void) { int counter[THREADS]={0}; #pragma omp parallel num_threads(THREADS) { #pragma omp taskgroup for (counter[omp_get_thread_num()]=0; counter[omp_get_thread_num()]<LEN; counter[omp_get_thread_num()]++) { #pragma omp task { #pragma omp task {usleep(10);} #pragma omp task {usleep(10);} #pragma omp task {usleep(10);} } } #pragma omp taskgroup for (counter[omp_get_thread_num()]=0; counter[omp_get_thread_num()]<LEN; counter[omp_get_thread_num()]++) { #pragma omp task { #pragma omp task { #pragma omp task {usleep(10);} #pragma omp task {usleep(10);} } #pragma omp task {usleep(10);} } } } return 0; }
implicit_blender.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) Blender Foundation
 * All rights reserved.
 */

/** \file
 * \ingroup bph
 */

#include "implicit.h"

#ifdef IMPLICIT_SOLVER_BLENDER

# include "MEM_guardedalloc.h"

# include "DNA_scene_types.h"
# include "DNA_object_types.h"
# include "DNA_object_force_types.h"
# include "DNA_meshdata_types.h"
# include "DNA_texture_types.h"

# include "BLI_math.h"
# include "BLI_utildefines.h"

# include "BKE_cloth.h"
# include "BKE_collision.h"
# include "BKE_effect.h"

# include "BPH_mass_spring.h"

# ifdef __GNUC__
#  pragma GCC diagnostic ignored "-Wtype-limits"
# endif

/* Below this vertex count the OpenMP parallel regions are not worth it. */
# ifdef _OPENMP
#  define CLOTH_OPENMP_LIMIT 512
# endif

//#define DEBUG_TIME

# ifdef DEBUG_TIME
#  include "PIL_time.h"
# endif

/* Constant 3x3 identity and zero matrices used to (re)initialize blocks. */
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

/* NOTE(review): dead block; the "defineDO_INLINE" tokens here are missing a
 * space, but the whole region is compiled out by "#if 0". */
# if 0
#  define C99
#  ifdef C99
#   defineDO_INLINE inline
#  else
#   defineDO_INLINE static
#  endif
# endif /* if 0 */

struct Cloth;

//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////

/* DEFINITIONS */
/* lfVector = "long float vector": an array of per-vertex float[3] entries. */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
  float m[3][3];     /* 3x3 matrix */
  unsigned int c, r; /* column and row number */
  /* int pinned; // is this vertex allowed to move? */
  float n1, n2, n3;    /* three normal vectors for collision constrains */
  unsigned int vcount; /* vertex count */
  unsigned int scount; /* spring count */
} fmatrix3x3;

///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
  to[0] = from[0] * scalar;
  to[1] = from[1] * scalar;
  to[2] = from[2] * scalar;
}

/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
  mul_fvector_S(to[0], vectorB, vectorA[0]);
  mul_fvector_S(to[1], vectorB, vectorA[1]);
  mul_fvector_S(to[2], vectorB, vectorA[2]);
}

/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);

  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}

# if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}

///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
# endif

/* create long vector */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
  // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}

/* delete long vector */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
  if (fLongVector != NULL) {
    MEM_freeN(fLongVector);
    // cloth_aligned_free(&MEMORY_BASE, fLongVector);
  }
}

/* copy long vector */
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
  memcpy(to, from, verts * sizeof(lfVector));
}

/* init long vector with float[3] */
DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    copy_v3_v3(fLongVector[i], vector);
  }
}

/* zero long vector with float[3] */
/* NOTE: memset's fill value converts to byte 0, which is IEEE-754 +0.0f. */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
  memset(to, 0.0f, verts * sizeof(lfVector));
}

/* multiply long vector with scalar*/
DO_INLINE void mul_lfvectorS(float (*to)[3],
                             float (*fLongVector)[3],
                             float scalar,
                             unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    mul_fvector_S(to[i], fLongVector[i], scalar);
  }
}

/* multiply long vector with scalar*/
/* A -= B * float */
DO_INLINE void submul_lfvectorS(float (*to)[3],
                                float (*fLongVector)[3],
                                float scalar,
                                unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBMUL(to[i], fLongVector[i], scalar);
  }
}

/* dot product for big vector */
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts)
{
  long i = 0;
  float temp = 0.0;
  // XXX brecht, disabled this for now (first schedule line was already disabled),
  // due to non-commutative nature of floating point ops this makes the sim give
  // different results each time you run it!
  // schedule(guided, 2)
  //#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < (long)verts; i++) {
    temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]);
  }
  return temp;
}

/* A = B + C --> for big vector */
DO_INLINE void add_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
  }
}

/* A = B + C * float --> for big vector */
DO_INLINE void add_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
  }
}

/* A = B * float + C * float --> for big vector */
DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3],
                                       float (*fLongVectorA)[3],
                                       float aS,
                                       float (*fLongVectorB)[3],
                                       float bS,
                                       unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS);
  }
}

/* A = B - C * float --> for big vector */
DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
  }
}

/* A = B - C --> for big vector */
DO_INLINE void sub_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
  }
}

///////////////////////////
// 3x3 matrix
///////////////////////////
# if 0
/* printf 3x3 matrix on console: for debug output */
static void print_fmatrix(float m3[3][3])
{
  printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]);
  printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]);
  printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]);
}

static void 
print_sparse_matrix(fmatrix3x3 *m)
{
  if (m) {
    unsigned int i;
    for (i = 0; i < m[0].vcount + m[0].scount; i++) {
      printf("%d:\n", i);
      print_fmatrix(m[i].m);
    }
  }
}
# endif

# if 0
static void print_lvector(lfVector *v, int numverts)
{
  int i;
  for (i = 0; i < numverts; ++i) {
    if (i > 0) {
      printf("\n");
    }

    printf("%f,\n", v[i][0]);
    printf("%f,\n", v[i][1]);
    printf("%f,\n", v[i][2]);
  }
}
# endif

# if 0
/* Debug: expand the sparse symmetric block matrix to a dense size*size array
 * and print it (mirroring off-diagonal blocks into both triangles). */
static void print_bfmatrix(fmatrix3x3 *m)
{
  int tot = m[0].vcount + m[0].scount;
  int size = m[0].vcount * 3;
  float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix");
  int q, i, j;

  for (q = 0; q < tot; ++q) {
    int k = 3 * m[q].r;
    int l = 3 * m[q].c;

    for (j = 0; j < 3; ++j) {
      for (i = 0; i < 3; ++i) {
        // if (t[k + i + (l + j) * size] != 0.0f) {
        //   printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c);
        // }
        if (k == l) {
          t[k + i + (k + j) * size] += m[q].m[i][j];
        }
        else {
          t[k + i + (l + j) * size] += m[q].m[i][j];
          t[l + j + (k + i) * size] += m[q].m[j][i];
        }
      }
    }
  }

  for (j = 0; j < size; ++j) {
    if (j > 0 && j % 3 == 0) {
      printf("\n");
    }

    for (i = 0; i < size; ++i) {
      if (i > 0 && i % 3 == 0) {
        printf(" ");
      }

      implicit_print_matrix_elem(t[i + j * size]);
    }
    printf("\n");
  }

  MEM_freeN(t);
}
# endif

/* copy 3x3 matrix */
DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3])
{
  // memcpy(to, from, sizeof (float) * 9);
  copy_v3_v3(to[0], from[0]);
  copy_v3_v3(to[1], from[1]);
  copy_v3_v3(to[2], from[2]);
}

/* copy 3x3 matrix */
DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS)
{
  cp_fmatrix(to, ZERO);

  to[0][0] = aS;
  to[1][1] = aS;
  to[2][2] = aS;
}

# if 0
/* calculate determinant of 3x3 matrix */
DO_INLINE float det_fmatrix(float m[3][3])
{
  return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] +
         m[0][1] * m[1][2] * m[2][0] - m[0][0] * m[1][2] * m[2][1] -
         m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2];
}

DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3])
{
  unsigned int i, j;
  float d;

  if ((d = det_fmatrix(from)) == 0) {
    printf("can't build inverse");
    exit(0);
  }
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      int i1 = (i + 1) % 3;
      int i2 = (i + 2) % 3;
      int j1 = (j + 1) % 3;
      int j2 = (j + 2) % 3;
      /** Reverse indexes i&j to take transpose. */
      to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d;
      /**
       * <pre>
       * if (i == j) {
       *   to[i][j] = 1.0f / from[i][j];
       * }
       * else {
       *   to[i][j] = 0;
       * }
       * </pre>
       */
    }
  }
}
# endif

/* 3x3 matrix multiplied by a scalar */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar)
{
  mul_fvector_S(matrix[0], matrix[0], scalar);
  mul_fvector_S(matrix[1], matrix[1], scalar);
  mul_fvector_S(matrix[2], matrix[2], scalar);
}

/* a vector multiplied by a 3x3 matrix */
/* STATUS: verified */
/* NOTE: indexes matrix column-first, i.e. computes M^T * from. */
DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3])
{
  to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
  to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
  to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}

/* 3x3 matrix multiplied by a vector */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3])
{
  to[0] = dot_v3v3(matrix[0], from);
  to[1] = dot_v3v3(matrix[1], from);
  to[2] = dot_v3v3(matrix[2], from);
}

/* 3x3 matrix addition with 3x3 matrix */
DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  add_v3_v3v3(to[0], matrixA[0], matrixB[0]);
  add_v3_v3v3(to[1], matrixA[1], matrixB[1]);
  add_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}

/* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */
DO_INLINE void subadd_fmatrixS_fmatrixS(
    float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS)
{
  VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS);
  VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS);
  VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS);
}

/* A = B - C (3x3 matrix subtraction with 3x3 matrix) */
DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  sub_v3_v3v3(to[0], matrixA[0], matrixB[0]);
  sub_v3_v3v3(to[1], matrixA[1], matrixB[1]);
  sub_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}

/////////////////////////////////////////////////////////////////
// special functions
/////////////////////////////////////////////////////////////////

/* 3x3 matrix multiplied+added by a vector */
/* STATUS: verified */
DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3])
{
  to[0] += dot_v3v3(matrix[0], from);
  to[1] += dot_v3v3(matrix[1], from);
  to[2] += dot_v3v3(matrix[2], from);
}

/* As above but accumulates M^T * from (transposed multiply). */
DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3])
{
  to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
  to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
  to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}

BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3])
{
  mul_v3_v3fl(r[0], a, b[0]);
  mul_v3_v3fl(r[1], a, b[1]);
  mul_v3_v3fl(r[2], a, b[2]);
}

BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3])
{
  cross_v3_v3v3(r[0], v, m[0]);
  cross_v3_v3v3(r[1], v, m[1]);
  cross_v3_v3v3(r[2], v, m[2]);
}

/* Skew-symmetric cross-product matrix of v. */
BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3])
{
  r[0][0] = 0.0f;
  r[1][0] = v[2];
  r[2][0] = -v[1];
  r[0][1] = -v[2];
  r[1][1] = 0.0f;
  r[2][1] = v[0];
  r[0][2] = v[1];
  r[1][2] = -v[0];
  r[2][2] = 0.0f;
}

/* r += m * f, element-wise. */
BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f)
{
  r[0][0] += m[0][0] * f;
  r[0][1] += m[0][1] * f;
  r[0][2] += m[0][2] * f;
  r[1][0] += m[1][0] * f;
  r[1][1] += m[1][1] * f;
  r[1][2] += m[1][2] * f;
  r[2][0] += m[2][0] * f;
  r[2][1] += m[2][1] * f;
  r[2][2] += m[2][2] * f;
}
/////////////////////////////////////////////////////////////////

///////////////////////////
// SPARSE SYMMETRIC big matrix with 3x3 matrix entries
///////////////////////////
/* printf a big matrix on console: for debug output */
# if 0
static void print_bfmatrix(fmatrix3x3 *m3)
{
  unsigned int i = 0;

  for (i = 0; i < m3[0].vcount + m3[0].scount; i++) {
    print_fmatrix(m3[i].m);
  }
}
# endif

BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c)
{
  matrix->r = r;
  matrix->c = c;
}

/* create big matrix */
/* Layout: entries [0, verts) are the diagonal blocks (r == c == i),
 * entries [verts, verts+springs) are the off-diagonal spring blocks. */
DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs)
{
  // TODO: check if memory allocation was successful */
  fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs),
                                               "cloth_implicit_alloc_matrix");
  int i;

  temp[0].vcount = verts;
  temp[0].scount = springs;

  /* vertex part of the matrix is diagonal blocks */
  for (i = 0; i < verts; ++i) {
    init_fmatrix(temp + i, i, i);
  }

  return temp;
}

/* delete big matrix */
DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix)
{
  if (matrix != NULL) {
    MEM_freeN(matrix);
  }
}

/* copy big matrix */
DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from)
{
  // TODO bounds checking
  memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount));
}

/* init big matrix */
// slow in parallel
DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  unsigned int i;

  for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
}

/* init the diagonal of big matrix */
// slow in parallel
DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  unsigned int i, j;
  float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

  /* diagonal blocks get m3, spring (off-diagonal) blocks get zeroed */
  for (i = 0; i < matrix[0].vcount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
  for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) {
    cp_fmatrix(matrix[j].m, tmatrix);
  }
}

/* SPARSE SYMMETRIC multiply big matrix with long vector*/
/* STATUS: verified */
DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector)
{
  unsigned int vcount = from[0].vcount;
  lfVector *temp = create_lfvector(vcount);

  zero_lfvector(to, vcount);
# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT) { # pragma omp section { for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) { /* This is the lower triangle of the sparse matrix, * therefore multiplication occurs with transposed submatrices. */ muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]); } } # pragma omp section { for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) { muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]); } } } add_lfvector_lfvector(to, to, temp, from[0].vcount); del_lfvector(temp); } /* SPARSE SYMMETRIC sub big matrix with big matrix*/ /* A -= B * float + C * float --> for big matrix */ /* VERIFIED */ DO_INLINE void subadd_bfmatrixS_bfmatrixS( fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS) { unsigned int i = 0; /* process diagonal elements */ for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) { subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS); } } /////////////////////////////////////////////////////////////////// // simulator start /////////////////////////////////////////////////////////////////// typedef struct Implicit_Data { /* inputs */ fmatrix3x3 *bigI; /* identity (constant) */ fmatrix3x3 *tfm; /* local coordinate transform */ fmatrix3x3 *M; /* masses */ lfVector *F; /* forces */ fmatrix3x3 *dFdV, *dFdX; /* force jacobians */ int num_blocks; /* number of off-diagonal blocks (springs) */ /* motion state data */ lfVector *X, *Xnew; /* positions */ lfVector *V, *Vnew; /* velocities */ /* internal solver data */ lfVector *B; /* B for A*dV = B */ fmatrix3x3 *A; /* A for A*dV = B */ lfVector *dV; /* velocity change (solution of A*dV = B) */ lfVector *z; /* target velocity in constrained directions */ fmatrix3x3 *S; /* filtering matrix for constraints */ fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */ } Implicit_Data; Implicit_Data *BPH_mass_spring_solver_create(int numverts, int 
numsprings) { Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat"); /* process diagonal elements */ id->tfm = create_bfmatrix(numverts, 0); id->A = create_bfmatrix(numverts, numsprings); id->dFdV = create_bfmatrix(numverts, numsprings); id->dFdX = create_bfmatrix(numverts, numsprings); id->S = create_bfmatrix(numverts, 0); id->Pinv = create_bfmatrix(numverts, numsprings); id->P = create_bfmatrix(numverts, numsprings); id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs id->M = create_bfmatrix(numverts, numsprings); id->X = create_lfvector(numverts); id->Xnew = create_lfvector(numverts); id->V = create_lfvector(numverts); id->Vnew = create_lfvector(numverts); id->F = create_lfvector(numverts); id->B = create_lfvector(numverts); id->dV = create_lfvector(numverts); id->z = create_lfvector(numverts); initdiag_bfmatrix(id->bigI, I); return id; } void BPH_mass_spring_solver_free(Implicit_Data *id) { del_bfmatrix(id->tfm); del_bfmatrix(id->A); del_bfmatrix(id->dFdV); del_bfmatrix(id->dFdX); del_bfmatrix(id->S); del_bfmatrix(id->P); del_bfmatrix(id->Pinv); del_bfmatrix(id->bigI); del_bfmatrix(id->M); del_lfvector(id->X); del_lfvector(id->Xnew); del_lfvector(id->V); del_lfvector(id->Vnew); del_lfvector(id->F); del_lfvector(id->B); del_lfvector(id->dV); del_lfvector(id->z); MEM_freeN(id); } /* ==== Transformation from/to root reference frames ==== */ BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { copy_v3_v3(r, v); mul_transposed_m3_v3(data->tfm[index].m, r); } BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3]) { mul_v3_m3v3(r, data->tfm[index].m, v); } BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3]) { float trot[3][3]; copy_m3_m3(trot, data->tfm[index].m); transpose_m3(trot); mul_m3_m3m3(r, trot, m); } BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], 
float m[3][3]) { mul_m3_m3m3(r, data->tfm[index].m, m); } /* ================================ */ DO_INLINE void filter(lfVector *V, fmatrix3x3 *S) { unsigned int i = 0; for (i = 0; i < S[0].vcount; i++) { mul_m3_v3(S[i].m, V[S[i].r]); } } /* this version of the CG algorithm does not work very well with partial constraints * (where S has non-zero elements). */ # if 0 static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */; lfVector *q, *d, *tmp, *r; float s, starget, a, s_prev; unsigned int numverts = lA[0].vcount; q = create_lfvector(numverts); d = create_lfvector(numverts); tmp = create_lfvector(numverts); r = create_lfvector(numverts); // zero_lfvector(ldV, CLOTHPARTICLES); filter(ldV, S); add_lfvector_lfvector(ldV, ldV, z, numverts); // r = B - Mul(tmp, A, X); // just use B if X known to be zero cp_lfvector(r, lB, numverts); mul_bfmatrix_lfvector(tmp, lA, ldV); sub_lfvector_lfvector(r, r, tmp, numverts); filter(r, S); cp_lfvector(d, r, numverts); s = dot_lfvector(r, r, numverts); starget = s * sqrtf(conjgrad_epsilon); while (s > starget && conjgrad_loopcount < conjgrad_looplimit) { // Mul(q, A, d); // q = A*d; mul_bfmatrix_lfvector(q, lA, d); filter(q, S); a = s / dot_lfvector(d, q, numverts); // X = X + d*a; add_lfvector_lfvectorS(ldV, ldV, d, a, numverts); // r = r - q*a; sub_lfvector_lfvectorS(r, r, q, a, numverts); s_prev = s; s = dot_lfvector(r, r, numverts); //d = r+d*(s/s_prev); add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts); filter(d, S); conjgrad_loopcount++; } /* conjgrad_lasterror = s; */ /* UNUSED */ del_lfvector(q); del_lfvector(d); del_lfvector(tmp); del_lfvector(r); // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount); return conjgrad_loopcount < conjgrad_looplimit; // true means we reached desired accuracy in 
given time - ie stable } # endif static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S, ImplicitSolverResult *result) { // Solves for unknown X in equation AX=B unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100; float conjgrad_epsilon = 0.01f; unsigned int numverts = lA[0].vcount; lfVector *fB = create_lfvector(numverts); lfVector *AdV = create_lfvector(numverts); lfVector *r = create_lfvector(numverts); lfVector *c = create_lfvector(numverts); lfVector *q = create_lfvector(numverts); lfVector *s = create_lfvector(numverts); float bnorm2, delta_new, delta_old, delta_target, alpha; cp_lfvector(ldV, z, numverts); /* d0 = filter(B)^T * P * filter(B) */ cp_lfvector(fB, lB, numverts); filter(fB, S); bnorm2 = dot_lfvector(fB, fB, numverts); delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2; /* r = filter(B - A * dV) */ mul_bfmatrix_lfvector(AdV, lA, ldV); sub_lfvector_lfvector(r, lB, AdV, numverts); filter(r, S); /* c = filter(P^-1 * r) */ cp_lfvector(c, r, numverts); filter(c, S); /* delta = r^T * c */ delta_new = dot_lfvector(r, c, numverts); # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== A ====\n"); print_bfmatrix(lA); printf("==== z ====\n"); print_lvector(z, numverts); printf("==== B ====\n"); print_lvector(lB, numverts); printf("==== S ====\n"); print_bfmatrix(S); # endif while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) { mul_bfmatrix_lfvector(q, lA, c); filter(q, S); alpha = delta_new / dot_lfvector(c, q, numverts); add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts); add_lfvector_lfvectorS(r, r, q, -alpha, numverts); /* s = P^-1 * r */ cp_lfvector(s, r, numverts); delta_old = delta_new; delta_new = dot_lfvector(r, s, numverts); add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts); filter(c, S); conjgrad_loopcount++; } # ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT printf("==== dV ====\n"); print_lvector(ldV, numverts); printf("========\n"); # endif 
  del_lfvector(fB);
  del_lfvector(AdV);
  del_lfvector(r);
  del_lfvector(c);
  del_lfvector(q);
  del_lfvector(s);

  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  /* Report convergence status, iteration count and relative residual error;
   * guard against division by zero when the (filtered) rhs is all zero. */
  result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS :
                                                             BPH_SOLVER_NO_CONVERGENCE;
  result->iterations = conjgrad_loopcount;
  result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;

  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}

# if 0
// block diagonalizer
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
  unsigned int i = 0;

  // Take only the diagonal blocks of A
  // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < lA[0].vcount; i++) {
    // block diagonalizer
    cp_fmatrix(P[i].m, lA[i].m);
    inverse_fmatrix(Pinv[i].m, P[i].m);
  }
}

# if 0
// version 1.3
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
  float conjgrad_epsilon = 0.0001;  // 0.2 is dt for steps=5
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p, numverts);

  delta0 = deltaNew * sqrt(conjgrad_epsilon);

# ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
# endif

  while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
    iterations++;

    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);

    alpha = deltaNew / dot_lfvector(p, s, numverts);

    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);

    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);

    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);

    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);

    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);

    filter(p, S);
  }

# ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
# endif

  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);

  printf("iterations: %d\n", iterations);

  return iterations < conjgrad_looplimit;
}
# endif

// version 1.4
/* Preconditioned (block-Jacobi) filtered CG, after Ascher & Boxerman;
 * still disabled along with BuildPPinv by the enclosing "#if 0". */
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv,
                           fmatrix3x3 *bigI)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0;
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  lfVector *bhat = create_lfvector(numverts);
  lfVector *btemp = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  initdiag_bfmatrix(bigI, I);
  sub_bfmatrix_Smatrix(bigI, bigI, S);

  // x = Sx_0+(I-S)z
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  // b_hat = S(b-A(I-S)z)
  mul_bfmatrix_lfvector(r, lA, z);
  mul_bfmatrix_lfvector(bhat, bigI, r);
  sub_lfvector_lfvector(bhat, lB, bhat, numverts);

  // r = S(b-Ax)
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  // p = SP^-1r
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  // delta0 = bhat^TP^-1bhat
  mul_prevfmatrix_lfvector(btemp, Pinv, bhat);
  delta0 = dot_lfvector(bhat, btemp, numverts);

  // deltaNew = r^TP
  deltaNew = dot_lfvector(r, p, numverts);

# if 0
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p,
numverts); delta0 = deltaNew * sqrt(conjgrad_epsilon); # endif # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif tol = (0.01 * 0.2); while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) { iterations++; mul_bfmatrix_lfvector(s, lA, p); filter(s, S); alpha = deltaNew / dot_lfvector(p, s, numverts); add_lfvector_lfvectorS(dv, dv, p, alpha, numverts); add_lfvector_lfvectorS(r, r, s, -alpha, numverts); mul_prevfmatrix_lfvector(h, Pinv, r); filter(h, S); deltaOld = deltaNew; deltaNew = dot_lfvector(r, h, numverts); add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts); filter(p, S); } # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered_pre time: %f\n", (float)(end - start)); # endif del_lfvector(btemp); del_lfvector(bhat); del_lfvector(h); del_lfvector(s); del_lfvector(p); del_lfvector(r); // printf("iterations: %d\n", iterations); return iterations < conjgrad_looplimit; } # endif bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result) { unsigned int numverts = data->dFdV[0].vcount; lfVector *dFdXmV = create_lfvector(numverts); zero_lfvector(data->dV, numverts); cp_bfmatrix(data->A, data->M); subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt)); mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V); add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts); # ifdef DEBUG_TIME double start = PIL_check_seconds_timer(); # endif cg_filtered(data->dV, data->A, data->B, data->z, data->S, result); /* conjugate gradient algorithm to solve Ax=b */ // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI); # ifdef DEBUG_TIME double end = PIL_check_seconds_timer(); printf("cg_filtered calc time: %f\n", (float)(end - start)); # endif // advance velocities add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts); del_lfvector(dFdXmV); return result->status == BPH_SOLVER_SUCCESS; } bool 
BPH_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
  int numverts = data->M[0].vcount;

  // advance positions: Xnew = X + dt * Vnew
  add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);

  return true;
}

/* Commit the solved step: copy Xnew/Vnew into the current X/V state. */
void BPH_mass_spring_apply_result(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  cp_lfvector(data->X, data->Xnew, numverts);
  cp_lfvector(data->V, data->Vnew, numverts);
}

/* Set the (diagonal) 3x3 mass matrix block for one vertex to mass * identity. */
void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
  unit_m3(data->M[index].m);
  mul_m3_fl(data->M[index].m, mass);
}

/* Store the per-vertex root-frame transform (identity when the root-frame
 * feature is compiled out). */
void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
# ifdef CLOTH_ROOT_FRAME
  copy_m3_m3(data->tfm[index].m, tfm);
# else
  unit_m3(data->tfm[index].m);
  (void)tfm;
# endif
}

/* Set both position and velocity of a vertex, converting from world space
 * into the solver's root space. */
void BPH_mass_spring_set_motion_state(Implicit_Data *data,
                                      int index,
                                      const float x[3],
                                      const float v[3])
{
  world_to_root_v3(data, index, data->X[index], x);
  world_to_root_v3(data, index, data->V[index], v);
}

void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->X[index], x);
}

void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->V[index], v);
}

/* Read back position and/or velocity in world space; either output pointer
 * may be NULL to skip it. */
void BPH_mass_spring_get_motion_state(struct Implicit_Data *data,
                                      int index,
                                      float x[3],
                                      float v[3])
{
  if (x) {
    root_to_world_v3(data, index, x, data->X[index]);
  }
  if (v) {
    root_to_world_v3(data, index, v, data->V[index]);
  }
}

void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->X[index]);
}

void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->Xnew[index]);
}

void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->Xnew[index], x);
}

void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data, index, v, data->Vnew[index]);
}

void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->Vnew[index], v);
}

/* -------------------------------- */

/* Allocate the next off-diagonal 3x3 block for the vertex pair (v1, v2) in
 * all sparse matrices that carry spring entries; returns the block index. */
static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
  int s = data->M[0].vcount + data->num_blocks; /* index from array start */
  BLI_assert(s < data->M[0].vcount + data->M[0].scount);
  ++data->num_blocks;

  /* tfm and S don't have spring entries (diagonal blocks only) */
  init_fmatrix(data->bigI + s, v1, v2);
  init_fmatrix(data->M + s, v1, v2);
  init_fmatrix(data->dFdX + s, v1, v2);
  init_fmatrix(data->dFdV + s, v1, v2);
  init_fmatrix(data->A + s, v1, v2);
  init_fmatrix(data->P + s, v1, v2);
  init_fmatrix(data->Pinv + s, v1, v2);

  return s;
}

/* Reset all constraint filters S to identity (unconstrained) and clear the
 * constraint target velocities z. */
void BPH_mass_spring_clear_constraints(Implicit_Data *data)
{
  int i, numverts = data->S[0].vcount;
  for (i = 0; i < numverts; ++i) {
    unit_m3(data->S[i].m);
    zero_v3(data->z[i]);
  }
}

/* Fully pin a vertex (0 degrees of freedom): S becomes zero and the vertex
 * moves only with the prescribed velocity delta dV. */
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
  zero_m3(data->S[index].m);

  world_to_root_v3(data, index, data->z[index], dV);
}

/* Constrain a vertex to one degree of freedom: remove motion along the two
 * (world-space) directions c1 and c2. */
void BPH_mass_spring_add_constraint_ndof1(
    Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
  float m[3][3], p[3], q[3], u[3], cmat[3][3];

  /* S = I - c1*c1^T - c2*c2^T (projector onto the remaining DOF). */
  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);

  world_to_root_v3(data, index, q, c2);
  mul_fvectorT_fvector(cmat, q, q);
  sub_m3_m3m3(m, m, cmat);

  /* XXX not sure but multiplication should work here */
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Constrain a vertex to two degrees of freedom: remove motion along the
 * single (world-space) direction c1. */
void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data,
                                          int index,
                                          const float c1[3],
                                          const float dV[3])
{
  float m[3][3], p[3], u[3], cmat[3][3];

  /* S = I - c1*c1^T. */
  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Zero the force accumulator F and the force jacobians dFdX/dFdV, and reset
 * the off-diagonal block counter for the next force-buildup pass. */
void BPH_mass_spring_clear_forces(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  zero_lfvector(data->F, numverts);
  init_bfmatrix(data->dFdX, ZERO);
  init_bfmatrix(data->dFdV, ZERO);

  data->num_blocks = 0;
}

/* Add fictitious forces (euler, coriolis, centrifugal) arising from a moving
 * reference frame, including their position/velocity jacobians.
 * Compiled out to a no-op when CLOTH_ROOT_FRAME is undefined. */
void BPH_mass_spring_force_reference_frame(Implicit_Data *data,
                                           int index,
                                           const float acceleration[3],
                                           const float omega[3],
                                           const float domega_dt[3],
                                           float mass)
{
# ifdef CLOTH_ROOT_FRAME
  float acc[3], w[3], dwdt[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float euler[3], coriolis[3], centrifugal[3], rotvel[3];
  float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];

  world_to_root_v3(data, index, acc, acceleration);
  world_to_root_v3(data, index, w, omega);
  world_to_root_v3(data, index, dwdt, domega_dt);

  cross_v3_v3v3(euler, dwdt, data->X[index]);
  cross_v3_v3v3(coriolis, w, data->V[index]);
  mul_v3_fl(coriolis, 2.0f);
  cross_v3_v3v3(rotvel, w, data->X[index]);
  cross_v3_v3v3(centrifugal, w, rotvel);

  sub_v3_v3v3(f, acc, euler);
  sub_v3_v3(f, coriolis);
  sub_v3_v3(f, centrifugal);

  mul_v3_fl(f, mass); /* F = m * a */

  cross_v3_identity(deuler, dwdt);
  cross_v3_identity(dcoriolis, w);
  mul_m3_fl(dcoriolis, 2.0f);
  cross_v3_identity(drotvel, w);
  cross_m3_v3m3(dcentrifugal, w, drotvel);

  add_m3_m3m3(dfdx, deuler, dcentrifugal);
  negate_m3(dfdx);
  mul_m3_fl(dfdx, mass);

  copy_m3_m3(dfdv, dcoriolis);
  negate_m3(dfdv);
  mul_m3_fl(dfdv, mass);

  add_v3_v3(data->F[index], f);
  add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
  add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
# else
  (void)data;
  (void)index;
  (void)acceleration;
  (void)omega;
  (void)domega_dt;
# endif
}

void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
  /* force = mass * acceleration (in this case: gravity) */
  float f[3];
  world_to_root_v3(data, index, f, g);
  mul_v3_fl(f, mass);

  add_v3_v3(data->F[index], f);
}

/* Linear velocity drag: F -= drag * V for every vertex, with the matching
 * constant jacobian -drag * I added to dFdV. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  int i, numverts = data->M[0].vcount;
  for (i = 0; i < numverts; i++) {
    float tmp[3][3];

    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[i], data->V[i], -drag);

    copy_m3_m3(tmp, I);
    mul_m3_fl(tmp, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
  }
}

/* Add an externally computed force and its jacobians (given in world space)
 * to a single vertex. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);

  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}

/* Compute the unit normal of triangle (v1, v2, v3) into `nor` and return the
 * (unnormalized cross-product) length, i.e. twice the triangle area. */
static float calc_nor_area_tri(float nor[3],
                               const float v1[3],
                               const float v2[3],
                               const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);

  cross_v3_v3v3(nor, n1, n2);
  return normalize_v3(nor);
}

/* XXX does not support force jacobians yet,
 * since the effector system does not provide them either. */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  float win[3], nor[3], area;
  float factor;

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* Distribute the wind force over the three corners. */
  factor = effector_scale * area / 3.0f;

  world_to_root_v3(data, v1, win, winvec[v1]);
  madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v2, win, winvec[v2]);
  madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v3, win, winvec[v3]);
  madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}

/* Wind force on a cylindrical segment endpoint; jacobian outputs are unused. */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX
arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);

  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }

  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  /* NOTE(review): assumes |cos_alpha| <= 1 (dir unit length); otherwise the
   * sqrtf argument could go slightly negative — confirm upstream. */
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);

  mul_v3_v3fl(f, wind, density * cross_section);
}

/* Wind force on an edge (hair segment) with per-endpoint radii. */
void BPH_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];

  sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
  length = normalize_v3(dir);

  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);

  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}

/* Simple per-vertex wind force proportional to the wind vector. */
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */

  float wind[3];
  float f[3];

  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}

BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
  //
  // return  ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);

  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}

/* unused */
# if 0
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  // inner spring damping
  // vel is the relative velocity of the endpoints.
  //
  // return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
  mul_fvectorT_fvector(to, dir, dir);
  sub_fmatrix_fmatrix(to, I, to);
  mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
# endif

BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
  // derivative of force wrt velocity
  outerproduct(to, dir, dir);
  mul_m3_fl(to, -damping);
}

/* Polynomial part of the Choi & Ko compression force model. */
BLI_INLINE float fb(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  float xxxx = xxx * x;
  return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f);
}

/* Derivative of fb() with respect to length/L. */
BLI_INLINE float fbderiv(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;

  return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f);
}

/* Clamped compression force: the larger of the polynomial model and a linear
 * fallback cb*(length - L). */
BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return fbstar_fl;
  }
  else {
    return tempfb_fl;
  }
}

// function to calculate bending spring force (taken from Choi & Co)
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return -cb;
  }
  else {
    return -kb * fbderiv(length, L);
  }
}

/* calculate elongation: extent, unit direction, length and relative velocity
 * of the spring between vertices i and j */
BLI_INLINE bool spring_length(Implicit_Data *data,
                              int i,
                              int j,
                              float r_extent[3],
                              float r_dir[3],
                              float *r_length,
                              float r_vel[3])
{
  sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
  sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
  *r_length = len_v3(r_extent);

  if (*r_length > ALMOST_ZERO) {
# if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        // cut spring!
        s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }

  return true;
}

/* Scatter a pairwise spring force f and its jacobians into the global force
 * vector and sparse dFdX/dFdV matrices (diagonal blocks i, j and the new
 * off-diagonal block ij). */
BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);

  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);

  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}

/* Structural (tension/compression) spring between vertices i and j.
 * Returns false when the spring is inactive for this configuration. */
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* This code computes not only the force, but also its derivative.
     Zero derivative effectively disables the spring for the implicit solver.
     Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;

    damping = damping_tension;

    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);

    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression.
     */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */

    damping = damping_compression;

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    return false;
  }

  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);

  apply_spring(data, i, j, f, dfdx, dfdv);

  return true;
}

/* See "Stable but Responsive Cloth" (Choi, Ko 2005) */
bool BPH_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* Bending springs only push back when compressed below rest length. */
  if (length < restlen) {
    float f[3], dfdx[3][3], dfdv[3][3];

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));

    /* XXX damping not supported */
    zero_m3(dfdv);

    apply_spring(data, i, j, f, dfdx, dfdv);

    return true;
  }
  else {
    return false;
  }
}

/* Average of `len` vectors selected by the index list `inds`. */
BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3])
{
  float fact = 1.0f / (float)len;
  zero_v3(r_avg);

  for (int i = 0; i < len; i++) {
    madd_v3_v3fl(r_avg, data[inds[i]], fact);
  }
}

/* Normal of the triangle formed by vertices i, j and the average of `inds`. */
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float mid[3];

  poly_avg(data, inds, len, mid);

  normal_tri_v3(r_dir, data[i], data[j], mid);
}

/* Midpoint of vectors i and j. */
BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3])
{
  r_avg[0] = (data[i][0] + data[j][0]) * 0.5f;
  r_avg[1] = (data[i][1] + data[j][1]) * 0.5f;
  r_avg[2] = (data[i][2] + data[j][2]) * 0.5f;
}

/* Unit direction from vertex j to vertex i. */
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}

/* Signed dihedral angle between face normals dir_a/dir_b around edge dir_e. */
BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
  float cos, sin;
  float tmp[3];

  cos = dot_v3v3(dir_a, dir_b);

  cross_v3_v3v3(tmp, dir_a, dir_b);
  sin =
dot_v3v3(tmp, dir_e);

  return atan2f(sin, cos);
}

/* Gather the geometry of an angular (dihedral) spring around edge (i, j):
 * face normals, bend angle and face velocities relative to the edge. */
BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];

  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);

  edge_norm(data->X, i, j, dir_e);

  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);

  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);

  edge_avg(data->V, i, j, vel_e);

  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}

/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation". */
bool BPH_mass_spring_force_spring_angular(Implicit_Data *data,
                                          int i,
                                          int j,
                                          int *i_a,
                                          int *i_b,
                                          int len_a,
                                          int len_b,
                                          float restang,
                                          float stiffness,
                                          float damping)
{
  float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
  float f_a[3], f_b[3], f_e[3];
  float force;
  int x;

  spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);

  /* spring force */
  force = stiffness * (angle - restang);

  /* damping force */
  force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));

  /* Force distributed evenly over the vertices of each face. */
  mul_v3_v3fl(f_a, dir_a, force / len_a);
  mul_v3_v3fl(f_b, dir_b, force / len_b);

  for (x = 0; x < len_a; x++) {
    add_v3_v3(data->F[i_a[x]], f_a);
  }
  for (x = 0; x < len_b; x++) {
    add_v3_v3(data->F[i_b[x]], f_b);
  }

  /* Counterforce on the shared edge so total momentum is conserved. */
  mul_v3_v3fl(f_a, dir_a, force * 0.5f);
  mul_v3_v3fl(f_b, dir_b, force * 0.5f);
  add_v3_v3v3(f_e, f_a, f_b);

  sub_v3_v3(data->F[i], f_e);
  sub_v3_v3(data->F[j], f_e);

  return true;
}

/* Jacobian of a direction vector.
 * Basically the part of the differential orthogonal to the direction,
 * inversely proportional to the length of the edge.
 *
 * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij
 */
BLI_INLINE void spring_grad_dir(
    Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3])
{
  float length;

  sub_v3_v3v3(edge, data->X[j], data->X[i]);
  length = normalize_v3_v3(dir, edge);

  if (length > ALMOST_ZERO) {
    outerproduct(grad_dir, dir, dir);
    sub_m3_m3m3(grad_dir, I, grad_dir);
    mul_m3_fl(grad_dir, 1.0f / length);
  }
  else {
    zero_m3(grad_dir);
  }
}

/* Hair bending force on vertex k of the chain segment (i, j, k), pulling the
 * edge jk toward the `goal` direction. The (q, dx, dv) arguments perturb one
 * vertex's position/velocity, which the finite-difference jacobian
 * estimators below use. */
BLI_INLINE void spring_hairbend_forces(Implicit_Data *data,
                                       int i,
                                       int j,
                                       int k,
                                       const float goal[3],
                                       float stiffness,
                                       float damping,
                                       int q,
                                       const float dx[3],
                                       const float dv[3],
                                       float r_f[3])
{
  float edge_ij[3], dir_ij[3];
  float edge_jk[3], dir_jk[3];
  float vel_ij[3], vel_jk[3], vel_ortho[3];
  float f_bend[3], f_damp[3];
  float fk[3];
  float dist[3];

  zero_v3(fk);

  sub_v3_v3v3(edge_ij, data->X[j], data->X[i]);
  if (q == i) {
    sub_v3_v3(edge_ij, dx);
  }
  if (q == j) {
    add_v3_v3(edge_ij, dx);
  }
  normalize_v3_v3(dir_ij, edge_ij);

  sub_v3_v3v3(edge_jk, data->X[k], data->X[j]);
  if (q == j) {
    sub_v3_v3(edge_jk, dx);
  }
  if (q == k) {
    add_v3_v3(edge_jk, dx);
  }
  normalize_v3_v3(dir_jk, edge_jk);

  sub_v3_v3v3(vel_ij, data->V[j], data->V[i]);
  if (q == i) {
    sub_v3_v3(vel_ij, dv);
  }
  if (q == j) {
    add_v3_v3(vel_ij, dv);
  }

  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
  if (q == j) {
    sub_v3_v3(vel_jk, dv);
  }
  if (q == k) {
    add_v3_v3(vel_jk, dv);
  }

  /* bending force */
  sub_v3_v3v3(dist, goal, edge_jk);
  mul_v3_v3fl(f_bend, dist, stiffness);

  add_v3_v3(fk, f_bend);

  /* damping force */
  madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  mul_v3_v3fl(f_damp, vel_ortho, damping);

  sub_v3_v3(fk, f_damp);

  copy_v3_v3(r_f, fk);
}

/* Finite Differences method for estimating the jacobian of the force */
BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdx[3][3])
{
  const float delta = 0.00001f;  // TODO find a good heuristic for this
  float dvec_null[3][3],
dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;

  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);

  /* XXX TODO offset targets to account for position dependency */

  /* Central difference per axis: (f(x + d/2) - f(x - d/2)) / d. */
  for (a = 0; a < 3; ++a) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f);
    copy_v3_v3(dfdx[a], f);

    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f);
    sub_v3_v3(dfdx[a], f);

    for (b = 0; b < 3; ++b) {
      dfdx[a][b] /= delta;
    }
  }
}

/* Finite Differences method for estimating the jacobian of the force */
BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdv[3][3])
{
  const float delta = 0.00001f;  // TODO find a good heuristic for this
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;

  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);

  /* XXX TODO offset targets to account for position dependency */

  /* Central difference per axis, perturbing velocity instead of position. */
  for (a = 0; a < 3; ++a) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f);
    copy_v3_v3(dfdv[a], f);

    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f);
    sub_v3_v3(dfdv[a], f);

    for (b = 0; b < 3; ++b) {
      dfdv[a][b] /= delta;
    }
  }
}

/* Angular spring that pulls the vertex toward the local target
 * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a)
 */
bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data,
                                               int i,
                                               int j,
                                               int k,
                                               const float target[3],
                                               float stiffness,
                                               float damping)
{
  float goal[3];
  float fj[3], fk[3];
  float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3];

  const float vecnull[3] = {0.0f, 0.0f, 0.0f};

  int block_ij = BPH_mass_spring_add_block(data, i, j);
  int block_jk = BPH_mass_spring_add_block(data, j, k);
  int block_ik = BPH_mass_spring_add_block(data, i, k);

  world_to_root_v3(data, j, goal, target);

  spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk);
  negate_v3_v3(fj, fk); /* counterforce */

  /* Jacobians of the force on k w.r.t. each vertex position, by finite
   * differences; the force on j is the negation, so its jacobians are too. */
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk);
  copy_m3_m3(dfj_dxi, dfk_dxi);
  negate_m3(dfj_dxi);
  copy_m3_m3(dfj_dxj, dfk_dxj);
  negate_m3(dfj_dxj);

  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk);
  copy_m3_m3(dfj_dvi, dfk_dvi);
  negate_m3(dfj_dvi);
  copy_m3_m3(dfj_dvj, dfk_dvj);
  negate_m3(dfj_dvj);

  /* add forces and jacobians to the solver data */

  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);

  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);

  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);

  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj);
  add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk);

  add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi);
  add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj);
  add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi);

  /* XXX analytical calculation of derivatives below is incorrect.
   * This proved to be difficult, but for now just using the finite difference method for
   * estimating the jacobians should be sufficient.
 */
# if 0
  float edge_ij[3], dir_ij[3], grad_dir_ij[3][3];
  float edge_jk[3], dir_jk[3], grad_dir_jk[3][3];
  float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3];
  float target[3];
  float tmp[3][3];
  float fi[3], fj[3], fk[3];
  float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfdvi[3][3];

  // TESTING
  damping = 0.0f;

  zero_v3(fi);
  zero_v3(fj);
  zero_v3(fk);
  zero_m3(dfi_dxi);
  zero_m3(dfj_dxi);
  zero_m3(dfk_dxi);
  zero_m3(dfk_dxj);
  zero_m3(dfk_dxk);

  /* jacobian of direction vectors */
  spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij);
  spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk);

  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);

  /* bending force */
  mul_v3_v3fl(target, dir_ij, restlen);
  sub_v3_v3v3(dist, target, edge_jk);
  mul_v3_v3fl(fk, dist, stiffness);

  /* damping force */
  madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  madd_v3_v3fl(fk, vel_jk_ortho, damping);

  /* XXX this only holds true as long as we assume straight rest shape!
   * eventually will become a bit more involved since the opposite segment
   * gets its own target, under condition of having equal torque on both sides.
   */
  copy_v3_v3(fi, fk); /* counterforce on the middle point */
  sub_v3_v3(fj, fi);
  sub_v3_v3(fj, fk);

  /* === derivatives === */

  madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen);

  madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen);
  madd_m3_m3fl(dfk_dxj, I, stiffness);

  madd_m3_m3fl(dfk_dxk, I, -stiffness);

  copy_m3_m3(dfi_dxi, dfk_dxk);
  negate_m3(dfi_dxi);

  /* dfj_dfi == dfi_dfj due to symmetry,
   * dfi_dfj == dfk_dfj due to fi == fk
   * XXX see comment above on future bent rest shapes */
  copy_m3_m3(dfj_dxi, dfk_dxj);

  /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi);
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj);

  /* add forces and jacobians to the solver data */
  add_v3_v3(data->F[i], fi);
  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);

  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
# endif

  return true;
}

/* Goal spring: pulls vertex i toward a world-space goal position/velocity.
 * Returns false when the vertex is already at the goal (degenerate spring). */
bool BPH_mass_spring_force_spring_goal(Implicit_Data *data,
                                       int i,
                                       const float goal_x[3],
                                       const float goal_v[3],
                                       float stiffness,
                                       float damping)
{
  float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];

  /* goal is in world space */
  world_to_root_v3(data, i, root_goal_x, goal_x);
  world_to_root_v3(data, i, root_goal_v, goal_v);

  sub_v3_v3v3(extent, root_goal_x, data->X[i]);
  sub_v3_v3v3(vel, root_goal_v, data->V[i]);
  length = normalize_v3_v3(dir, extent);

  if (length > ALMOST_ZERO) {
    mul_v3_v3fl(f, dir, stiffness * length);

    // Ascher & Boxman, p.21: Damping only during elongation
    // something wrong with it...
    madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));

    dfdx_spring(dfdx, dir, length, 0.0f, stiffness);
    dfdv_damp(dfdv, dir, damping);

    add_v3_v3(data->F[i], f);
    add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);

    return true;
  }
  else {
    return false;
  }
}

#endif /* IMPLICIT_SOLVER_BLENDER */
key_coh_hash.h
/*
 * (C) copyright 2011, Ismael Garcia, (U.Girona/ViRVIG, Spain & INRIA/ALICE, France)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef KEY_COH_HASH_H_
#define KEY_COH_HASH_H_

#include <libhu/hash_utils.h>

// ------------------------------------------------------------------

// Per-age probe offset table, placed in device constant memory for the
// device back-ends.  NOTE(review): the OMP branch also declares the array
// __constant__ — presumably the macro expands to nothing on that back-end;
// confirm against libhu's platform headers.
#if (ENABLE_DEVICE_OMP_COMPUTING)
__constant__ unsigned int offsets_k_coh[16] = OFFSETS_TABLE_16;
#elif (ENABLE_DEVICE_CUDA_COMPUTING)
__constant__ unsigned int offsets_k_coh[16] = OFFSETS_TABLE_16;
#elif (ENABLE_DEVICE_OPENCL_COMPUTING)
__constant__ unsigned int offsets_k_coh[16] = OFFSETS_TABLE_16;
#endif

// Coherent (age-based) hashing functor: keys are packed into a 32-bit
// hash-table slot as [age : KEY_TYPE_AGE_BITS | key : KEY_TYPE_BITS].
// Insertion evicts lower-priority entries via an atomic max on the packed
// word (older age ==> numerically larger word ==> higher priority).
class key_coh_hash_functor : libhu::key_hash_functor
{
public:

  key_coh_hash_functor() { }

  typedef libhu::U32 T_KEY;
  typedef libhu::U32 T_AGE;
  typedef libhu::U32 T_MAX_AGE;
  typedef libhu::U32 T_LOC;
  typedef libhu::U32 T_HASH_TABLE;

  // Packing constants (defined after the class body below).
  static const libhu::U32 DEFAULT_GROUP_SIZE;
  static const libhu::U32 KEY_TYPE_BITS;
  static const libhu::U32 KEY_TYPE_MASK;
  static const libhu::U32 PACKED_KEY_TYPE_MASK;
  static const libhu::U32 KEY_TYPE_RANGE;
  static const libhu::U32 UNDEFINED_KEY;
  static const libhu::U32 PACKED_UNDEFINED_KEY;
  static const libhu::U32 KEY_TYPE_AGE_MASK;
  static const libhu::U32 KEY_TYPE_AGE_BITS;
  static const libhu::U32 KEY_TYPE_INIT_AGE;
  static const libhu::U32 KEY_TYPE_NULL_AGE;
  static const libhu::U32 KEY_TYPE_MAX_AGE;
  static const libhu::U32 KEY_TYPE_MAX_AGE_MASK;
  static const libhu::U32 KEY_TYPE_MAX_AGE_BITS;
  static const libhu::U32 HTABLE_ID;
  static const libhu::U32 NOP_MODE_TRUE;
  static const libhu::U32 NOP_MODE_FALSE;

  // Raw device pointers/sizes, set by the caller before launching.
  libhu::UPTR hash_tableUPtr;   // base address of the hash table (for slot-index recovery)
  libhu::U32* max_tableUPtr;    // per-slot max-age table
  libhu::U32  hash_table_size;

  // Wrap a probe position into table range.
  __inline__ HOST DEVICE T_LOC WRAP(T_LOC A, T_LOC B) { return ((A) % (B)); }

  // Extract the key bits from a packed slot value.
  __inline__ HOST DEVICE T_KEY GET_KEY_POS(T_HASH_TABLE k)
  {
    return ((k) & KEY_TYPE_MASK);
  }

  // NOTE(review): identical to GET_KEY_POS in this functor; keys carry no
  // separate attachment id here.
  __inline__ HOST DEVICE T_KEY GET_KEY_ATTACH_ID(T_HASH_TABLE k)
  {
    return ((k) & KEY_TYPE_MASK);
  }

  // Extract the age bits (upper bits) from a packed slot value.
  __inline__ HOST DEVICE T_AGE GET_KEY_AGE(T_HASH_TABLE k)
  {
    return ((k) >> KEY_TYPE_BITS);
  }

  // Same layout as the age field; used when the upper bits hold a max-age.
  __inline__ HOST DEVICE T_MAX_AGE GET_KEY_MAX_AGE(T_HASH_TABLE k)
  {
    return ((k) >> KEY_TYPE_BITS);
  }

  // Pack a key with age zero.
  __inline__ HOST DEVICE T_HASH_TABLE PACK_KEY_POS(T_KEY p)
  {
    return ((p) & KEY_TYPE_MASK);
  }

  // Pack a key together with its current age.
  __inline__ HOST DEVICE T_HASH_TABLE PACK_KEY_POS_AND_AGE(T_KEY p, T_AGE a)
  {
    return (((a << KEY_TYPE_BITS)) + (p & KEY_TYPE_MASK));
  }

  // Pack a key together with a max-age value (same bit layout as the age).
  __inline__ HOST DEVICE T_HASH_TABLE PACK_KEY_POS_AND_MAX_AGE(T_KEY p, T_MAX_AGE m)
  {
    return (((m << KEY_TYPE_BITS)) + (p & KEY_TYPE_MASK));
  }

  // Hash function: age-dependent offset plus key, wrapped into the table.
  __inline__ HOST DEVICE T_LOC h(T_KEY K, T_AGE AGE, libhu::U32 HSZ)
  {
#if (ENABLE_HOST_COMPUTING)
    // Host build has no __constant__ memory; use a local static copy.
    static unsigned int offsets_k_coh[16] = OFFSETS_TABLE_16;
#endif
    return WRAP((offsets_k_coh[AGE] + K), HSZ);
  }

  // Max. age operator to update hash_table: rewrites each occupied slot so
  // its upper bits hold the slot's max age (read from max_tableUPtr).  The
  // slot index is recovered from the element's address relative to the
  // table base pointer (intended for thrust::for_each over the table).
  DEVICE void operator()(T_HASH_TABLE& t)
  {
    libhu::U32 i = (((libhu::UPTR)thrust::raw_pointer_cast(&t)) - ((libhu::UPTR)hash_tableUPtr)) / (sizeof(T_HASH_TABLE));
    if (t != PACKED_UNDEFINED_KEY)
    {
      t = PACK_KEY_POS_AND_MAX_AGE(GET_KEY_POS(t), max_tableUPtr[i]);
    }
  }

  // Build the hash table: each key is inserted with the coherent-hashing
  // eviction scheme (atomic max on the packed word; evicted entries are
  // re-inserted with their recorded age).  maf.update_max_age maintains the
  // per-slot max-age table used later by operator().
  // NOTE(review): the loops compare signed GTID against unsigned keys_size;
  // fine for keys_size <= INT_MAX, but worth confirming upstream.
  template<typename T_KEY,
           typename T_HASH_TABLE,
           typename T_MAX_AGE,
           typename T_HASH_FUNCTOR,
           typename T_MAX_AGE_COMPUTATION_FUNCTOR>
  __inline__ HOST DEVICE
  void hash_kernel(libhu::U32 keys_size,
                   libhu::U32 hash_table_size,
                   T_KEY keys[],
                   T_HASH_TABLE hash_table[],
                   T_MAX_AGE max_table[],
                   T_HASH_FUNCTOR hf,
                   T_MAX_AGE_COMPUTATION_FUNCTOR maf)
  {
#if (ENABLE_DEVICE_OMP_COMPUTING)
#if (OMP_CUSTOM_OPTIONS_ON_RUNTIME)
    omp_set_num_threads(OMP_CUSTOM_NUM_THREADS);
    omp_set_schedule(omp_sched_dynamic,OMP_CUSTOM_CHUNK_SIZE);
#pragma omp parallel for num_threads(OMP_CUSTOM_NUM_THREADS),schedule(runtime)
    for (libhu::S32 GTID = 0; GTID < keys_size; GTID++)
#else
#pragma omp parallel for num_threads(OMP_CUSTOM_NUM_THREADS),schedule(dynamic, OMP_CUSTOM_CHUNK_SIZE)
    for (libhu::S32 GTID = 0; GTID < keys_size; GTID++)
#endif
#endif
#if (ENABLE_HOST_COMPUTING)
    for (libhu::S32 GTID = 0; GTID < keys_size; GTID++)
#endif
    {
      // initialize variables
      libhu::U32 LOC;
      libhu::U32 ROOT_LOC;
      libhu::U8 AGE = KEY_TYPE_NULL_AGE;
      T_HASH_TABLE EVICTED_PKEY;
      // Out-of-range threads carry an undefined key so the loop below is a no-op.
      T_HASH_TABLE PKEY = (GTID < keys_size) ? PACK_KEY_POS_AND_MAX_AGE(keys[ (GTID) ], KEY_TYPE_INIT_AGE) : PACKED_UNDEFINED_KEY;
      AGE = (GTID < keys_size) ? KEY_TYPE_NULL_AGE : KEY_TYPE_MAX_AGE;

      while (AGE < KEY_TYPE_MAX_AGE)
      {
        LOC = h(GET_KEY_POS(PKEY), AGE, hash_table_size);
        // Atomic max: the numerically larger packed word (higher age) wins the slot.
        EVICTED_PKEY = libhu::atomicMaxU32(&hash_table[ (LOC) ], PKEY );
        if (EVICTED_PKEY < PKEY)
        {
          // We won the slot; record the age for this key's probe chain.
          maf.update_max_age(hash_table_size, PKEY, AGE, max_table, hf);
          if (GET_KEY_AGE(EVICTED_PKEY) > 0u)
          {
            // Displaced a live entry: adopt it and continue from its age.
            PKEY = EVICTED_PKEY;
            AGE = GET_KEY_AGE(EVICTED_PKEY);
          }
          else
          {
            // Slot was empty — insertion complete.
            break;
          }
        }
        else
        {
          // Lost the race / slot held by higher priority: retry at next age.
          AGE++;
          PKEY = PACK_KEY_POS_AND_MAX_AGE(GET_KEY_POS(PKEY), AGE);
        }
      }
    }
  }

};

// Static member definitions.  Key field: 28 bits; age field: 4 bits.
const libhu::U32 key_coh_hash_functor::DEFAULT_GROUP_SIZE    = 192u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_BITS         = 28u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_MASK         = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_coh_hash_functor::PACKED_KEY_TYPE_MASK  = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_coh_hash_functor::KEY_TYPE_RANGE        = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 2u );
const libhu::U32 key_coh_hash_functor::UNDEFINED_KEY         = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_coh_hash_functor::PACKED_UNDEFINED_KEY  = libhu::U32( libhu::U64((1ull) << KEY_TYPE_BITS) - 1u );
const libhu::U32 key_coh_hash_functor::KEY_TYPE_AGE_MASK     = 15u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_AGE_BITS     = 4u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_INIT_AGE     = 1u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_NULL_AGE     = 0u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_MAX_AGE      = 16u;
// NOTE(review): MAX_AGE_MASK = 4 while AGE_MASK = 15 looks inconsistent for a
// 4-bit field — confirm intended semantics before relying on it.
const libhu::U32 key_coh_hash_functor::KEY_TYPE_MAX_AGE_MASK = 4u;
const libhu::U32 key_coh_hash_functor::KEY_TYPE_MAX_AGE_BITS = 4u;
const libhu::U32 key_coh_hash_functor::HTABLE_ID             = 0u;
const libhu::U32 key_coh_hash_functor::NOP_MODE_TRUE         = 255u;
const libhu::U32 key_coh_hash_functor::NOP_MODE_FALSE        = 0u;

#endif
GB_unaryop__minv_uint64_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_uint64
// op(A') function:  GB_tran__minv_uint64_uint64

// C type:   uint64_t
// A type:   uint64_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: multiplicative inverse in the unsigned-integer sense
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (z, aij) ;                   \
    GB_OP (GB_CX (pC), z) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply: Cx [p] = GB_IMINV_UNSIGNED (Ax [p], 64) for 0 <= p < anz,
// parallelized statically over nthreads.  Returns GrB_NO_VALUE when the
// operator/type pair has been compiled out (GB_DISABLE).
GrB_Info GB_unop__minv_uint64_uint64
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c and is specialized
// via the GB_* macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__minv_uint64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hierarchical_sne_inl.h
/* * * Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the Delft University of Technology. * 4. Neither the name of the Delft University of Technology nor the names of * its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. 
 *
 */

#ifndef HIERARCHICAL_SNE_INL
#define HIERARCHICAL_SNE_INL

#include <omp.h>
#include "hdi/dimensionality_reduction/hierarchical_sne.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include <random>
#include <chrono>
#include <unordered_set>
#include <unordered_map>
#include <numeric>
#include "hdi/utils/memory_utils.h"
#include "hdi/data/map_mem_eff.h"
#include "hdi/data/map_helpers.h"
#include "hdi/data/io.h"
#include "hdi/utils/log_progress.h"

#ifdef HNSWLIB_FOUND
#ifdef _MSC_VER
#if (__cplusplus >=201103)
#include "hnswlib/hnswlib.h"
#include "hnswlib/space_l2.h"
#define HNSWLIB_SUPPORTED
#endif //__cplusplus >=201103
#else // _MSC_VER
#include "hnswlib/hnswlib.h"
#include "hnswlib/space_l2.h"
#define HNSWLIB_SUPPORTED
#endif // _MSC_VER
#endif //HNSWLIB_FOUND

//#ifdef __USE_GCD__
//#include <dispatch/dispatch.h>
//#endif

// Silence MSVC conversion/deprecation warnings triggered by FLANN headers.
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
//#define FLANN_USE_CUDA
#include "flann/flann.h"
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )

namespace hdi{
  namespace dr{

  /////////////////////////////////////////////////////////////////////////

    // Default hyper-parameters for the hierarchy construction.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::Parameters::Parameters():
      _seed(-1),
      _num_neighbors(30),
      _aknn_num_trees(4),
      _aknn_num_checks(1024),
      _aknn_algorithm(-1),            // -1 selects the FLANN back-end below
      _aknn_algorithmP1(0),
      _aknn_algorithmP2(0),
      _monte_carlo_sampling(true),
      _mcmcs_num_walks(10),
      _mcmcs_landmark_thresh(1.5),
      _mcmcs_walk_length(10),
      _hard_cut_off(false),
      _hard_cut_off_percentage(0.1f),
      _rs_reduction_factor_per_layer(.1),
      _rs_outliers_removal_jumps(10),
      _num_walks_per_landmark(100),
      _transition_matrix_prune_thresh(1.5),
      _out_of_core_computation(false)
    {}

  /////////////////////////////////////////////////////////////////////////

    // All statistics start at -1, meaning "not measured"; log() skips them.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::Statistics::Statistics():
      _total_time(-1),
      _init_knn_time(-1),
      _init_probabilities_time(-1),
      _init_fmc_time(-1),
      _mcmc_sampling_time(-1),
      _landmarks_selection_time(-1),
      _landmarks_selection_num_walks(-1),
      _aoi_time(-1),
      _fmc_time(-1),
      _aoi_num_walks(-1),
      _aoi_sparsity(-1),
      _fmc_sparsity(-1),
      _fmc_effective_sparsity(-1)
    {}

    // Reset every statistic to the "not measured" sentinel (-1).
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::Statistics::reset(){
      _total_time = -1;
      _init_knn_time = -1;
      _init_probabilities_time = -1;
      _init_fmc_time = -1;
      _mcmc_sampling_time = -1;
      _landmarks_selection_time = -1;
      _landmarks_selection_num_walks = -1;
      _aoi_time = -1;
      _fmc_time = -1;
      _aoi_num_walks = -1;
      _aoi_sparsity = -1;
      _fmc_sparsity = -1;
      _fmc_effective_sparsity = -1;
    }

    // Pretty-print all statistics that were actually measured (!= -1).
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::Statistics::log(utils::AbstractLog* logger)const{
      utils::secureLog(logger,"\n--------------- Hierarchical-SNE Statistics ------------------");
      utils::secureLogValue(logger,"Total time",_total_time);
      if(_init_knn_time != -1){ utils::secureLogValue(logger,"\tAKNN graph computation time", _init_knn_time,true,2);}
      if(_init_probabilities_time != -1){ utils::secureLogValue(logger,"\tTransition probabilities computation time", _init_probabilities_time,true,1);}
      if(_init_fmc_time != -1){ utils::secureLogValue(logger,"\tFMC computation time", _init_fmc_time,true,3);}
      if(_mcmc_sampling_time != -1){ utils::secureLogValue(logger,"\tMarkov Chain Monte Carlo sampling time", _mcmc_sampling_time,true,1);}
      if(_landmarks_selection_time != -1){ utils::secureLogValue(logger,"\tLandmark selection time", _landmarks_selection_time,true,2);}
      if(_landmarks_selection_num_walks != -1){ utils::secureLogValue(logger,"\tLndks Slct #walks", _landmarks_selection_num_walks,true,3);}
      if(_aoi_time != -1){ utils::secureLogValue(logger,"\tArea of Influence computation time", _aoi_time,true,1);}
      if(_fmc_time != -1){ utils::secureLogValue(logger,"\tFMC computation time", _fmc_time,true,3);}
      if(_aoi_num_walks != -1){ utils::secureLogValue(logger,"\tAoI #walks", _aoi_num_walks,true,4);}
      if(_aoi_sparsity != -1){ utils::secureLogValue(logger,"\tIs sparsity (%)", _aoi_sparsity*100,true,3);}
      if(_fmc_sparsity != -1){ utils::secureLogValue(logger,"\tTs sparsity (%)", _fmc_sparsity*100,true,3);}
      if(_fmc_effective_sparsity != -1){ utils::secureLogValue(logger,"\tTs effective sparsity (%)", _fmc_effective_sparsity*100,true,2);}
      utils::secureLog(logger,"--------------------------------------------------------------\n");
    }

  /////////////////////////////////////////////////////////////////////////

    // Lower-bound estimate of this scale's memory footprint in MB, based on
    // container capacities.  NOTE(review): "mim" is presumably a typo for
    // "min", but the name is part of the public interface — do not rename.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    scalar_type HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::Scale::mimMemoryOccupation()const{
      scalar_type mem(0);
      mem += _landmark_to_original_data_idx.capacity()*sizeof(unsigned_int_type);
      mem += _landmark_to_previous_scale_idx.capacity()*sizeof(unsigned_int_type);
      mem += _landmark_weight.capacity()*sizeof(scalar_type);
      for(int i = 0; i < _transition_matrix.size(); ++i){
        mem += _transition_matrix[i].size()*(sizeof(unsigned_int_type)+sizeof(scalar_type));
      }
      mem += _previous_scale_to_landmark_idx.capacity()*sizeof(int_type);
      for(int i = 0; i < _area_of_influence.size(); ++i){
        mem += _area_of_influence[i].size()*(sizeof(unsigned_int_type)+sizeof(scalar_type));
      }
      return mem / 1024 / 1024;
    }

  /////////////////////////////////////////////////////////////////////////

    template <typename scalar_type, typename sparse_scalar_matrix_type>
    HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::HierarchicalSNE():
      _initialized(false),
      _dimensionality(0),
      _logger(nullptr),
      _high_dimensional_data(nullptr),
      _verbose(false)
    {
    }

    // Marks the object as uninitialized; keeps the data pointer.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::reset(){
      _initialized = false;
    }

    // Drops the (non-owned) data pointer and marks the object uninitialized.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::clear(){
      _high_dimensional_data = nullptr;
      _initialized = false;
    }

    // Copies the row `handle` of the (row-major) high-dimensional data into
    // data_point.  Requires _high_dimensional_data to be set.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getHighDimensionalDescriptor(scalar_vector_type& data_point, data_handle_type handle)const{
      data_point.resize(_dimensionality);
      for(unsigned_int_type i = 0; i < _dimensionality; ++i){
        data_point[i] = *(_high_dimensional_data + handle*_dimensionality +i);
      }
    }

  /////////////////////////////////////////////////////////////////////////

    // Initialize from raw high-dimensional data (row-major, num_dps rows):
    // builds the first scale via an AKNN graph.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::initialize(scalar_type* high_dimensional_data, unsigned_int_type num_dps, Parameters params){
      _statistics.reset();
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
      utils::secureLog(_logger,"Initializing Hierarchical-SNE...");
      _params = params;
      _high_dimensional_data = high_dimensional_data;
      _num_dps = num_dps;
      utils::secureLogValue(_logger,"Number of data points",_num_dps);
      initializeFirstScale();
      _initialized = true;
      utils::secureLog(_logger,"Initialization complete!");
    }

    // Initialize from a precomputed similarity matrix: no AKNN step; the
    // first scale's transition matrix is the given similarities.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::initialize(const sparse_scalar_matrix_type& similarities, Parameters params){
      _statistics.reset();
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
      utils::secureLog(_logger,"Initializing Hierarchical-SNE...");
      _params = params;
      _high_dimensional_data = nullptr;
      _num_dps = similarities.size();
      utils::secureLogValue(_logger,"Number of data points",_num_dps);
      initializeFirstScale(similarities);
      _initialized = true;
      utils::secureLog(_logger,"Initialization complete!");
    }

    // Add one scale to the hierarchy (in-core or out-of-core implementation).
    // NOTE(review): the impl return values are discarded and `res` is always
    // true; also both this timer and the impls time _statistics._total_time —
    // confirm the double ScopedTimer on the same field is intended.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    bool HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::addScale(){
      _statistics.reset();
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
      bool res(true);
      if(_params._out_of_core_computation){
        addScaleOutOfCoreImpl();
      }else{
        addScaleImpl();
      }
      _statistics.log(_logger);
      return res;
    }

    // Builds the symmetric-free AKNN graph (FLANN by default, hnswlib when
    // _aknn_algorithm != -1 and HNSWLIB_SUPPORTED) and converts distances to
    // Gaussian transition probabilities with fixed perplexity.  On return,
    // both outputs have _num_dps*(num_neighbors+1) entries; slot 0 of each
    // row is the point itself with probability 0.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::computeNeighborhoodGraph(scalar_vector_type& distance_based_probabilities, std::vector<int>& neighborhood_graph){
      unsigned_int_type nn = _params._num_neighbors + 1;   // +1: the point itself
      scalar_type perplexity = _params._num_neighbors / 3.;

      neighborhood_graph.resize(_num_dps*nn);
      distance_based_probabilities.resize(_num_dps*nn);

#ifdef HNSWLIB_SUPPORTED
      if(_params._aknn_algorithm == -1)
#endif
      {
        // FLANN randomized kd-tree back-end.
        utils::secureLog(_logger, "Computing the neighborhood graph...");
        flann::Matrix<scalar_type> dataset(_high_dimensional_data, _num_dps, _dimensionality);
        flann::Matrix<scalar_type> query(_high_dimensional_data, _num_dps, _dimensionality);

        flann::Index<flann::L2<scalar_type> > index(dataset, flann::KDTreeIndexParams(_params._aknn_num_trees));
        utils::secureLog(_logger,"\tBuilding the trees...");
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_knn_time);
        index.buildIndex();

        flann::Matrix<int> indices_mat(neighborhood_graph.data(), query.rows, nn);
        flann::Matrix<scalar_type> dists_mat(distance_based_probabilities.data(), query.rows, nn);
        flann::SearchParams params(_params._aknn_num_checks);
        params.cores = 0; //all cores
        utils::secureLog(_logger,"\tAKNN queries...");
        index.knnSearch(query, indices_mat, dists_mat, nn, params);
      }
#ifdef HNSWLIB_SUPPORTED
      else {
        // hnswlib back-end; P1/P2 are passed through as HNSW construction
        // parameters (presumably M and ef_construction — confirm).
        utils::secureLog(_logger, "Computing the neighborhood graph with HNSW Lib...");
        hnswlib::L2Space l2space(_dimensionality);
        hnswlib::HierarchicalNSW<scalar_type> appr_alg(&l2space, _num_dps, _params._aknn_algorithmP1, _params._aknn_algorithmP2, 0);
        utils::secureLog(_logger, "\tBuilding the trees...");
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_knn_time);
        // Point 0 must be inserted before the parallel insertion of the rest.
        appr_alg.addPoint((void*)_high_dimensional_data, (std::size_t) 0);
#pragma omp parallel for
        for (int i = 1; i < _num_dps; ++i)
        {
          appr_alg.addPoint((void*)(_high_dimensional_data + (i*_dimensionality)), (hnswlib::labeltype) i);
        }
        utils::secureLog(_logger, "\tAKNN queries...");
        // #pragma omp parallel for
        for (int i = 0; i < _num_dps; ++i)
        {
          // searchKnn returns a max-heap: unload it back-to-front so results
          // end up sorted by increasing distance.
          auto top_candidates = appr_alg.searchKnn(_high_dimensional_data + (i*_dimensionality), (hnswlib::labeltype)nn);
          scalar_type *distances = distance_based_probabilities.data() + (i*nn);
          int *indices = neighborhood_graph.data() + (i*nn);
          int j = 0;
          assert(top_candidates.size() == nn);
          while (top_candidates.size() > 0)
          {
            auto rez = top_candidates.top();
            distances[nn - j - 1] = rez.first;
            indices[nn - j - 1] = rez.second;
            top_candidates.pop();
            ++j;
          }
        }
      }
#endif

      {
        utils::secureLog(_logger,"\tFMC computation...");
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_probabilities_time);
//#ifdef __USE_GCD__
//      std::cout << "GCD dispatch, hierarchical_sne_inl 253.\n";
//      dispatch_apply(_num_dps, dispatch_get_global_queue(0, 0), ^(size_t d) {
//#else
#pragma omp parallel for
        for(int_type d = 0; d < _num_dps; ++d){
//#endif //__USE_GCD__
          //It could be that the point itself is not the nearest one if two points are identical... I want the point itself to be the first one!
          if(neighborhood_graph[d*nn] != d){
            int to_swap = d*nn;
            for(; to_swap < d*nn+nn; ++to_swap){
              if(neighborhood_graph[to_swap] == d)
                break;
            }
            // NOTE(review): if d is absent from its own neighbor list the scan
            // leaves to_swap == d*nn+nn and the swaps below read one past the
            // row — confirm the AKNN back-ends guarantee self-inclusion.
            std::swap(neighborhood_graph[nn*d],neighborhood_graph[to_swap]);
            std::swap(distance_based_probabilities[nn*d],distance_based_probabilities[to_swap]);
          }
          scalar_vector_type temp_probability(nn,0);
          utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
                distance_based_probabilities.cbegin() + d * nn,
                distance_based_probabilities.cbegin() + (d + 1)*nn,
                temp_probability.begin(),
                temp_probability.begin() + nn,
                perplexity,
                200,
                1e-5,
                0
              );
          // Self-transition is forced to zero; only the nn-1 neighbors keep mass.
          distance_based_probabilities[d*nn] = 0;
          for(unsigned_int_type n = 1; n < nn; ++n){
            distance_based_probabilities[d*nn+n] = temp_probability[n];
          }
        }
//#ifdef __USE_GCD__
//      );
//#endif
      }
    }

    // First scale from raw data: identity landmark mappings, unit weights,
    // and a transition matrix filled from the AKNN probabilities.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::initializeFirstScale(){
      utils::secureLog(_logger,"Initializing the first scale...");
      _hierarchy.clear();
      _hierarchy.push_back(Scale());
      Scale& scale = _hierarchy[0];

      scalar_vector_type distance_based_probabilities;
      std::vector<int> neighborhood_graph;
      computeNeighborhoodGraph(distance_based_probabilities, neighborhood_graph);
      unsigned_int_type nn = _params._num_neighbors + 1;

      {
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_fmc_time);
        utils::secureLog(_logger,"Creating transition matrix...");
        scale._landmark_to_original_data_idx.resize(_num_dps);
        std::iota(scale._landmark_to_original_data_idx.begin(), scale._landmark_to_original_data_idx.end(), 0);
        scale._landmark_to_previous_scale_idx = scale._landmark_to_original_data_idx;
        scale._landmark_weight.resize(_num_dps,1);
        scale._transition_matrix.resize(_num_dps);
//#ifdef __USE_GCD__
//      std::cout << "GCD dispatch, hierarchical_sne_inl 253.\n";
//      dispatch_apply(_num_dps, dispatch_get_global_queue(0, 0), ^(size_t i) {
//#else
#pragma omp parallel for
        for(int i = 0; i < _num_dps; ++i){
//#endif //__USE_GCD__
          // NOTE(review): `sum` is accumulated but never used — normalization
          // presumably happens elsewhere; confirm before removing.
          scalar_type sum = 0;
          for(int n = 1; n < nn; ++n){
            int idx = i*nn+n;
            auto v = distance_based_probabilities[idx];
            sum += v;
            scale._transition_matrix[i][neighborhood_graph[idx]] = v;
          }
        }
//#ifdef __USE_GCD__
//      );
//#endif
      }
      utils::secureLogValue(_logger,"Min memory requirements (MB)",scale.mimMemoryOccupation());
    }

    // First scale from a user-provided similarity matrix (no AKNN step).
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::initializeFirstScale(const sparse_scalar_matrix_type& similarities){
      utils::secureLog(_logger,"Initializing the first scale...");
      _hierarchy.clear();
      _hierarchy.push_back(Scale());
      Scale& scale = _hierarchy[0];

      {
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._init_fmc_time);
        utils::secureLog(_logger,"Creating transition matrix...");
        scale._landmark_to_original_data_idx.resize(_num_dps);
        std::iota(scale._landmark_to_original_data_idx.begin(), scale._landmark_to_original_data_idx.end(), 0);
        scale._landmark_to_previous_scale_idx = scale._landmark_to_original_data_idx;
        scale._landmark_weight.resize(_num_dps,1);
        scale._transition_matrix = similarities;
      }
      utils::secureLogValue(_logger,"Min memory requirements (MB)",scale.mimMemoryOccupation());
    }

    // Random-sampling landmark selection: draws uniform candidates (optionally
    // displaced by a short random walk to avoid outliers) until the requested
    // reduction factor is reached.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::selectLandmarks(const Scale& previous_scale, Scale& scale, unsigned_int_type& selected_landmarks){
      utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._landmarks_selection_time);
      utils::secureLog(_logger,"Landmark selection with fixed reduction...");
      const unsigned_int_type previous_scale_dp = previous_scale._transition_matrix.size();
      const unsigned_int_type num_landmarks = previous_scale_dp*_params._rs_reduction_factor_per_layer;

      std::default_random_engine generator(seed());
      std::uniform_int_distribution<> distribution_int(0, previous_scale_dp-1);
      std::uniform_real_distribution<double> distribution_real(0.0, 1.0);

      scale._landmark_to_original_data_idx.resize(num_landmarks,0);
      scale._landmark_to_previous_scale_idx.resize(num_landmarks,0);
      scale._landmark_weight.resize(num_landmarks,0);
      scale._previous_scale_to_landmark_idx.resize(previous_scale_dp,-1);
      scale._transition_matrix.resize(num_landmarks);
      scale._area_of_influence.resize(previous_scale_dp);

      int num_tries = 0;
      selected_landmarks = 0;
      while(selected_landmarks < num_landmarks){
        ++num_tries;
        int idx = distribution_int(generator);
        assert(idx >= 0);
        assert(idx < _num_dps);
        if(_params._rs_outliers_removal_jumps > 0){
          idx = randomWalk(idx,_params._rs_outliers_removal_jumps,previous_scale._transition_matrix,distribution_real,generator);
        }
        if(scale._previous_scale_to_landmark_idx[idx] != -1){
          continue;   // already a landmark — retry
        }
        scale._previous_scale_to_landmark_idx[idx] = selected_landmarks;
        scale._landmark_to_original_data_idx[selected_landmarks] = previous_scale._landmark_to_original_data_idx[idx];
        scale._landmark_to_previous_scale_idx[selected_landmarks] = idx;
        ++selected_landmarks;
      }
      _statistics._landmarks_selection_num_walks = num_tries*_params._rs_outliers_removal_jumps;
    }

    // MCMC landmark selection: estimates the stationary distribution with
    // random walks and keeps points whose visit count exceeds a threshold
    // (data-driven, or a hard percentage cut-off when enabled).
    // NOTE(review): the omp-parallel walk loop shares one `generator` and
    // increments importance_sampling[] without synchronization — races on
    // both; results are approximate by design, but confirm this is intended.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::selectLandmarksWithStationaryDistribution(const Scale& previous_scale, Scale& scale, unsigned_int_type& selected_landmarks){
      utils::secureLog(_logger,"Landmark selection...");
      const unsigned_int_type previous_scale_dp = previous_scale._transition_matrix.size();
      int count = 0;
      int thresh = _params._mcmcs_num_walks * _params._mcmcs_landmark_thresh;

      //__block std::vector<unsigned_int_type> importance_sampling(previous_scale_dp,0);
      std::vector<unsigned_int_type> importance_sampling(previous_scale_dp,0);
      {
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._mcmc_sampling_time);
        //__block std::default_random_engine generator(seed());
        //__block std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
        std::default_random_engine generator(seed());
        std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
        selected_landmarks = 0;

        utils::secureLog(_logger,"Monte Carlo Approximation...");
        unsigned_int_type invalid = std::numeric_limits<unsigned_int_type>::max();
//#ifdef __USE_GCD__
//      std::cout << "GCD dispatch, hierarchical_sne_inl 391.\n";
//      dispatch_apply(previous_scale_dp, dispatch_get_global_queue(0, 0), ^(size_t d) {
//#else
#pragma omp parallel for
        for(int d = 0; d < previous_scale_dp; ++d){
//#endif //__USE_GCD__
          for(int p = 0; p < _params._mcmcs_num_walks; ++p){
            int idx = d;
            idx = randomWalk(idx,_params._mcmcs_walk_length,previous_scale._transition_matrix,distribution_real,generator);
            if(idx != invalid){
              ++importance_sampling[idx];
            }
          }
        }
//#ifdef __USE_GCD__
//      );
//#endif

        // cheap hack to get the hard cutoff in, still computes the data driven part which should probably be replaced...
        if (_params._hard_cut_off)
        {
          std::vector<unsigned_int_type> importance_sampling_sort = importance_sampling;
          std::sort(importance_sampling_sort.begin(), importance_sampling_sort.end());
          unsigned_int_type cutoff = importance_sampling_sort[(importance_sampling_sort.size()-1) * (1.0f - _params._hard_cut_off_percentage)];
          thresh = cutoff;
        }

        _statistics._landmarks_selection_num_walks = previous_scale_dp*_params._mcmcs_num_walks;
        for(int i = 0; i < previous_scale_dp; ++i){
          if(importance_sampling[i] > thresh)
            ++count;
        }
      }

      {
        utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._landmarks_selection_time);
        utils::secureLog(_logger,"Selection...");
        scale._previous_scale_to_landmark_idx.resize(previous_scale_dp,-1);
        scale._area_of_influence.resize(previous_scale_dp);
        scale._landmark_to_original_data_idx.resize(count);
        scale._landmark_to_previous_scale_idx.resize(count);
        scale._landmark_weight.resize(count);
        scale._transition_matrix.resize(count);

        selected_landmarks = 0;
        for(int i = 0; i < previous_scale_dp; ++i){
          if(importance_sampling[i] > thresh){
            scale._previous_scale_to_landmark_idx[i] = selected_landmarks;
            scale._landmark_to_original_data_idx[selected_landmarks] = previous_scale._landmark_to_original_data_idx[i];
            scale._landmark_to_previous_scale_idx[selected_landmarks] = i;
            ++selected_landmarks;
          }
        }
      }
    }

    // In-core scale construction: selects landmarks, then estimates each
    // previous-scale point's area of influence with random walks, accumulates
    // the landmark transition matrix from AoI overlaps, and row-normalizes it.
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    bool HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::addScaleImpl(){
      utils::ScopedTimer<scalar_type, utils::Seconds> timer_tot(_statistics._total_time);
      utils::secureLog(_logger,"Add a new scale ...");
      _hierarchy.push_back(Scale());
      Scale& scale = _hierarchy[_hierarchy.size()-1];
      Scale& previous_scale = _hierarchy[_hierarchy.size()-2];
      const unsigned_int_type previous_scale_dp = previous_scale._landmark_to_original_data_idx.size();

      // Landmark selection
      unsigned_int_type selected_landmarks = 0;
      if(_params._monte_carlo_sampling){
        selectLandmarksWithStationaryDistribution(previous_scale,scale,selected_landmarks);
      }else{
        selectLandmarks(previous_scale,scale,selected_landmarks);
      }
      utils::secureLogValue(_logger,"\t#landmarks",selected_landmarks);

      {//Area of influence
        //__block std::default_random_engine generator(seed());
        //__block std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
        std::default_random_engine generator(seed());
        std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
        const unsigned_int_type max_jumps = 100;//1000.*selected_landmarks/previous_scale_dp;
        const unsigned_int_type walks_per_dp = _params._num_walks_per_landmark;

        utils::secureLog(_logger,"\tComputing area of influence...");
        {
          utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aoi_time);
          //__block unsigned_int_type num_elem_in_Is(0);
          unsigned_int_type num_elem_in_Is(0);
//#ifdef __USE_GCD__
//        std::cout << "GCD dispatch, hierarchical_sne_inl 473.\n";
//        dispatch_queue_t criticalQueue = dispatch_queue_create("critical", NULL);
//        dispatch_apply(previous_scale_dp, dispatch_get_global_queue(0, 0), ^(size_t d) {
//#else
#pragma omp parallel for
          for(int d = 0; d < previous_scale_dp; ++d){
//#endif //__USE_GCD__
            // Count which landmarks the walks starting at d terminate on.
            std::unordered_map<unsigned_int_type, unsigned_int_type> landmarks_reached;
            for(int i = 0; i < walks_per_dp; ++i){
              auto res = randomWalk(d,scale._previous_scale_to_landmark_idx,max_jumps,previous_scale._transition_matrix,distribution_real,generator);
              if(res != -1){
                ++landmarks_reached[scale._previous_scale_to_landmark_idx[res]];
              }else{
                //--i;
              }
            }
//#ifdef __USE_GCD__
//          dispatch_sync(criticalQueue, ^
//#else
#pragma omp critical
//#endif
            {
              // Shared-state updates are serialized in this critical section.
              num_elem_in_Is += landmarks_reached.size();
              for(auto l: landmarks_reached){
                for(auto other_l: landmarks_reached){
                  //to avoid that the sparsity of the matrix it is much different from the effective sparsity
                  if(l.second <= _params._transition_matrix_prune_thresh || other_l.second <= _params._transition_matrix_prune_thresh)
                    continue;
                  if(l.first != other_l.first){
                    scale._transition_matrix[l.first][other_l.first] += l.second * other_l.second * previous_scale._landmark_weight[d];
                  }
                }
              }
              for(auto l: landmarks_reached){
                const scalar_type prob = scalar_type(l.second)/walks_per_dp;
                scale._area_of_influence[d][l.first] = prob;
                scale._landmark_weight[l.first] += prob * previous_scale._landmark_weight[d];
              }
            }
//#ifdef __USE_GCD__
//          );
//#endif
          }
//#ifdef __USE_GCD__
//        );
//#endif
          _statistics._aoi_num_walks = previous_scale_dp * walks_per_dp;
          _statistics._aoi_sparsity = 1 - scalar_type(num_elem_in_Is) / (previous_scale_dp*selected_landmarks);
        }

        {
          utils::secureLog(_logger,"\tComputing finite markov chain...");
          utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._fmc_time);
          unsigned_int_type num_elem_in_Ts(0);
          unsigned_int_type num_effective_elem_in_Ts(0);
          // Row-normalize the transition matrix and measure its sparsity.
          for(int l = 0; l < scale._transition_matrix.size(); ++l){
            num_elem_in_Ts += scale._transition_matrix[l].size();
            scalar_type sum(0);
            for(auto& e: scale._transition_matrix[l]){
              sum += e.second;
            }
            for(auto& e: scale._transition_matrix[l]){
              e.second /= sum;
              if(e.second > 0.01){
                ++num_effective_elem_in_Ts;
              }
            }
          }
          _statistics._fmc_sparsity = 1 - scalar_type(num_elem_in_Ts) / (selected_landmarks*selected_landmarks);
          _statistics._fmc_effective_sparsity = 1 - scalar_type(num_effective_elem_in_Ts) / (selected_landmarks*selected_landmarks);
        }
      }

      utils::secureLogValue(_logger,"Min memory requirements (MB)",scale.mimMemoryOccupation());
      return true;
    }

    // Out-of-core scale construction: same pipeline as addScaleImpl but with
    // memory-efficient maps (via MapHelpers) and an explicit AoI inversion.
    // (Definition continues beyond this chunk.)
    template <typename scalar_type, typename sparse_scalar_matrix_type>
    bool HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::addScaleOutOfCoreImpl(){
      typedef typename sparse_scalar_matrix_type::value_type map_type;
      typedef typename map_type::key_type key_type;
      typedef typename map_type::mapped_type mapped_type;
      typedef hdi::data::MapHelpers<key_type,mapped_type,map_type> map_helpers_type;

      utils::ScopedTimer<scalar_type, utils::Seconds> timer_tot(_statistics._total_time);
      utils::secureLog(_logger,"Add a new scale with out-of-core implementation ...");
      _hierarchy.push_back(Scale());
      Scale& scale = _hierarchy[_hierarchy.size()-1];
      Scale& previous_scale = _hierarchy[_hierarchy.size()-2];
      const unsigned_int_type previous_scale_dp = previous_scale._landmark_to_original_data_idx.size();

      // Landmark selection
      unsigned_int_type selected_landmarks = 0;
      if(_params._monte_carlo_sampling){
        selectLandmarksWithStationaryDistribution(previous_scale,scale,selected_landmarks);
      }else{
        selectLandmarks(previous_scale,scale,selected_landmarks);
      }
      utils::secureLogValue(_logger,"\t#landmarks",selected_landmarks);

      {//Area of influence
        std::default_random_engine generator(seed());
        std::uniform_real_distribution<double> distribution_real(0.0, 1.0);
        const unsigned_int_type max_jumps = 200;//1000.*selected_landmarks/previous_scale_dp;
        const unsigned_int_type walks_per_dp = _params._num_walks_per_landmark;

        utils::secureLog(_logger,"\tComputing area of influence...");
        {
          utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aoi_time);
          int d = 0;
          unsigned_int_type num_elem_in_Is(0);
          {
            utils::LogProgress progress(_verbose?_logger:nullptr);
            progress.setNumSteps(previous_scale_dp);
            progress.setNumTicks(previous_scale_dp/50000);
            progress.setName("Area of influence");
            progress.start();
//#ifdef __USE_GCD__
//          std::cout << "GCD dispatch, hierarchical_sne_inl 587.\n";
//          dispatch_queue_t criticalQueue = dispatch_queue_create("critical", NULL);
//          dispatch_apply(previous_scale_dp, dispatch_get_global_queue(0, 0), ^(size_t d) {
//#else
#pragma omp parallel for
            for(int d = 0; d < previous_scale_dp; ++d){
//#endif //__USE_GCD__
              //map because it must be ordered for the initialization of the maps
              std::map<unsigned_int_type, scalar_type> landmarks_reached;
              for(int i = 0; i < walks_per_dp; ++i){
                auto res = randomWalk(d,scale._previous_scale_to_landmark_idx,max_jumps,previous_scale._transition_matrix,distribution_real,generator);
                if(res != -1){
                  ++landmarks_reached[scale._previous_scale_to_landmark_idx[res]];
                }else{
                  //--i;
                }
              }
              //normalization
              for(auto& l: landmarks_reached){
                l.second = scalar_type(l.second)/walks_per_dp;
              }
              //saving aoi
              map_helpers_type::initialize(scale._area_of_influence[d],landmarks_reached.begin(),landmarks_reached.end());
              map_helpers_type::shrinkToFit(scale._area_of_influence[d]);
              progress.step();
            }
//#ifdef __USE_GCD__
//          );
//#endif
            progress.finish();
          }

          utils::secureLog(_logger,"\tCaching weights...");
          //caching of the weights
          for(d = 0; d < previous_scale_dp; ++d){
            num_elem_in_Is += scale._area_of_influence[d].size();
            for(auto& e: scale._area_of_influence[d]){
              scale._landmark_weight[e.first] += e.second;
            }
          }

          utils::secureLog(_logger,"\tInverting the AoI matrix...");
          //Inverse AoI -> critical for the computation time
          sparse_scalar_matrix_type inverse_aoi;
          map_helpers_type::invert(scale._area_of_influence,inverse_aoi);

          utils::secureLog(_logger,"\tComputing similarities...");
          //Similarities -> compute the overlap of the area of influence
          {
            utils::LogProgress progress(_verbose?_logger:nullptr);
            progress.setNumSteps(scale._transition_matrix.size());
progress.setNumTicks(scale._transition_matrix.size()/5000); progress.setName("Similarities"); progress.start(); // #ifdef __USE_GCD__ // std::cout << "GCD dispatch, hierarchical_sne_inl 602.\n"; // dispatch_apply(scale._transition_matrix.size(), dispatch_get_global_queue(0, 0), ^(size_t l) { // #else #pragma omp parallel for for(int l = 0; l < scale._transition_matrix.size(); ++l){ // #endif //__USE_GCD__ //ordered for efficient initialization std::map<typename sparse_scalar_matrix_type::value_type::key_type, typename sparse_scalar_matrix_type::value_type::mapped_type> temp_trans_mat; // use map here for(const auto& d: inverse_aoi[l]){ for(const auto& aoi: scale._area_of_influence[d.first]){ double single_landmark_thresh = (1./100.)*_params._transition_matrix_prune_thresh; if(l != aoi.first){ if(d.second <= single_landmark_thresh || aoi.second <= single_landmark_thresh) continue; temp_trans_mat[aoi.first] += d.second * aoi.second * previous_scale._landmark_weight[d.first]; } } } //normalization double sum = 0; for(auto& v: temp_trans_mat){sum += v.second;} for(auto& v: temp_trans_mat){v.second /= sum;} auto scale_size = scale.size(); //removed the threshold depending on the scale -> it makes sense to remove only uneffective neighbors based at every scale -> memory is still under control map_helpers_type::initialize(scale._transition_matrix[l],temp_trans_mat.begin(),temp_trans_mat.end(), 0.001); map_helpers_type::shrinkToFit(scale._transition_matrix[l]); progress.step(); } // #ifdef __USE_GCD__ // ); // #endif progress.finish(); } _statistics._aoi_num_walks = previous_scale_dp * walks_per_dp; _statistics._aoi_sparsity = 1 - scalar_type(num_elem_in_Is) / (previous_scale_dp*selected_landmarks); } { utils::secureLog(_logger,"\tComputing finite markov chain..."); utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._fmc_time); unsigned_int_type num_elem_in_Ts(0); unsigned_int_type num_effective_elem_in_Ts(0); for(int l = 0; l < 
scale._transition_matrix.size(); ++l){ num_elem_in_Ts += scale._transition_matrix[l].size(); scalar_type sum(0); for(auto& e: scale._transition_matrix[l]){ sum += e.second; } for(auto& e: scale._transition_matrix[l]){ e.second /= sum; if(e.second > 0.001){ ++num_effective_elem_in_Ts; } } } _statistics._fmc_sparsity = 1 - scalar_type(num_elem_in_Ts) / (selected_landmarks*selected_landmarks); _statistics._fmc_effective_sparsity = 1 - scalar_type(num_effective_elem_in_Ts) / (selected_landmarks*selected_landmarks); } } return true; } template <typename scalar_type, typename sparse_scalar_matrix_type> typename HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::unsigned_int_type HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::seed()const{ return(_params._seed>0)?static_cast<unsigned_int_type>(_params._seed):std::chrono::system_clock::now().time_since_epoch().count(); } /////////////////////////////////////////////////////////////////// template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getInfluencedLandmarksInPreviousScale(unsigned_int_type scale_id, std::vector<unsigned_int_type>& idxes, std::map<unsigned_int_type,scalar_type>& neighbors)const{ neighbors.clear(); std::unordered_set<unsigned_int_type> set_idxes; set_idxes.insert(idxes.begin(),idxes.end()); auto not_found = set_idxes.end(); for(int d = 0; d < _hierarchy[scale_id]._area_of_influence.size(); ++d){ double probability = 0; for(auto& v: _hierarchy[scale_id]._area_of_influence[d]){ if(set_idxes.find(v.first) != not_found){ probability += v.second; } } if(probability > 0){ neighbors[d] = probability; } } } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInfluencingLandmarksInNextScale(unsigned_int_type scale_id, std::vector<unsigned_int_type>& idxes, std::map<unsigned_int_type, scalar_type>& neighbors)const{ neighbors.clear(); int 
next_scale_id = scale_id + 1; if (next_scale_id + 1 > _hierarchy.size()) return; std::map<unsigned_int_type, scalar_type> completeSet; for (int i = 0; i < idxes.size(); i++) { for (auto& v : _hierarchy[next_scale_id]._area_of_influence[idxes[i]]){ neighbors[v.first] += v.second; } } for (int i = 0; i < _hierarchy[next_scale_id]._area_of_influence.size(); i++) { for (auto& v : _hierarchy[next_scale_id]._area_of_influence[i]){ completeSet[v.first] += v.second; } } for (auto& v : neighbors) { neighbors[v.first] /= completeSet[v.first]; } } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getInterpolationWeights(sparse_scalar_matrix_type& influence, int scale)const{ influence.clear(); influence.resize(_num_dps); scale = (scale<0)?(_hierarchy.size()-1):scale; checkAndThrowLogic(scale < _hierarchy.size(),"getInterpolationWeights: Invalid scale"); //#ifdef __USE_GCD__ // std::cout << "GCD dispatch, hierarchical_sne_inl 724.\n"; // dispatch_apply(_num_dps, dispatch_get_global_queue(0, 0), ^(size_t i) { //#else #pragma omp parallel for for(int i = 0; i < _num_dps; ++i){ //#endif //__USE_GCD__ influence[i] = _hierarchy[1]._area_of_influence[i]; for(int s = 2; s <= scale; ++s){ typename sparse_scalar_matrix_type::value_type temp_link; for(auto l: influence[i]){ for(auto new_l: _hierarchy[s]._area_of_influence[l.first]){ temp_link[new_l.first] += l.second * new_l.second; } } influence[i] = temp_link; } } //#ifdef __USE_GCD__ // ); //#endif } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getInterpolationWeights(const std::vector<unsigned int>& data_points, sparse_scalar_matrix_type& influence, int scale)const{ auto n = data_points.size(); influence.clear(); influence.resize(n); scale = (scale<0)?(_hierarchy.size()-1):scale; checkAndThrowLogic(scale < _hierarchy.size(),"getInterpolationWeights: Invalid scale"); 
//#ifdef __USE_GCD__ // std::cout << "GCD dispatch, hierarchical_sne_inl 755.\n"; // dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t i) { //#else #pragma omp parallel for for(int i = 0; i < n; ++i){ //#endif //__USE_GCD__ influence[i] = _hierarchy[1]._area_of_influence[data_points[i]]; for(int s = 2; s <= scale; ++s){ typename sparse_scalar_matrix_type::value_type temp_link; for(auto l: influence[i]){ for(auto new_l: _hierarchy[s]._area_of_influence[l.first]){ temp_link[new_l.first] += l.second * new_l.second; } } influence[i] = temp_link; } } //#ifdef __USE_GCD__ // ); //#endif } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type, sparse_scalar_matrix_type>::getInfluenceOnDataPoint(unsigned_int_type dp, std::vector<std::unordered_map<unsigned_int_type, scalar_type>>& influence, scalar_type thresh, bool normalized)const{ assert(dp < _hierarchy[0].size()); influence.resize(_hierarchy.size()); influence[0][dp] = 1; //Hey it's me! 
if(influence.size() == 1){ return; } for(auto& v: _hierarchy[1]._area_of_influence[dp]){ influence[1][v.first] = v.second; } if (normalized) { double sum = 0; for(auto& v: influence[1]){sum += v.second;} for(auto& v: influence[1]){v.second /= sum;} } for(int s = 2; s < _hierarchy.size(); ++s){ for(auto l: influence[s-1]){ if(l.second >= thresh){ for(auto new_l: _hierarchy[s]._area_of_influence[l.first]){ influence[s][new_l.first] += l.second * new_l.second; } } } if (normalized) { double sum = 0; for (auto& v : influence[s]){ sum += v.second; } for (auto& v : influence[s]){ v.second /= sum; } } } } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getStochasticLocationAtHigherScale(unsigned_int_type orig_scale, unsigned_int_type dest_scale, const std::vector<unsigned_int_type>& subset_orig_scale, sparse_scalar_matrix_type& closeness)const{ checkAndThrowLogic(dest_scale > orig_scale,"getStochasticLocationAtHigherScale (0)"); checkAndThrowLogic(orig_scale < _hierarchy.size()-1,"getStochasticLocationAtHigherScale (2)"); checkAndThrowLogic(dest_scale < _hierarchy.size(),"getStochasticLocationAtHigherScale (3)"); closeness.clear(); closeness.resize(subset_orig_scale.size()); //#ifdef __USE_GCD__ // std::cout << "GCD dispatch, hierarchical_sne_inl 814.\n"; // dispatch_apply(subset_orig_scale.size(), dispatch_get_global_queue(0, 0), ^(size_t i) { //#else #pragma omp parallel for for(int i = 0; i < subset_orig_scale.size(); ++i){ //#endif //__USE_GCD__ assert(subset_orig_scale[i] < _hierarchy[orig_scale+1]._area_of_influence.size()); closeness[i] = _hierarchy[orig_scale+1]._area_of_influence[subset_orig_scale[i]]; for(int s = orig_scale+2; s <= dest_scale; ++s){ typename sparse_scalar_matrix_type::value_type temp_link; for(auto l: closeness[i]){ for(auto new_l: _hierarchy[s]._area_of_influence[l.first]){ temp_link[new_l.first] += l.second * new_l.second; } } closeness[i] = temp_link; } } 
//#ifdef __USE_GCD__ // ); //#endif } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getAreaOfInfluence(unsigned_int_type scale_id, const std::vector<unsigned_int_type>& selection, std::vector<scalar_type>& aoi)const{ typedef typename sparse_scalar_matrix_type::value_type map_type; typedef typename map_type::key_type key_type; typedef typename map_type::mapped_type mapped_type; typedef hdi::data::MapHelpers<key_type,mapped_type,map_type> map_helpers_type; checkAndThrowLogic(scale_id < _hierarchy.size(),"getAreaOfInfluence (3)"); aoi.clear(); aoi.resize(scale(0).size(),0); std::unordered_set<unsigned int> set_selected_idxes; set_selected_idxes.insert(selection.begin(),selection.end()); if(scale_id == 0){ for(int i = 0; i < selection.size(); ++i){ aoi[selection[i]] = 1; } }else{ //#ifdef __USE_GCD__ // std::cout << "GCD dispatch, hierarchical_sne_inl 854.\n"; // dispatch_apply(scale(0).size(), dispatch_get_global_queue(0, 0), ^(size_t i) { //#else #pragma omp parallel for for(int i = 0; i < scale(0).size(); ++i){ //#endif //__USE_GCD__ typename sparse_scalar_matrix_type::value_type closeness = scale(1)._area_of_influence[i]; for(int s = 2; s <= scale_id; ++s){ std::map<key_type,mapped_type> temp_link; for(auto l: closeness){ for(auto new_l: scale(s)._area_of_influence[l.first]){ temp_link[new_l.first] += l.second * new_l.second; } } closeness.clear(); map_helpers_type::initialize(closeness,temp_link.begin(),temp_link.end()); } for(auto e: closeness){ if(set_selected_idxes.find(e.first) != set_selected_idxes.end()){ aoi[i] += e.second; } } } //#ifdef __USE_GCD__ // ); //#endif } } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::getAreaOfInfluenceTopDown(unsigned_int_type scale_id, const std::vector<unsigned_int_type>& selection, std::vector<scalar_type>& aoi)const{ typedef typename 
sparse_scalar_matrix_type::value_type map_type; typedef typename map_type::key_type key_type; typedef typename map_type::mapped_type mapped_type; typedef hdi::data::MapHelpers<key_type,mapped_type,map_type> map_helpers_type; checkAndThrowLogic(scale_id < _hierarchy.size(),"getAreaOfInfluenceTopDown (3)"); aoi.clear(); aoi.resize(scale(0).size(),0); std::unordered_set<unsigned int> set_selected_idxes; set_selected_idxes.insert(selection.begin(),selection.end()); if(scale_id == 0){ for(int i = 0; i < selection.size(); ++i){ aoi[selection[i]] = 1; } }else{ std::vector<unsigned_int_type> scale_selection = selection; for(int s = scale_id; s > 0; --s){ std::map<unsigned_int_type, scalar_type> neighbors; getInfluencedLandmarksInPreviousScale(s,scale_selection,neighbors); scale_selection.clear(); for(auto neigh: neighbors){ if(neigh.second > 0.3){ //TODO scale_selection.push_back(neigh.first); } } } for(int i = 0; i < scale_selection.size(); ++i){ aoi[scale_selection[i]] = 1; } } } /////////////////////////////////////////////////////////////////// /// RANDOM WALKS /////////////////////////////////////////////////////////////////// //Compute a random walk using a transition matrix template <typename scalar_type, typename sparse_scalar_matrix_type> typename HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::unsigned_int_type HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::randomWalk(unsigned_int_type starting_point, unsigned_int_type max_length, const sparse_scalar_matrix_type& transition_matrix, std::uniform_real_distribution<double>& distribution, std::default_random_engine& generator){ unsigned_int_type dp_idx = starting_point; int walk_length = 0; do{ const double rnd_num = distribution(generator); unsigned_int_type idx_knn = dp_idx; double incremental_prob = 0; for(auto& elem: transition_matrix[dp_idx]){ incremental_prob += elem.second; if(rnd_num < incremental_prob){ idx_knn = elem.first; break; } } //assert(idx_knn != dp_idx); if(idx_knn == dp_idx){ 
return std::numeric_limits<unsigned_int_type>::max(); // std::cout << "DISCONNECTED!" << std::endl; } dp_idx = idx_knn; ++walk_length; } while(walk_length <= max_length); return dp_idx; } //!Compute a random walk using a transition matrix template <typename scalar_type, typename sparse_scalar_matrix_type> int HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::randomWalk(unsigned_int_type starting_point, const std::vector<int>& stopping_points, unsigned_int_type max_length, const sparse_scalar_matrix_type& transition_matrix, std::uniform_real_distribution<double>& distribution, std::default_random_engine& generator){ unsigned_int_type dp_idx = starting_point; int walk_length = 0; do{ const double rnd_num = distribution(generator); unsigned_int_type idx_knn = dp_idx; double incremental_prob = 0; for(auto& elem: transition_matrix[dp_idx]){ incremental_prob += elem.second; if(rnd_num < incremental_prob){ idx_knn = elem.first; break; } } //assert(idx_knn != dp_idx); if(idx_knn == dp_idx){ return -1; std::cout << "42!" 
<< std::endl; } dp_idx = idx_knn; ++walk_length; } while(stopping_points[dp_idx] == -1 && walk_length <= max_length); if(walk_length > max_length){ return -1; } return static_cast<int>(dp_idx); } //////////////////////////////////////////////////////////////////////////////////// template <typename scalar_type, typename sparse_scalar_matrix_type> typename HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::int_type HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::getFreeClusterId(unsigned_int_type scale_id){ int_type max = std::numeric_limits<int_type>::max(); for(int_type i = 0; i < max; ++i){ for(int j = 0; j < _cluster_tree[scale_id].size(); ++j){ if(i!=_cluster_tree[scale_id][j].id()){ return i; } } } return 0; } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::addCluster(unsigned_int_type scale_id, const cluster_type& cluster){ checkAndThrowLogic(scale_id < _cluster_tree.size(), "ClusterHierarchy::addCluster: invalid scale"); for(int j = 0; j < _cluster_tree[scale_id].size(); ++j){ checkAndThrowLogic(cluster.id()!=_cluster_tree[scale_id][j].id(),"ClusterHierarchy::addCluster: duplicated id"); } if(scale_id == _cluster_tree.size()-1){ checkAndThrowLogic(cluster.parent_id()==Cluster::NULL_LINK,"ClusterHierarchy::addCluster: root clusters must have parent_id = Cluster::NULL_LINK"); }else{ checkAndThrowLogic(cluster.parent_id()!=Cluster::NULL_LINK,"ClusterHierarchy::addCluster: non-root clusters must have parent_id != Cluster::NULL_LINK"); } _cluster_tree[scale_id].push_back(cluster); } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::removeCluster(unsigned_int_type scale_id, int_type cluster_id){ checkAndThrowLogic(scale_id < _cluster_tree.size(), "ClusterHierarchy::removeCluster: invalid scale"); for(int i = 0; i < _cluster_tree[scale_id].size(); ++i){ 
if(_cluster_tree[scale_id][i].id() == cluster_id){
            _cluster_tree[scale_id].erase(_cluster_tree[scale_id].begin()+i);
            break; // ids are unique per scale (enforced by addCluster), so at most one match
        }
    }
}

//! Check whether a cluster with the given id exists at the given scale.
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::hasClusterId(unsigned_int_type scale_id, int_type cluster_id)const{
    checkAndThrowLogic(scale_id < _cluster_tree.size(), "ClusterHierarchy::hasClusterId: invalid scale");
    for(int j = 0; j < _cluster_tree[scale_id].size(); ++j){
        if(cluster_id==_cluster_tree[scale_id][j].id()){return true;}
    }
    return false;
}

//! Return a const reference to the cluster with the given id at the given scale.
//! Throws (via checkAndThrowLogic) if the cluster does not exist.
template <typename scalar_type, typename sparse_scalar_matrix_type>
const typename HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::cluster_type& HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::cluster(unsigned_int_type scale_id, int_type cluster_id)const{
    checkAndThrowLogic(hasClusterId(scale_id, cluster_id), "ClusterHierarchy::cluster: invalid cluster");
    for(int j = 0; j < _cluster_tree[scale_id].size(); ++j){
        if(cluster_id==_cluster_tree[scale_id][j].id()){
            return _cluster_tree[scale_id][j];
        }
    }
    // Defensive: unreachable after the hasClusterId check above.
    throw std::logic_error("Invalid cluster");
    //return cluster_type(); //INVALID
}

//! Validate that a cluster's recorded parent matches the parent implied by the
//! area of influence of its landmarks at the next (coarser) scale.
template <typename scalar_type, typename sparse_scalar_matrix_type>
bool HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::checkCluterConsistency(const HierarchicalSNE& hsne, unsigned_int_type scale_id, int_type cluster_id){
    checkAndThrowLogic(hasClusterId(scale_id, cluster_id), "ClusterHierarchy::checkCluterConsistency: invalid cluster");
    // Clusters at the top scale are roots and are considered valid by definition.
    if(scale_id == _cluster_tree.size()-1){
        std::stringstream ss;
        ss << "Validating cluster " << cluster_id << " at scale " << scale_id << ":\tis a root node => valid";
        utils::secureLog(_logger,ss.str());
        return true;
    }
    // Locate the cluster's position in the per-scale vector (last match wins;
    // ids are unique per scale so there is at most one).
    int_type cluster_id_in_vector = -1;
    for(int j = 0; j < _cluster_tree[scale_id].size(); ++j){
        if(cluster_id==_cluster_tree[scale_id][j].id()){
            cluster_id_in_vector = j;
        }
    }
    std::vector<scalar_type>
influence(_cluster_tree[scale_id+1].size(),0); scalar_type unclustered_influence(0); auto& scale = hsne.scale(scale_id+1); for(auto e: _cluster_tree[scale_id][cluster_id_in_vector].landmarks()){ for(auto aoi: scale._area_of_influence[e]){ bool found = false; for(int i = 0; i < influence.size(); ++i){ auto it = _cluster_tree[scale_id+1][i].landmarks().find(aoi.first); if(it != _cluster_tree[scale_id+1][i].landmarks().end()){ influence[i] += aoi.second; found = true; } } if(!found){ unclustered_influence += aoi.second; } } } std::stringstream ss; ss << "Validating cluster " << cluster_id << " at scale " << scale_id << " with parent " << _cluster_tree[scale_id][cluster_id_in_vector].parent_id() << " (" << _cluster_tree[scale_id][cluster_id_in_vector].notes() << ")" << std::endl; ss << "\tUnclusterd:\t" << unclustered_influence << std::endl; scalar_type max(unclustered_influence); int_type res_id(-1); for(int i = 0; i < influence.size(); ++i){ ss << "\tCluster-" << _cluster_tree[scale_id+1][i].id() << " (" << _cluster_tree[scale_id+1][i].notes() << ") :\t" << influence[i] << std::endl; if(influence[i] > max){ max = influence[i]; res_id = _cluster_tree[scale_id+1][i].id(); } } utils::secureLog(_logger,ss.str()); if(res_id == _cluster_tree[scale_id][cluster_id_in_vector].parent_id()){ utils::secureLog(_logger,"Valid"); return true; } utils::secureLog(_logger,"INVALID!"); return false; } template <typename scalar_type, typename sparse_scalar_matrix_type> bool HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::checkTreeConsistency(const HierarchicalSNE& hsne){ bool res = true; for(int s = _cluster_tree.size()-1; s >= 0 ; --s){ for(int c = 0; c < _cluster_tree[s].size(); ++c){ res &= checkCluterConsistency(hsne,s,_cluster_tree[s][c].id()); } } return res; } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::computePointToClusterAssociation(const HierarchicalSNE& 
hsne, unsigned_int_type pnt_id, std::tuple<unsigned_int_type,int_type,scalar_type>& res){ std::vector<std::unordered_map<unsigned_int_type,scalar_type>> influence; hsne.getInfluenceOnDataPoint(pnt_id,influence); res = std::tuple<unsigned_int_type,int_type,scalar_type>(_cluster_tree.size()-1,-1,1); std::vector<unsigned_int_type> clusters_to_analyze(_cluster_tree[_cluster_tree.size()-1].size()); std::iota(clusters_to_analyze.begin(),clusters_to_analyze.end(),0); //just for test for(int s = _cluster_tree.size()-1; s >= 0 && clusters_to_analyze.size(); --s){ unsigned_int_type scale_id = s; std::vector<scalar_type> cluster_influence(clusters_to_analyze.size(),0); scalar_type unclustered_influence(0); for(auto aoi: influence[scale_id]){ bool found = false; for(int i = 0; i < clusters_to_analyze.size(); ++i){ auto it = _cluster_tree[scale_id][clusters_to_analyze[i]].landmarks().find(aoi.first); if(it != _cluster_tree[scale_id][clusters_to_analyze[i]].landmarks().end()){ cluster_influence[i] += aoi.second; found = true; } } if(!found){ unclustered_influence += aoi.second; } } scalar_type max(unclustered_influence); int_type cluster_id(-1); for(int i = 0; i < clusters_to_analyze.size(); ++i){ if(cluster_influence[i] > max){ max = cluster_influence[i]; cluster_id = _cluster_tree[scale_id][clusters_to_analyze[i]].id(); } } if(cluster_id == -1){ return; } res = std::tuple<unsigned_int_type,int_type,scalar_type>(scale_id,cluster_id,max); //compute children nodes clusters_to_analyze.clear(); if(s != 0){ for(int i = 0; i < _cluster_tree[s-1].size(); ++i){ if(_cluster_tree[s-1][i].parent_id() == cluster_id){ clusters_to_analyze.push_back(i); } } } } } template <typename scalar_type, typename sparse_scalar_matrix_type> void HierarchicalSNE<scalar_type,sparse_scalar_matrix_type>::ClusterTree::computePointsToClusterAssociation(const HierarchicalSNE& hsne, std::vector<std::tuple<unsigned_int_type,int_type,scalar_type>>& res){ res.resize(hsne.scale(0).size()); //#ifdef __USE_GCD__ // 
std::cout << "GCD dispatch, hierarchical_sne_inl 1227.\n"; // dispatch_apply(res.size(), dispatch_get_global_queue(0, 0), ^(size_t i) { //#else #pragma omp parallel for for(int i = 0; i < res.size(); ++i){ //#endif //__USE_GCD__ computePointToClusterAssociation(hsne,i,res[i]); } //#ifdef __USE_GCD__ // ); //#endif } ///////////////////////////////////////////////////////////////////////////////////7 namespace IO{ template <typename hsne_type, class output_stream_type> void saveHSNE(const hsne_type& hsne, output_stream_type& stream, utils::AbstractLog* log){ checkAndThrowLogic(hsne.hierarchy().size(),"Cannot save an empty H-SNE hierarchy!!!"); utils::secureLog(log, "Saving H-SNE hierarchy to file"); typedef float io_scalar_type; typedef float io_unsigned_int_type; //Version io_unsigned_int_type major_version = 0; io_unsigned_int_type minor_version = 0; stream.write(reinterpret_cast<char*>(&major_version),sizeof(io_unsigned_int_type)); stream.write(reinterpret_cast<char*>(&minor_version),sizeof(io_unsigned_int_type)); //Number of scales io_unsigned_int_type num_scales = static_cast<io_unsigned_int_type>(hsne.hierarchy().size()); stream.write(reinterpret_cast<char*>(&num_scales),sizeof(io_unsigned_int_type)); { //The first scale contains only the transition matrix auto& scale = hsne.scale(0); io_unsigned_int_type n = static_cast<io_unsigned_int_type>(scale.size()); utils::secureLogValue(log, "Saving scale",0); utils::secureLog(log, "\tsize",n); stream.write(reinterpret_cast<char*>(&n),sizeof(io_unsigned_int_type)); utils::secureLog(log, "\t... transition matrix ..."); data::IO::saveSparseMatrix(scale._transition_matrix,stream,log); } for(int s = 1; s < num_scales; ++s){ auto& scale = hsne.scale(s); io_unsigned_int_type n = static_cast<io_unsigned_int_type>(scale.size()); utils::secureLogValue(log, "Saving scale",s); utils::secureLogValue(log, "\tsize",n); stream.write(reinterpret_cast<char*>(&n),sizeof(io_unsigned_int_type)); utils::secureLog(log, "\t... 
transition matrix ..."); data::IO::saveSparseMatrix(scale._transition_matrix,stream,log); utils::secureLog(log, "\t... landmarks to original data ..."); data::IO::saveUIntVector(scale._landmark_to_original_data_idx,stream,log); utils::secureLog(log, "\t... landmarks to previous scale ..."); data::IO::saveUIntVector(scale._landmark_to_previous_scale_idx,stream,log); utils::secureLog(log, "\t... landmark weights ..."); data::IO::saveScalarVector(scale._landmark_weight,stream,log); utils::secureLog(log, "\t... previous scale to current scale landmarks ..."); data::IO::saveIntVector(scale._previous_scale_to_landmark_idx,stream,log); utils::secureLog(log, "\t... area of influence ..."); data::IO::saveSparseMatrix(scale._area_of_influence,stream,log); } } /////////////////////////////////////////////////////// template <typename hsne_type, class input_stream_type> void loadHSNE(hsne_type& hsne, input_stream_type& stream, utils::AbstractLog* log){ utils::secureLog(log, "Loading H-SNE hierarchy from file"); typedef float io_scalar_type; typedef float io_unsigned_int_type; //Version io_unsigned_int_type major_version = 0; io_unsigned_int_type minor_version = 0; stream.read(reinterpret_cast<char*>(&major_version),sizeof(io_unsigned_int_type)); stream.read(reinterpret_cast<char*>(&minor_version),sizeof(io_unsigned_int_type)); checkAndThrowRuntime(major_version == 0,"Invalid major version"); checkAndThrowRuntime(minor_version == 0,"Invalid minor version"); //Number of scales io_unsigned_int_type num_scales; stream.read(reinterpret_cast<char*>(&num_scales),sizeof(io_unsigned_int_type)); checkAndThrowRuntime(num_scales > 0 ,"Cannot load an empty hierarchy"); { hsne.hierarchy().clear(); hsne.hierarchy().push_back(typename hsne_type::Scale()); auto& scale = hsne.scale(0); io_unsigned_int_type n = static_cast<io_unsigned_int_type>(scale.size()); utils::secureLogValue(log, "Loading scale",0); stream.read(reinterpret_cast<char*>(&n),sizeof(io_unsigned_int_type)); 
utils::secureLog(log, "\tsize",n); utils::secureLog(log, "\t... transition matrix ..."); data::IO::loadSparseMatrix(scale._transition_matrix,stream,log); utils::secureLog(log, "\t... (init) landmarks to original data ..."); scale._landmark_to_original_data_idx.resize(n); std::iota(scale._landmark_to_original_data_idx.begin(),scale._landmark_to_original_data_idx.end(),0); utils::secureLog(log, "\t... (init) landmarks to previous scale ..."); scale._landmark_to_previous_scale_idx.resize(n); std::iota(scale._landmark_to_previous_scale_idx.begin(),scale._landmark_to_previous_scale_idx.end(),0); utils::secureLog(log, "\t... (init) landmark weights ..."); scale._landmark_weight.resize(n,1); } for(int s = 1; s < num_scales; ++s){ hsne.hierarchy().push_back(typename hsne_type::Scale()); auto& scale = hsne.scale(s); io_unsigned_int_type n; utils::secureLogValue(log, "Loading scale",s); stream.read(reinterpret_cast<char*>(&n),sizeof(io_unsigned_int_type)); utils::secureLogValue(log, "\tsize",n); utils::secureLog(log, "\t... transition matrix ..."); data::IO::loadSparseMatrix(scale._transition_matrix,stream,log); utils::secureLog(log, "\t... landmarks to original data ..."); data::IO::loadUIntVector(scale._landmark_to_original_data_idx,stream,log); utils::secureLog(log, "\t... landmarks to previous scale ..."); data::IO::loadUIntVector(scale._landmark_to_previous_scale_idx,stream,log); utils::secureLog(log, "\t... landmark weights ..."); data::IO::loadScalarVector(scale._landmark_weight,stream,log); utils::secureLog(log, "\t... previous scale to current scale landmarks ..."); data::IO::loadIntVector(scale._previous_scale_to_landmark_idx,stream,log); utils::secureLog(log, "\t... area of influence ..."); data::IO::loadSparseMatrix(scale._area_of_influence,stream,log); } } } } } #endif
brisched.h
#pragma omp parallel for collapse(2) for (long k = GZ / TILEK; k < (N + GZ) / TILEK; ++k) for (long j = GZ / TILEJ; j < (N + GZ) / TILEJ; ++j) for (long i = GZ / TILEI; i < (N + GZ) / TILEI; ++i)
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
switch-1.c
#include <assert.h> #define s 100 #pragma omp declare target int switch1 (int a) { switch (a) { case 1: return 11; case 33: return 333; case 55: return 55; default: return -1; } } int switch2 (int a) { switch (a) { case 1 ... 11: return 11; break; case 33: return 333; break; case 55: return 55; break; default: return -1; } } int switch3 (int a) { switch (a) { case 1 ... 11: return 11; case 12 ... 22: return 22; case 23 ... 33: return 33; case 34 ... 44: return 44; default: return 44; } } int switch4 (int a, int b) { switch (a) { case 1 ... 11: return a; case 12 ... 22: return b; case 23 ... 33: return a; case 34 ... 44: return b; default: return 12345; } } int switch5 (int a, int b) { switch (a) { case 1 ... 2: return 1; case 3 ... 4: return 2; case 5 ... 6: return 3; case 7 ... 11: return 4; } return -1; } #pragma omp end declare target int main (int argc) { int array[s]; #pragma omp target map(tofrom : array[:s]) { for (int i = 0; i < s; i++) array[i] = switch1 (i); } for (int i = 0; i < s; i++) assert (array[i] == switch1 (i)); #pragma omp target map(tofrom : array[:s]) { for (int i = 0; i < s; i++) array[i] = switch2 (i); } for (int i = 0; i < s; i++) assert (array[i] == switch2 (i)); #pragma omp target map(tofrom : array[:s]) { for (int i = 0; i < s; i++) array[i] = switch3 (i); } for (int i = 0; i < s; i++) assert (array[i] == switch3 (i)); #pragma omp target map(tofrom : array[:s]) { for (int i = 0; i < s; i++) array[i] = switch4 (i, i + 1); } for (int i = 0; i < s; i++) assert (array[i] == switch4 (i, i + 1)); #pragma omp target map(tofrom : array[:s]) { for (int i = 0; i < s; i++) array[i] = switch5 (i, i + 1); } for (int i = 0; i < s; i++) assert (array[i] == switch5 (i, i + 1)); }
par_csr_matvec.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Matvec functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec
 *--------------------------------------------------------------------------*/

// y = alpha*A*x + beta*b
/* Distributed matvec: packs the needed x entries, starts a nonblocking halo
 * exchange, performs the local diag part overlapped with communication, then
 * finishes with the offd part on the received data.  The returned ierr
 * (0/11/12/13) is informational only -- see the size-check comment below. */
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex       alpha,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector    *x,
                                    HYPRE_Complex       beta,
                                    hypre_ParVector    *b,
                                    hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *x_tmp;
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;
   HYPRE_Int vecstride = hypre_VectorVectorStride( x_local );
   HYPRE_Int idxstride = hypre_VectorIndexStride( x_local );
   HYPRE_Complex *x_tmp_data, **x_buf_data;
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);

#if defined(HYPRE_USING_GPU)
   HYPRE_Int sync_stream;
   hypre_GetSyncCudaCompute(&sync_stream);
   hypre_SetSyncCudaCompute(0);
#endif

   HYPRE_ANNOTATE_FUNC_BEGIN;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_assert( idxstride > 0 );

   if (num_cols != x_size)
   {
      ierr = 11;
   }

   if (num_rows != y_size || num_rows != b_size)
   {
      ierr = 12;
   }

   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
   {
      ierr = 13;
   }

   hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );

   if ( num_vectors == 1 )
   {
      x_tmp = hypre_SeqVectorCreate( num_cols_offd );
   }
   else
   {
      hypre_assert( num_vectors > 1 );
      x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg,
                                                                  hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
   hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
   }

   /* x_tmp */
#if defined(HYPRE_USING_GPU)
   /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */
   if (num_vectors == 1)
   {
      if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
      {
#if 1
         hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                             HYPRE_MEMORY_DEVICE);
#else
         hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                              hypre_MEMORY_DEVICE);
#endif
      }
      hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
   }
#else
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(
                                   persistent_comm_handle);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
   }
#endif

   hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE);
   x_tmp_data = hypre_VectorData(x_tmp);

   /* x_buff_data */
   x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);

   for (jv = 0; jv < num_vectors; ++jv)
   {
#if defined(HYPRE_USING_GPU)
      if (jv == 0)
      {
         if (!hypre_ParCSRCommPkgBufData(comm_pkg))
         {
#if 1
            hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
                                                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                HYPRE_MEMORY_DEVICE);
#else
            hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
                                                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                 hypre_MEMORY_DEVICE);
#endif
         }
         x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
         continue;
      }
#endif
      if (use_persistent_comm)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         /* persistent comm is only enabled for num_vectors == 1, so jv == 0 here */
         x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
         continue;
#endif
      }

      x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
                                    hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                    HYPRE_MEMORY_DEVICE);
   }

   /* The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more generally,
      at least for 'row' storage. This in turn, means either change CommPkg so
      num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
      a stride in the logic of CommHandleCreate (stride either from a new arg or
      a new variable inside CommPkg).  Or put the num_vector iteration inside
      CommHandleCreate (perhaps a new multivector variant of it).
   */
   hypre_assert( idxstride == 1 );

   //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE);

   /* send_map_elmts on device */
   hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);

   for (jv = 0; jv < num_vectors; ++jv)
   {
      HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv];
      HYPRE_Complex *locl_data = x_local_data + jv * vecstride;

      /* if on device, no need to Sync: send_data is on device memory */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      /* pack send data on device */
      HYPRE_THRUST_CALL( gather,
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) +
                         hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                         locl_data,
                         send_data );
#elif defined(HYPRE_USING_DEVICE_OPENMP)
      /* pack send data on device */
      HYPRE_Int i;
      HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
      HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts)
      for (i = start; i < end; i++)
      {
         send_data[i] = locl_data[device_send_map_elmts[i]];
      }
#else
      HYPRE_Int i;
      /* pack send data on host */
#if defined(HYPRE_USING_OPENMP)
      #pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
           i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
           i ++)
      {
         send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
      }
#endif
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication starts */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE,
                                            x_buf_data[0]);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg,
                                                            HYPRE_MEMORY_DEVICE, x_buf_data[jv],
                                                            HYPRE_MEMORY_DEVICE, &x_tmp_data[jv * num_cols_offd] );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* overlapped local computation */
   hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication ends */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* computation offd part */
   if (num_cols_offd)
   {
      hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local );
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   hypre_SeqVectorDestroy(x_tmp);
   x_tmp = NULL;

   if (!use_persistent_comm)
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
#if defined(HYPRE_USING_GPU)
         /* jv == 0 uses the persistent comm_pkg buffer -- not freed here */
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
   }

#if defined(HYPRE_USING_GPU)
   hypre_SetSyncCudaCompute(sync_stream);
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   HYPRE_ANNOTATE_FUNC_END;

   return ierr;
}

/* In-place variant: y = alpha*A*x + beta*y (b aliased to y). */
HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex       alpha,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector    *x,
                          HYPRE_Complex       beta,
                          hypre_ParVector    *y )
{
   return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvecT
 *
 * Performs y <- alpha * A^T * x + beta * y
 *
 *--------------------------------------------------------------------------*/

/* Transpose matvec: computes the offd^T contribution into y_tmp first, sends
 * it (reverse halo exchange), overlaps with the local diag^T part, and
 * finally scatter-adds the received contributions into y.  Uses the stored
 * diagT/offdT transposes when present. */
HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex       alpha,
                           hypre_ParCSRMatrix *A,
                           hypre_ParVector    *x,
                           HYPRE_Complex       beta,
                           hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);
   hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *y_tmp;
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;
   HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
   HYPRE_Int idxstride = hypre_VectorIndexStride(y_local);
   HYPRE_Complex *y_tmp_data, **y_buf_data;
   HYPRE_Complex *y_local_data = hypre_VectorData(y_local);

#if defined(HYPRE_USING_GPU)
   HYPRE_Int sync_stream;
   hypre_GetSyncCudaCompute(&sync_stream);
   hypre_SetSyncCudaCompute(0);
#endif

   HYPRE_ANNOTATE_FUNC_BEGIN;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   if (num_rows != x_size)
   {
      ierr = 1;
   }

   if (num_cols != y_size)
   {
      ierr = 2;
   }

   if (num_rows != x_size && num_cols != y_size)
   {
      ierr = 3;
   }

   hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );

   if ( num_vectors == 1 )
   {
      y_tmp = hypre_SeqVectorCreate(num_cols_offd);
   }
   else
   {
      hypre_assert( num_vectors > 1 );
      y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors);
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg,
                                                                  hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
   hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
   }

   /* y_tmp */
#if defined(HYPRE_USING_GPU)
   /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */
   if (num_vectors == 1)
   {
      if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
      {
#if 1
         hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                             HYPRE_MEMORY_DEVICE);
#else
         hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                              hypre_MEMORY_DEVICE);
#endif
      }
      hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
      hypre_SeqVectorSetDataOwner(y_tmp, 0);
   }
#else
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(
                                   persistent_comm_handle);
      hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
   }
#endif

   hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE);
   y_tmp_data = hypre_VectorData(y_tmp);

   /* y_buf_data */
   y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);

   for (jv = 0; jv < num_vectors; ++jv)
   {
#if defined(HYPRE_USING_GPU)
      if (jv == 0)
      {
         if (!hypre_ParCSRCommPkgBufData(comm_pkg))
         {
#if 1
            hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
                                                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                HYPRE_MEMORY_DEVICE);
#else
            hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
                                                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                 hypre_MEMORY_DEVICE);
#endif
         }
         y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
         continue;
      }
#endif
      if (use_persistent_comm)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         /* persistent comm is only enabled for num_vectors == 1, so jv == 0 here */
         y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
         continue;
#endif
      }

      y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
                                    hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                    HYPRE_MEMORY_DEVICE);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   if (num_cols_offd)
   {
      if (offdT)
      {
         // offdT is optional. Used only if it's present
         hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp);
      }
      else
      {
         hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         /* this is where we assume multivectors are 'column' storage */
         comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg,
                                                            HYPRE_MEMORY_DEVICE, &y_tmp_data[jv * num_cols_offd],
                                                            HYPRE_MEMORY_DEVICE, y_buf_data[jv] );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* overlapped local computation */
   if (diagT)
   {
      // diagT is optional. Used only if it's present.
      hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local);
   }
   else
   {
      hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication ends */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   /* The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more generally,
      at least for 'row' storage. This in turn, means either change CommPkg so
      num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
      a stride in the logic of CommHandleCreate (stride either from a new arg or
      a new variable inside CommPkg).  Or put the num_vector iteration inside
      CommHandleCreate (perhaps a new multivector variant of it).
   */
   hypre_assert( idxstride == 1 );

   /* send_map_elmts on device */
   hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);

   for (jv = 0; jv < num_vectors; ++jv)
   {
      HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv];
      HYPRE_Complex *locl_data = y_local_data + jv * vecstride;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      /* unpack recv data on device */
      if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg))
      {
         hypre_ParCSRCommPkgWorkSpace(comm_pkg) =
            hypre_TAlloc( char,
                          (2 * sizeof(HYPRE_Int) + sizeof(HYPRE_Real)) *
                          hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                          HYPRE_MEMORY_DEVICE );
      }
      hypreDevice_GenScatterAdd(locl_data,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                                recv_data,
                                hypre_ParCSRCommPkgWorkSpace(comm_pkg));
#elif defined(HYPRE_USING_DEVICE_OPENMP)
      HYPRE_Int i, j;
      /* unpack recv data on device */
      for (i = 0; i < num_sends; i++)
      {
         HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
         HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1);
         #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts)
         for (j = start; j < end; j++)
         {
            locl_data[device_send_map_elmts[j]] += recv_data[j];
         }
      }
#else
      HYPRE_Int i;
      /* unpack recv data on host, TODO OMP? */
      for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
           i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
           i ++)
      {
         locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)] += recv_data[i];
      }
#endif
   }

   hypre_SeqVectorDestroy(y_tmp);
   y_tmp = NULL;

   if (!use_persistent_comm)
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
#if defined(HYPRE_USING_GPU)
         /* jv == 0 uses the persistent comm_pkg buffer -- not freed here */
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST);
   }

#if defined(HYPRE_USING_GPU)
   hypre_SetSyncCudaCompute(sync_stream);
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   HYPRE_ANNOTATE_FUNC_END;

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec_FF
 *--------------------------------------------------------------------------*/

/* Restricted matvec: like hypre_ParCSRMatrixMatvec but only rows/columns
 * selected by CF_marker/fpt participate (selection is applied inside
 * hypre_CSRMatrixMatvec_FF).  Host-only MPI path; ierr as in ParMatvec. */
HYPRE_Int
hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex       alpha,
                             hypre_ParCSRMatrix *A,
                             hypre_ParVector    *x,
                             HYPRE_Complex       beta,
                             hypre_ParVector    *y,
                             HYPRE_Int          *CF_marker,
                             HYPRE_Int           fpt )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   /* NOTE(review): x_tmp is only created when num_cols_offd != 0, yet it is
    * passed to hypre_SeqVectorDestroy unconditionally when num_procs > 1 --
    * looks like an uninitialized-pointer use in that corner case; confirm. */
   hypre_Vector *x_tmp;
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, i, j, index, start, num_procs;
   HYPRE_Int *int_buf_data = NULL;
   HYPRE_Int *CF_marker_offd = NULL;
   HYPRE_Complex *x_tmp_data = NULL;
   HYPRE_Complex *x_buf_data = NULL;
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);

   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_MPI_Comm_size(comm, &num_procs);

   if (num_cols != x_size)
   {
      ierr = 11;
   }

   if (num_rows != y_size)
   {
      ierr = 12;
   }

   if (num_cols != x_size && num_rows != y_size)
   {
      ierr = 13;
   }

   if (num_procs > 1)
   {
      if (num_cols_offd)
      {
         x_tmp = hypre_SeqVectorCreate( num_cols_offd );
         hypre_SeqVectorInitialize(x_tmp);
         x_tmp_data = hypre_VectorData(x_tmp);
      }

      /*---------------------------------------------------------------------
       * If there exists no CommPkg for A, a CommPkg is generated using
       * equally load balanced partitionings
       *--------------------------------------------------------------------*/
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      }

      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (num_sends)
         x_buf_data = hypre_CTAlloc(HYPRE_Complex,  hypre_ParCSRCommPkgSendMapStart
                                    (comm_pkg,  num_sends), HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data );
   }

   hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt);

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      if (num_sends)
         int_buf_data = hypre_CTAlloc(HYPRE_Int,  hypre_ParCSRCommPkgSendMapStart
                                      (comm_pkg,  num_sends), HYPRE_MEMORY_HOST);
      if (num_cols_offd)
      {
         CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      }
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd );
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;

      if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local,
                                                   CF_marker, CF_marker_offd, fpt);

      hypre_SeqVectorDestroy(x_tmp);
      x_tmp = NULL;
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   }

   return ierr;
}
mhpTest8.c
void foo() { int x = 10; foo(); #pragma omp barrier } int foobar() { } int bar(){ foobar(); } void painter() { printf("Something"); } int main() { #pragma omp parallel { int z = 10; foo(); int q = 40; } foo(); int y = 20; int p = bar(); printf("%d", p); painter(); }
GB_unop__identity_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any change made here will be lost on regeneration; edit the
// Generator/ template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__(none))
// op(A') function:  GB (_unop_tran__identity_uint32_uint32)

// C type:   uint32_t
// A type:   uint32_t
// cast:     uint32_t cij = aij
// unaryop:  cij = aij

// For the identity operator on matching types, apply/cast/op all reduce to a
// plain copy, which is why the apply kernel below is compiled out (#if 0):
// the generic memcpy path handles it.

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = aij ;              \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dead code retained by the generator: identity-on-same-type apply is served
// by the generic copy path, so this specialized kernel is never compiled.
#if 0

GrB_Info GB (_unop_apply__(none))
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the GB_unop_transpose.c template, which expands the
// GB_* macros defined above for this type/operator combination.
GrB_Info GB (_unop_tran__identity_uint32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__pow_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__pow_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__pow_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_fc32) // C=scalar+B GB (_bind1st__pow_fc32) // C=scalar+B' GB (_bind1st_tran__pow_fc32) // C=A+scalar GB (_bind2nd__pow_fc32) // C=A'+scalar GB (_bind2nd_tran__pow_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern? 
0 // BinaryOp: cij = GB_cpowf (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_cpowf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FC32 || GxB_NO_POW_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t 
*restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_fc32) ( GrB_Matrix C, 
const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_cpowf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_cpowf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_cpowf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_cpowf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GraphBLAS.h
//------------------------------------------------------------------------------ // GraphBLAS.h: definitions for the GraphBLAS package //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS is a complete implementation of the GraphBLAS // standard, which defines a set of sparse matrix operations on an extended // algebra of semirings, using an almost unlimited variety of operators and // types. When applied to sparse adjacency matrices, these algebraic // operations are equivalent to computations on graphs. GraphBLAS provides a // powerful and expressive framework creating graph algorithms based on the // elegant mathematics of sparse matrix operations on a semiring. // This GraphBLAS.h file contains GraphBLAS definitions for user applications // to #include. A few functions and variables with the prefix GB_ need to be // defined in this file and are thus technically visible to the user, but they // must not be accessed in user code. They are here only so that the ANSI C11 // _Generic feature can be used in the user-accessible polymorphic functions, // or to implement a fast GxB_Iterator using macros. // This implementation conforms to the GraphBLAS API Specification and also // includes functions and features that are extensions to the spec, which are // given names of the form GxB_* for functions, built-in objects, and macros, // so it is clear which are in the spec and which are extensions. Extensions // with the name GxB_* are user-accessible in SuiteSparse:GraphBLAS but cannot // be guaranteed to appear in all GraphBLAS implementations. // Regarding "historical" functions and symbols: when a GxB_* function or // symbol is added to the C API Specification, the new GrB_* name should be // used instead. 
The old GxB_* name will be kept for historical reasons, // documented here and in working order; it might no longer be mentioned in the // user guide. Historical functions and symbols would only be removed in the // rare case that they cause a serious conflict with future methods. #ifndef GRAPHBLAS_H #define GRAPHBLAS_H //============================================================================== // include files required by GraphBLAS //============================================================================== #include <stdio.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdbool.h> #include <stdint.h> #include <inttypes.h> #include <stddef.h> #include <limits.h> #include <math.h> #include <stdarg.h> //============================================================================== // renaming for use in R2021a or later //============================================================================== #define GB_CAT2(x,y) x ## y #define GB_EVAL2(x,y) GB_CAT2 (x,y) #ifdef GBRENAME // All symbols must be renamed for the @GrB interface when using // R2021a and following, since those versions include an earlier // version of SuiteSparse:GraphBLAS. #define GB(x) GB_EVAL2 (GM_, x) #define GRB(x) GB_EVAL2 (GrM_, x) #define GXB(x) GB_EVAL2 (GxM_, x) #define GrB GrM #define GxB GxM #include "GB_rename.h" #else // Use the standard GraphBLAS prefix. 
#define GB(x) GB_EVAL2 (GB_, x) #define GRB(x) GB_EVAL2 (GrB_, x) #define GXB(x) GB_EVAL2 (GxB_, x) #endif //============================================================================== // compiler variations //============================================================================== // Exporting/importing symbols for Microsoft Visual Studio #if ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) ) #ifdef GB_LIBRARY // compiling SuiteSparse:GraphBLAS itself, exporting symbols to user apps #define GB_PUBLIC extern __declspec ( dllexport ) #else // compiling the user application, importing symbols from SuiteSparse:GraphBLAS #define GB_PUBLIC extern __declspec ( dllimport ) #endif #else // for other compilers #define GB_PUBLIC extern #endif // GraphBLAS requires an ANSI C11 compiler for its polymorphic functions (using // the _Generic keyword), but it can be used in an C90 compiler if those // functions are disabled. // With ANSI C11 and later, _Generic keyword and polymorphic functions can be // used. Earlier versions of the language do not have this feature. 
#ifdef __STDC_VERSION__ // ANSI C17: 201710L // ANSI C11: 201112L // ANSI C99: 199901L // ANSI C95: 199409L #define GxB_STDC_VERSION __STDC_VERSION__ #else // assume ANSI C90 / C89 #define GxB_STDC_VERSION 199001L #endif //------------------------------------------------------------------------------ // definitions for complex types, and restrict keyword //------------------------------------------------------------------------------ #undef GB_restrict // See: // https://www.drdobbs.com/complex-arithmetic-in-the-intersection-o/184401628# #if defined ( __cplusplus ) extern "C++" { // C++ complex types #include <cmath> #include <complex> #undef I typedef std::complex<float> GxB_FC32_t ; typedef std::complex<double> GxB_FC64_t ; } #define GxB_CMPLXF(r,i) GxB_FC32_t(r,i) #define GxB_CMPLX(r,i) GxB_FC64_t(r,i) #define GB_restrict #elif ( _MSC_VER && !(__INTEL_COMPILER || __INTEL_CLANG_COMPILER) ) // Microsoft Windows complex types #include <complex.h> #undef I typedef _Fcomplex GxB_FC32_t ; typedef _Dcomplex GxB_FC64_t ; #define GxB_CMPLXF(r,i) (_FCbuild (r,i)) #define GxB_CMPLX(r,i) ( _Cbuild (r,i)) #define GB_restrict __restrict #else // ANSI C11 complex types #include <complex.h> #undef I typedef float complex GxB_FC32_t ; typedef double complex GxB_FC64_t ; #ifndef CMPLX // gcc 6.2 on the the Mac doesn't #define CMPLX #define GxB_CMPLX(r,i) \ ((GxB_FC64_t)((double)(r)) + (GxB_FC64_t)((double)(i) * _Complex_I)) #else // use the ANSI C11 CMPLX macro #define GxB_CMPLX(r,i) CMPLX (r,i) #endif #ifndef CMPLXF // gcc 6.2 on the the Mac doesn't #define CMPLXF #define GxB_CMPLXF(r,i) \ ((GxB_FC32_t)((float)(r)) + (GxB_FC32_t)((float)(i) * _Complex_I)) #else // use the ANSI C11 CMPLXF macro #define GxB_CMPLXF(r,i) CMPLXF (r,i) #endif // restrict keyword #if defined ( __NVCC__ ) // NVIDIA nvcc #define GB_restrict __restrict__ #elif GxB_STDC_VERSION >= 199901L // ANSI C99 or later #define GB_restrict restrict #else // ANSI C95 and earlier: no restrict keyword #define 
GB_restrict #endif #endif //============================================================================== // version control //============================================================================== // There are two version numbers that user codes can check against with // compile-time #if tests: the version of this GraphBLAS implementation, // and the version of the GraphBLAS specification it conforms to. User code // can use tests like this: // // #if GxB_SPEC_VERSION >= GxB_VERSION (2,0,3) // ... use features in GraphBLAS specification 2.0.3 ... // #else // ... only use features in early specifications // #endif // // #if GxB_IMPLEMENTATION > GxB_VERSION (1,4,0) // ... use features from version 1.4.0 of a GraphBLAS package // #endif // X_GRAPHBLAS: names this particular implementation: #define GxB_SUITESPARSE_GRAPHBLAS // GxB_VERSION: a single integer for comparing spec and version levels #define GxB_VERSION(major,minor,sub) \ (((major)*1000ULL + (minor))*1000ULL + (sub)) // The version of this implementation, and the GraphBLAS API version: #define GxB_IMPLEMENTATION_NAME "SuiteSparse:GraphBLAS" #define GxB_IMPLEMENTATION_DATE "Mar 14, 2022" #define GxB_IMPLEMENTATION_MAJOR 6 #define GxB_IMPLEMENTATION_MINOR 2 #define GxB_IMPLEMENTATION_SUB 5 #define GxB_SPEC_DATE "Nov 15, 2021" #define GxB_SPEC_MAJOR 2 #define GxB_SPEC_MINOR 0 #define GxB_SPEC_SUB 0 // compile-time access to the C API Version number of this library. #define GRB_VERSION GxB_SPEC_MAJOR #define GRB_SUBVERSION GxB_SPEC_MINOR #define GxB_IMPLEMENTATION \ GxB_VERSION (GxB_IMPLEMENTATION_MAJOR, \ GxB_IMPLEMENTATION_MINOR, \ GxB_IMPLEMENTATION_SUB) // The 'about' string the describes this particular implementation of GraphBLAS: #define GxB_IMPLEMENTATION_ABOUT \ "SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \ "\nhttp://suitesparse.com Dept of Computer Sci. 
& Eng, Texas A&M University.\n" // The GraphBLAS license for this particular implementation of GraphBLAS: #define GxB_IMPLEMENTATION_LICENSE \ "SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved." \ "\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may\n"\ "not use SuiteSparse:GraphBLAS except in compliance with the License. You\n" \ "may obtain a copy of the License at\n\n" \ " http://www.apache.org/licenses/LICENSE-2.0\n\n" \ "Unless required by applicable law or agreed to in writing, software\n" \ "distributed under the License is distributed on an \"AS IS\" BASIS,\n" \ "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" \ "See the License for the specific language governing permissions and\n" \ "limitations under the License.\n" //------------------------------------------------------------------------------ // GraphBLAS C API version //------------------------------------------------------------------------------ #define GxB_SPEC_VERSION GxB_VERSION(GxB_SPEC_MAJOR,GxB_SPEC_MINOR,GxB_SPEC_SUB) // The 'spec' string describes the GraphBLAS spec: #define GxB_SPEC_ABOUT \ "GraphBLAS C API, by Aydin Buluc, Timothy Mattson, Scott McMillan,\n" \ "Jose' Moreira, Carl Yang, and Benjamin Brock. Based on 'GraphBLAS\n" \ "Mathematics by Jeremy Kepner. See also 'Graph Algorithms in the Language\n" \ "of Linear Algebra,' edited by J. Kepner and J. Gilbert, SIAM, 2011.\n" //============================================================================== // GrB_Index: the GraphBLAS integer //============================================================================== // GrB_Index: row or column index, or matrix dimension. This typedef is used // for row and column indices, or matrix and vector dimensions. typedef uint64_t GrB_Index ; // GrB_INDEX_MAX is the largest permissible index value. The largest valid // matrix or vector dimension is GrB_INDEX_MAX+1, or 2^60 in SuiteSparse:GrB. 
#define GrB_INDEX_MAX ((GrB_Index) (1ULL << 60) - 1) // GxB_INDEX_MAX is historical; use GrB_INDEX_MAX+1 instead. It differs by one // from GrB_INDEX_MAX, since it defined the largest valid matrix or vector // dimension. #define GxB_INDEX_MAX ((GrB_Index) (1ULL << 60)) //============================================================================== // GraphBLAS error and informational codes //============================================================================== // All GraphBLAS functions return a code that indicates if it was successful // or not. If more information is required, the GrB_error function can be // called, which returns a string that provides more information on the last // return value from GraphBLAS. // The v1.3 C API did not specify the enum values, but they appear in v2.0. // Changing them will require SuiteSparse:GraphBLAS to bump to v6.x. // Error codes GrB_NOT_IMPLEMENTED and GrB_EMPTY_OBJECT are new to v2.0. typedef enum { GrB_SUCCESS = 0, // all is well //-------------------------------------------------------------------------- // informational codes, not an error: //-------------------------------------------------------------------------- GrB_NO_VALUE = 1, // A(i,j) requested but not there GxB_EXHAUSTED = 2, // iterator is exhausted //-------------------------------------------------------------------------- // errors: //-------------------------------------------------------------------------- GrB_UNINITIALIZED_OBJECT = -1, // object has not been initialized GrB_NULL_POINTER = -2, // input pointer is NULL GrB_INVALID_VALUE = -3, // generic error; some value is bad GrB_INVALID_INDEX = -4, // row or column index is out of bounds GrB_DOMAIN_MISMATCH = -5, // object domains are not compatible GrB_DIMENSION_MISMATCH = -6, // matrix dimensions do not match GrB_OUTPUT_NOT_EMPTY = -7, // output matrix already has values GrB_NOT_IMPLEMENTED = -8, // method not implemented GrB_PANIC = -101, // unknown error GrB_OUT_OF_MEMORY = -102, // out 
of memory GrB_INSUFFICIENT_SPACE = -103, // output array not large enough GrB_INVALID_OBJECT = -104, // object is corrupted GrB_INDEX_OUT_OF_BOUNDS = -105, // row or col index out of bounds GrB_EMPTY_OBJECT = -106 // an object does not contain a value } GrB_Info ; //============================================================================== // GrB_init / GrB_finalize //============================================================================== // GrB_init must called before any other GraphBLAS operation. GrB_finalize // must be called as the last GraphBLAS operation. // GrB_init defines the mode that GraphBLAS will use: blocking or // non-blocking. With blocking mode, all operations finish before returning to // the user application. With non-blocking mode, operations can be left // pending, and are computed only when needed. // The extension GxB_init does the work of GrB_init, but it also defines the // memory management functions that SuiteSparse:GraphBLAS will use internally. typedef enum { GrB_NONBLOCKING = 0, // methods may return with pending computations GrB_BLOCKING = 1 // no computations are ever left pending } GrB_Mode ; GB_PUBLIC GrB_Info GrB_init // start up GraphBLAS ( GrB_Mode mode // blocking or non-blocking mode ) ; GB_PUBLIC GrB_Info GxB_init // start up GraphBLAS and also define malloc, etc ( GrB_Mode mode, // blocking or non-blocking mode // pointers to memory management functions void * (* user_malloc_function ) (size_t), void * (* user_calloc_function ) (size_t, size_t), void * (* user_realloc_function ) (void *, size_t), void (* user_free_function ) (void *) ) ; GB_PUBLIC GrB_Info GrB_finalize (void) ; // finish GraphBLAS //============================================================================== // GrB_getVersion: GraphBLAS C API version //============================================================================== // GrB_getVersion provides a runtime access of the C API Version. 
GB_PUBLIC GrB_Info GrB_getVersion // runtime access to C API version number ( unsigned int *version, // returns GRB_VERSION unsigned int *subversion // returns GRB_SUBVERSION ) ; //============================================================================== // GrB_Descriptor: the GraphBLAS descriptor //============================================================================== // The GrB_Descriptor is used to modify the behavior of GraphBLAS operations. // // GrB_OUTP: can be GxB_DEFAULT or GrB_REPLACE. If GrB_REPLACE, then C is // cleared after taking part in the accum operation but before the mask. // In other words, C<Mask> = accum (C,T) is split into Z = accum(C,T) ; // C=0 ; C<Mask> = Z. // // GrB_MASK: can be GxB_DEFAULT, GrB_COMP, GrB_STRUCTURE, or set to both // GrB_COMP and GrB_STRUCTURE. If GxB_DEFAULT, the mask is used // normally, where Mask(i,j)=1 means C(i,j) can be modified by C<Mask>=Z, // and Mask(i,j)=0 means it cannot be modified even if Z(i,j) is has been // computed and differs from C(i,j). If GrB_COMP, this is the same as // taking the logical complement of the Mask. If GrB_STRUCTURE is set, // the value of the mask is not considered, just its pattern. The // GrB_COMP and GrB_STRUCTURE settings can be combined. // // GrB_INP0: can be GxB_DEFAULT or GrB_TRAN. If GxB_DEFAULT, the first input // is used as-is. If GrB_TRAN, it is transposed. Only matrices are // transposed this way. Vectors are never transposed via the // GrB_Descriptor. // // GrB_INP1: the same as GrB_INP0 but for the second input // // GxB_NTHREADS: the maximum number of threads to use in the current method. // If <= GxB_DEFAULT (which is zero), then the number of threads is // determined automatically. This is the default value. // // GxB_CHUNK: an integer parameter that determines the number of threads to use // for a small problem. If w is the work to be performed, and chunk is // the value of this parameter, then the # of threads is limited to floor // (w/chunk). 
The default chunk is currently 64K, but this may change in // the future. If chunk is set to <= GxB_DEFAULT (that is, zero), the // default is used. // // GxB_AxB_METHOD: this is a hint to SuiteSparse:GraphBLAS on which algorithm // it should use to compute C=A*B, in GrB_mxm, GrB_mxv, and GrB_vxm. // SuiteSparse:GraphBLAS has four different heuristics, and the default // method (GxB_DEFAULT) selects between them automatically. The complete // rule is in the User Guide. The brief discussion here assumes all // matrices are stored by column. All methods compute the same result, // except that floating-point roundoff may differ when working on // floating-point data types. // // GxB_AxB_SAXPY: C(:,j)=A*B(:,j) is computed using a mix of Gustavson // and Hash methods. Each task in the parallel computation makes its // own decision between these two methods, via a heuristic. // // GxB_AxB_GUSTAVSON: This is the same as GxB_AxB_SAXPY, except that // every task uses Gustavon's method, computing C(:,j)=A*B(:,j) via a // gather/scatter workspace of size equal to the number of rows of A. // Very good general-purpose method, but sometimes the workspace can // be too large when many threads are used. // // GxB_AxB_HASH: This is the same as GxB_AxB_SAXPY, except that every // task uses the Hash method. It is very good for hypersparse // matrices and uses very little workspace, and so it scales well to // many threads. // // GxB_AxB_DOT: computes C(i,j) = A(:,i)'*B(:,j), for each entry C(i,j). // A very specialized method that works well only if the mask is // present, very sparse, and not complemented, or when C is a dense // vector or matrix, or when C is small. // // GxB_SORT: GrB_mxm and other methods may return a matrix in a 'jumbled' // state, with indices out of order. The sort is left pending. Some // methods can tolerate jumbled matrices on input, so this can be faster. // However, in some cases, it can be faster for GrB_mxm to sort its output // as it is computed. 
With GxB_SORT set to GxB_DEFAULT, the sort is left // pending. With GxB_SORT set to a nonzero value, GrB_mxm typically sorts // the resulting matrix C (but not always; this is just a hint). If // GrB_init is called with GrB_BLOCKING mode, the sort will always be // done, and this setting has no effect. // // GxB_COMPRESSION: compression method for GxB_Matrix_serialize and // GxB_Vector_serialize. The default is LZ4. // // GxB_IMPORT: GxB_FAST_IMPORT (faster, for trusted input data) or // GxB_SECURE_IMPORT (slower, for untrusted input data), for the // GxB*_pack* methods. // The following are enumerated values in both the GrB_Desc_Field and the // GxB_Option_Field for global options. They are defined with the same integer // value for both enums, so the user can use them for both. #define GxB_NTHREADS 5 #define GxB_CHUNK 7 // GPU control (DRAFT: in progress, do not use) #define GxB_GPU_CONTROL 21 #define GxB_GPU_CHUNK 22 typedef enum { GrB_OUTP = 0, // descriptor for output of a method GrB_MASK = 1, // descriptor for the mask input of a method GrB_INP0 = 2, // descriptor for the first input of a method GrB_INP1 = 3, // descriptor for the second input of a method GxB_DESCRIPTOR_NTHREADS = GxB_NTHREADS, // max number of threads to use. // If <= GxB_DEFAULT, then GraphBLAS selects the number // of threads automatically. GxB_DESCRIPTOR_CHUNK = GxB_CHUNK, // chunk size for small problems. // If <= GxB_DEFAULT, then the default is used. 
// GPU control (DRAFT: in progress, do not use) GxB_DESCRIPTOR_GPU_CONTROL = GxB_GPU_CONTROL, GxB_DESCRIPTOR_GPU_CHUNK = GxB_GPU_CHUNK, GxB_AxB_METHOD = 1000, // descriptor for selecting C=A*B algorithm GxB_SORT = 35, // control sort in GrB_mxm GxB_COMPRESSION = 36, // select compression for serialize GxB_IMPORT = 37, // secure vs fast import } GrB_Desc_Field ; typedef enum { // for all GrB_Descriptor fields: GxB_DEFAULT = 0, // default behavior of the method // for GrB_OUTP only: GrB_REPLACE = 1, // clear the output before assigning new values to it // for GrB_MASK only: GrB_COMP = 2, // use the structural complement of the input GrB_STRUCTURE = 4, // use the only pattern of the mask, not its values // for GrB_INP0 and GrB_INP1 only: GrB_TRAN = 3, // use the transpose of the input // for GxB_GPU_CONTROL only (DRAFT: in progress, do not use) GxB_GPU_ALWAYS = 2001, GxB_GPU_NEVER = 2002, // for GxB_AxB_METHOD only: GxB_AxB_GUSTAVSON = 1001, // gather-scatter saxpy method GxB_AxB_DOT = 1003, // dot product GxB_AxB_HASH = 1004, // hash-based saxpy method GxB_AxB_SAXPY = 1005, // saxpy method (any kind) // for GxB_IMPORT only: GxB_SECURE_IMPORT = 502 // GxB*_pack* methods trust their input data } GrB_Desc_Value ; // default for GxB pack is to trust the input data #define GxB_FAST_IMPORT GxB_DEFAULT typedef struct GB_Descriptor_opaque *GrB_Descriptor ; GB_PUBLIC GrB_Info GrB_Descriptor_new // create a new descriptor ( GrB_Descriptor *descriptor // handle of descriptor to create ) ; GB_PUBLIC GrB_Info GrB_Descriptor_set // set a parameter in a descriptor ( GrB_Descriptor desc, // descriptor to modify GrB_Desc_Field field, // parameter to change GrB_Desc_Value val // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Descriptor_get // get a parameter from a descriptor ( GrB_Desc_Value *val, // value of the parameter GrB_Descriptor desc, // descriptor to query; NULL means defaults GrB_Desc_Field field // parameter to query ) ; GB_PUBLIC GrB_Info GxB_Desc_set // set a 
parameter in a descriptor ( GrB_Descriptor desc, // descriptor to modify GrB_Desc_Field field, // parameter to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Desc_get // get a parameter from a descriptor ( GrB_Descriptor desc, // descriptor to query; NULL means defaults GrB_Desc_Field field, // parameter to query ... // value of the parameter ) ; GB_PUBLIC GrB_Info GrB_Descriptor_free // free a descriptor ( GrB_Descriptor *descriptor // handle of descriptor to free ) ; // Predefined descriptors and their values: GB_PUBLIC GrB_Descriptor // OUTP MASK MASK INP0 INP1 // structural complement // =========== ============== ========== ======== ======== // GrB_NULL // - - - - - GrB_DESC_T1 , // - - - - GrB_TRAN GrB_DESC_T0 , // - - - GrB_TRAN - GrB_DESC_T0T1 , // - - - GrB_TRAN GrB_TRAN GrB_DESC_C , // - - GrB_COMP - - GrB_DESC_CT1 , // - - GrB_COMP - GrB_TRAN GrB_DESC_CT0 , // - - GrB_COMP GrB_TRAN - GrB_DESC_CT0T1 , // - - GrB_COMP GrB_TRAN GrB_TRAN GrB_DESC_S , // - GrB_STRUCTURE - - - GrB_DESC_ST1 , // - GrB_STRUCTURE - - GrB_TRAN GrB_DESC_ST0 , // - GrB_STRUCTURE - GrB_TRAN - GrB_DESC_ST0T1 , // - GrB_STRUCTURE - GrB_TRAN GrB_TRAN GrB_DESC_SC , // - GrB_STRUCTURE GrB_COMP - - GrB_DESC_SCT1 , // - GrB_STRUCTURE GrB_COMP - GrB_TRAN GrB_DESC_SCT0 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN - GrB_DESC_SCT0T1 , // - GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN GrB_DESC_R , // GrB_REPLACE - - - - GrB_DESC_RT1 , // GrB_REPLACE - - - GrB_TRAN GrB_DESC_RT0 , // GrB_REPLACE - - GrB_TRAN - GrB_DESC_RT0T1 , // GrB_REPLACE - - GrB_TRAN GrB_TRAN GrB_DESC_RC , // GrB_REPLACE - GrB_COMP - - GrB_DESC_RCT1 , // GrB_REPLACE - GrB_COMP - GrB_TRAN GrB_DESC_RCT0 , // GrB_REPLACE - GrB_COMP GrB_TRAN - GrB_DESC_RCT0T1 , // GrB_REPLACE - GrB_COMP GrB_TRAN GrB_TRAN GrB_DESC_RS , // GrB_REPLACE GrB_STRUCTURE - - - GrB_DESC_RST1 , // GrB_REPLACE GrB_STRUCTURE - - GrB_TRAN GrB_DESC_RST0 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN - GrB_DESC_RST0T1 , // GrB_REPLACE GrB_STRUCTURE - GrB_TRAN 
GrB_TRAN GrB_DESC_RSC , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - - GrB_DESC_RSCT1 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP - GrB_TRAN GrB_DESC_RSCT0 , // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN - GrB_DESC_RSCT0T1 ; // GrB_REPLACE GrB_STRUCTURE GrB_COMP GrB_TRAN GrB_TRAN // GrB_NULL is the default descriptor, with all settings at their defaults: // // OUTP: do not replace the output // MASK: mask is valued and not complemented // INP0: first input not transposed // INP1: second input not transposed // Predefined descriptors may not be modified or freed. Attempting to modify // them results in an error (GrB_INVALID_VALUE). Attempts to free them are // silently ignored. //============================================================================== // GrB_Type: data types //============================================================================== typedef struct GB_Type_opaque *GrB_Type ; // GraphBLAS predefined types and their counterparts in pure C: GB_PUBLIC GrB_Type GrB_BOOL , // in C: bool GrB_INT8 , // in C: int8_t GrB_INT16 , // in C: int16_t GrB_INT32 , // in C: int32_t GrB_INT64 , // in C: int64_t GrB_UINT8 , // in C: uint8_t GrB_UINT16 , // in C: uint16_t GrB_UINT32 , // in C: uint32_t GrB_UINT64 , // in C: uint64_t GrB_FP32 , // in C: float GrB_FP64 , // in C: double GxB_FC32 , // in C: float complex GxB_FC64 ; // in C: double complex //------------------------------------------------------------------------------ // helper macros for polymorphic functions //------------------------------------------------------------------------------ #define GB_CAT(w,x,y,z) w ## x ## y ## z #define GB_CONCAT(w,x,y,z) GB_CAT (w, x, y, z) #if GxB_STDC_VERSION >= 201112L #define GB_CASES(p,prefix,func) \ const bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \ bool p : GB_CONCAT ( prefix, _, func, _BOOL ), \ const int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \ int8_t p : GB_CONCAT ( prefix, _, func, _INT8 ), \ const int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), 
\ int16_t p : GB_CONCAT ( prefix, _, func, _INT16 ), \ const int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \ int32_t p : GB_CONCAT ( prefix, _, func, _INT32 ), \ const int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \ int64_t p : GB_CONCAT ( prefix, _, func, _INT64 ), \ const uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \ uint8_t p : GB_CONCAT ( prefix, _, func, _UINT8 ), \ const uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \ uint16_t p : GB_CONCAT ( prefix, _, func, _UINT16 ), \ const uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \ uint32_t p : GB_CONCAT ( prefix, _, func, _UINT32 ), \ const uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \ uint64_t p : GB_CONCAT ( prefix, _, func, _UINT64 ), \ const float p : GB_CONCAT ( prefix, _, func, _FP32 ), \ float p : GB_CONCAT ( prefix, _, func, _FP32 ), \ const double p : GB_CONCAT ( prefix, _, func, _FP64 ), \ double p : GB_CONCAT ( prefix, _, func, _FP64 ), \ const GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \ GxB_FC32_t p : GB_CONCAT ( GxB , _, func, _FC32 ), \ const GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \ GxB_FC64_t p : GB_CONCAT ( GxB , _, func, _FC64 ), \ const void * : GB_CONCAT ( prefix, _, func, _UDT ), \ void * : GB_CONCAT ( prefix, _, func, _UDT ) #endif //------------------------------------------------------------------------------ // GrB_Type_new: create a new type //------------------------------------------------------------------------------ // GrB_Type_new is implemented both as a macro and a function. Both are // user-callable. The default is to use the macro, since this allows the name // of the type to be saved as a string, for subsequent error reporting by // GrB_error. 
#undef GrB_Type_new #undef GrM_Type_new GB_PUBLIC GrB_Info GRB (Type_new) // create a new GraphBLAS type ( GrB_Type *type, // handle of user type to create size_t sizeof_ctype // size = sizeof (ctype) of the C type ) ; // user code should not directly use GB_STR or GB_XSTR // GB_STR: convert the content of x into a string "x" #define GB_XSTR(x) GB_STR(x) #define GB_STR(x) #x // GrB_Type_new as a user-callable macro, which allows the name of the ctype // to be added to the new type. The type_defn is unknown. #define GrB_Type_new(utype, sizeof_ctype) \ GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL) #define GrM_Type_new(utype, sizeof_ctype) \ GxB_Type_new(utype, sizeof_ctype, GB_STR(sizeof_ctype), NULL) // GxB_Type_new creates a type with a name and definition that are known to // GraphBLAS, as strings. The type_name is any valid string (max length of 128 // characters, including the required null-terminating character) that may // appear as the name of a C type created by a C "typedef" statement. It must // not contain any white-space characters. Example, creating a type of size // 16*4+4 = 68 bytes, with a 4-by-4 dense float array and a 32-bit integer: // // typedef struct { float x [4][4] ; int color ; } myquaternion ; // GrB_Type MyQtype ; // GxB_Type_new (&MyQtype, sizeof (myquaternion), "myquaternion", // "typedef struct { float x [4][4] ; int color ; } myquaternion ;") ; // // The type_name and type_defn are both null-terminated strings. Currently, // type_defn is unused, but it will be required for best performance when a JIT // is implemented in SuiteSparse:GraphBLAS (both on the CPU and GPU). User // defined types created by GrB_Type_new will not work with a JIT. // // At most GxB_MAX_NAME_LEN characters are accessed in type_name; characters // beyond that limit are silently ignored. 
#define GxB_MAX_NAME_LEN 128 GB_PUBLIC GrB_Info GxB_Type_new // create a new named GraphBLAS type ( GrB_Type *type, // handle of user type to create size_t sizeof_ctype, // size = sizeof (ctype) of the C type const char *type_name, // name of the type (max 128 characters) const char *type_defn // typedef for the type (no max length) ) ; // GB_Type_new is historical: use GxB_Type_new instead GB_PUBLIC GrB_Info GB_Type_new // not user-callable ( GrB_Type *type, // handle of user type to create size_t sizeof_ctype, // size of the user type const char *type_name // name of the type, as "sizeof (ctype)" ) ; GB_PUBLIC GrB_Info GxB_Type_name // return the name of a GraphBLAS type ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). const GrB_Type type ) ; GB_PUBLIC GrB_Info GxB_Type_size // determine the size of the type ( size_t *size, // the sizeof the type const GrB_Type type // type to determine the sizeof ) ; GB_PUBLIC GrB_Info GxB_Type_from_name // return the built-in GrB_Type from a name ( GrB_Type *type, // built-in type, or NULL if user-defined const char *type_name // array of size at least GxB_MAX_NAME_LEN ) ; GB_PUBLIC GrB_Info GrB_Type_free // free a user-defined type ( GrB_Type *type // handle of user-defined type to free ) ; //============================================================================== // GrB_UnaryOp: unary operators //============================================================================== // GrB_UnaryOp: a function z=f(x). The function f must have the signature: // void f (void *z, const void *x) ; // The pointers are void * but they are always of pointers to objects of type // ztype and xtype, respectively. The function must typecast its arguments as // needed from void* to ztype* and xtype*. 
typedef struct GB_UnaryOp_opaque *GrB_UnaryOp ; //------------------------------------------------------------------------------ // built-in unary operators, z = f(x) //------------------------------------------------------------------------------ GB_PUBLIC GrB_UnaryOp // For these functions z=f(x), z and x have the same type. // The suffix in the name is the type of x and z. // z = x z = -x z = 1/x z = ! (x != 0) // identity additive multiplicative logical // inverse inverse negation GrB_IDENTITY_BOOL, GrB_AINV_BOOL, GrB_MINV_BOOL, GxB_LNOT_BOOL, GrB_IDENTITY_INT8, GrB_AINV_INT8, GrB_MINV_INT8, GxB_LNOT_INT8, GrB_IDENTITY_INT16, GrB_AINV_INT16, GrB_MINV_INT16, GxB_LNOT_INT16, GrB_IDENTITY_INT32, GrB_AINV_INT32, GrB_MINV_INT32, GxB_LNOT_INT32, GrB_IDENTITY_INT64, GrB_AINV_INT64, GrB_MINV_INT64, GxB_LNOT_INT64, GrB_IDENTITY_UINT8, GrB_AINV_UINT8, GrB_MINV_UINT8, GxB_LNOT_UINT8, GrB_IDENTITY_UINT16, GrB_AINV_UINT16, GrB_MINV_UINT16, GxB_LNOT_UINT16, GrB_IDENTITY_UINT32, GrB_AINV_UINT32, GrB_MINV_UINT32, GxB_LNOT_UINT32, GrB_IDENTITY_UINT64, GrB_AINV_UINT64, GrB_MINV_UINT64, GxB_LNOT_UINT64, GrB_IDENTITY_FP32, GrB_AINV_FP32, GrB_MINV_FP32, GxB_LNOT_FP32, GrB_IDENTITY_FP64, GrB_AINV_FP64, GrB_MINV_FP64, GxB_LNOT_FP64, // complex unary operators: GxB_IDENTITY_FC32, GxB_AINV_FC32, GxB_MINV_FC32, // no LNOT GxB_IDENTITY_FC64, GxB_AINV_FC64, GxB_MINV_FC64, // for complex // z = 1 z = abs(x) z = bnot(x) z = signum // one absolute value bitwise negation GxB_ONE_BOOL, GrB_ABS_BOOL, GxB_ONE_INT8, GrB_ABS_INT8, GrB_BNOT_INT8, GxB_ONE_INT16, GrB_ABS_INT16, GrB_BNOT_INT16, GxB_ONE_INT32, GrB_ABS_INT32, GrB_BNOT_INT32, GxB_ONE_INT64, GrB_ABS_INT64, GrB_BNOT_INT64, GxB_ONE_UINT8, GrB_ABS_UINT8, GrB_BNOT_UINT8, GxB_ONE_UINT16, GrB_ABS_UINT16, GrB_BNOT_UINT16, GxB_ONE_UINT32, GrB_ABS_UINT32, GrB_BNOT_UINT32, GxB_ONE_UINT64, GrB_ABS_UINT64, GrB_BNOT_UINT64, GxB_ONE_FP32, GrB_ABS_FP32, GxB_ONE_FP64, GrB_ABS_FP64, // complex unary operators: GxB_ONE_FC32, // for complex types, z = 
abs(x) GxB_ONE_FC64, // is real; listed below. // Boolean negation, z = !x, where both z and x are boolean. There is no // suffix since z and x are only boolean. This operator is identical to // GxB_LNOT_BOOL; it just has a different name. GrB_LNOT ; // GxB_ABS is now in the v1.3 spec, the following names are historical: GB_PUBLIC GrB_UnaryOp // z = abs(x) GxB_ABS_BOOL, GxB_ABS_INT8, GxB_ABS_INT16, GxB_ABS_INT32, GxB_ABS_INT64, GxB_ABS_UINT8, GxB_ABS_UINT16, GxB_ABS_UINT32, GxB_ABS_UINT64, GxB_ABS_FP32, GxB_ABS_FP64 ; //------------------------------------------------------------------------------ // Unary operators for floating-point types only //------------------------------------------------------------------------------ // The following floating-point unary operators and their ANSI C11 equivalents, // are only defined for floating-point (real and complex) types. GB_PUBLIC GrB_UnaryOp //-------------------------------------------------------------------------- // z = f(x) where z and x have the same type (all 4 floating-point types) //-------------------------------------------------------------------------- // z = sqrt (x) z = log (x) z = exp (x) z = log2 (x) GxB_SQRT_FP32, GxB_LOG_FP32, GxB_EXP_FP32, GxB_LOG2_FP32, GxB_SQRT_FP64, GxB_LOG_FP64, GxB_EXP_FP64, GxB_LOG2_FP64, GxB_SQRT_FC32, GxB_LOG_FC32, GxB_EXP_FC32, GxB_LOG2_FC32, GxB_SQRT_FC64, GxB_LOG_FC64, GxB_EXP_FC64, GxB_LOG2_FC64, // z = sin (x) z = cos (x) z = tan (x) GxB_SIN_FP32, GxB_COS_FP32, GxB_TAN_FP32, GxB_SIN_FP64, GxB_COS_FP64, GxB_TAN_FP64, GxB_SIN_FC32, GxB_COS_FC32, GxB_TAN_FC32, GxB_SIN_FC64, GxB_COS_FC64, GxB_TAN_FC64, // z = acos (x) z = asin (x) z = atan (x) GxB_ACOS_FP32, GxB_ASIN_FP32, GxB_ATAN_FP32, GxB_ACOS_FP64, GxB_ASIN_FP64, GxB_ATAN_FP64, GxB_ACOS_FC32, GxB_ASIN_FC32, GxB_ATAN_FC32, GxB_ACOS_FC64, GxB_ASIN_FC64, GxB_ATAN_FC64, // z = sinh (x) z = cosh (x) z = tanh (x) GxB_SINH_FP32, GxB_COSH_FP32, GxB_TANH_FP32, GxB_SINH_FP64, GxB_COSH_FP64, GxB_TANH_FP64, GxB_SINH_FC32, 
GxB_COSH_FC32, GxB_TANH_FC32, GxB_SINH_FC64, GxB_COSH_FC64, GxB_TANH_FC64, // z = acosh (x) z = asinh (x) z = atanh (x) z = signum (x) GxB_ACOSH_FP32, GxB_ASINH_FP32, GxB_ATANH_FP32, GxB_SIGNUM_FP32, GxB_ACOSH_FP64, GxB_ASINH_FP64, GxB_ATANH_FP64, GxB_SIGNUM_FP64, GxB_ACOSH_FC32, GxB_ASINH_FC32, GxB_ATANH_FC32, GxB_SIGNUM_FC32, GxB_ACOSH_FC64, GxB_ASINH_FC64, GxB_ATANH_FC64, GxB_SIGNUM_FC64, // z = ceil (x) z = floor (x) z = round (x) z = trunc (x) GxB_CEIL_FP32, GxB_FLOOR_FP32, GxB_ROUND_FP32, GxB_TRUNC_FP32, GxB_CEIL_FP64, GxB_FLOOR_FP64, GxB_ROUND_FP64, GxB_TRUNC_FP64, GxB_CEIL_FC32, GxB_FLOOR_FC32, GxB_ROUND_FC32, GxB_TRUNC_FC32, GxB_CEIL_FC64, GxB_FLOOR_FC64, GxB_ROUND_FC64, GxB_TRUNC_FC64, // z = exp2 (x) z = expm1 (x) z = log10 (x) z = log1p (x) GxB_EXP2_FP32, GxB_EXPM1_FP32, GxB_LOG10_FP32, GxB_LOG1P_FP32, GxB_EXP2_FP64, GxB_EXPM1_FP64, GxB_LOG10_FP64, GxB_LOG1P_FP64, GxB_EXP2_FC32, GxB_EXPM1_FC32, GxB_LOG10_FC32, GxB_LOG1P_FC32, GxB_EXP2_FC64, GxB_EXPM1_FC64, GxB_LOG10_FC64, GxB_LOG1P_FC64, //-------------------------------------------------------------------------- // z = f(x) where z and x are the same type (floating-point real only) //-------------------------------------------------------------------------- // z = lgamma (x) z = tgamma (x) z = erf (x) z = erfc (x) GxB_LGAMMA_FP32, GxB_TGAMMA_FP32, GxB_ERF_FP32, GxB_ERFC_FP32, GxB_LGAMMA_FP64, GxB_TGAMMA_FP64, GxB_ERF_FP64, GxB_ERFC_FP64, // frexpx and frexpe return the mantissa and exponent, respectively, // from the ANSI C11 frexp function. The exponent is returned as a // floating-point value, not an integer. 
// z = frexpx (x) z = frexpe (x) GxB_FREXPX_FP32, GxB_FREXPE_FP32, GxB_FREXPX_FP64, GxB_FREXPE_FP64, //-------------------------------------------------------------------------- // z = f(x) where z and x are the same type (complex only) //-------------------------------------------------------------------------- // z = conj (x) GxB_CONJ_FC32, GxB_CONJ_FC64, //-------------------------------------------------------------------------- // z = f(x) where z is real and x is complex: //-------------------------------------------------------------------------- // z = creal (x) z = cimag (x) z = carg (x) z = abs (x) GxB_CREAL_FC32, GxB_CIMAG_FC32, GxB_CARG_FC32, GxB_ABS_FC32, GxB_CREAL_FC64, GxB_CIMAG_FC64, GxB_CARG_FC64, GxB_ABS_FC64, //-------------------------------------------------------------------------- // z = f(x) where z is bool and x is any floating-point type //-------------------------------------------------------------------------- // z = isinf (x) GxB_ISINF_FP32, GxB_ISINF_FP64, GxB_ISINF_FC32, // isinf (creal (x)) || isinf (cimag (x)) GxB_ISINF_FC64, // isinf (creal (x)) || isinf (cimag (x)) // z = isnan (x) GxB_ISNAN_FP32, GxB_ISNAN_FP64, GxB_ISNAN_FC32, // isnan (creal (x)) || isnan (cimag (x)) GxB_ISNAN_FC64, // isnan (creal (x)) || isnan (cimag (x)) // z = isfinite (x) GxB_ISFINITE_FP32, GxB_ISFINITE_FP64, GxB_ISFINITE_FC32, // isfinite (real (x)) && isfinite (cimag (x)) GxB_ISFINITE_FC64 ; // isfinite (real (x)) && isfinite (cimag (x)) //------------------------------------------------------------------------------ // methods for unary operators //------------------------------------------------------------------------------ typedef void (*GxB_unary_function) (void *, const void *) ; // GrB_UnaryOp_new creates a user-defined unary op, with an automatic // detection of the operator name. 
#undef GrB_UnaryOp_new #undef GrM_UnaryOp_new GB_PUBLIC GrB_Info GRB (UnaryOp_new) // create a new user-defined unary operator ( GrB_UnaryOp *unaryop, // handle for the new unary operator GxB_unary_function function, // pointer to the unary function GrB_Type ztype, // type of output z GrB_Type xtype // type of input x ) ; #define GrB_UnaryOp_new(op,f,z,x) \ GxB_UnaryOp_new(op,f,z,x, GB_STR(f), NULL) #define GrM_UnaryOp_new(op,f,z,x) \ GxM_UnaryOp_new(op,f,z,x, GB_STR(f), NULL) // GxB_UnaryOp_new creates a named user-defined unary op. GB_PUBLIC GrB_Info GxB_UnaryOp_new // create a new user-defined unary operator ( GrB_UnaryOp *unaryop, // handle for the new unary operator GxB_unary_function function, // pointer to the unary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x const char *unop_name, // name of the user function const char *unop_defn // definition of the user function ) ; // GB_UnaryOp_new is historical: use GxB_UnaryOp_new instead GB_PUBLIC GrB_Info GB_UnaryOp_new // not user-callable ( GrB_UnaryOp *unaryop, // handle for the new unary operator GxB_unary_function function, // pointer to the unary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x const char *unop_name // name of the user function ) ; // GxB_UnaryOp_ztype is historical. Use GxB_UnaryOp_ztype_name instead. GB_PUBLIC GrB_Info GxB_UnaryOp_ztype // return the type of z ( GrB_Type *ztype, // return type of output z GrB_UnaryOp unaryop // unary operator ) ; GB_PUBLIC GrB_Info GxB_UnaryOp_ztype_name // return the type_name of z ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_UnaryOp unaryop // unary operator ) ; // GxB_UnaryOp_xtype is historical. Use GxB_UnaryOp_xtype_name instead. 
GB_PUBLIC GrB_Info GxB_UnaryOp_xtype // return the type of x ( GrB_Type *xtype, // return type of input x GrB_UnaryOp unaryop // unary operator ) ; GB_PUBLIC GrB_Info GxB_UnaryOp_xtype_name // return the type_name of x ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_UnaryOp unaryop // unary operator ) ; GB_PUBLIC GrB_Info GrB_UnaryOp_free // free a user-created unary operator ( GrB_UnaryOp *unaryop // handle of unary operator to free ) ; //============================================================================== // GrB_BinaryOp: binary operators //============================================================================== // GrB_BinaryOp: a function z=f(x,y). The function f must have the signature: // void f (void *z, const void *x, const void *y) ; // The pointers are void * but they are always of pointers to objects of type // ztype, xtype, and ytype, respectively. See Demo/usercomplex.c for examples. typedef struct GB_BinaryOp_opaque *GrB_BinaryOp ; //------------------------------------------------------------------------------ // built-in binary operators, z = f(x,y), where x,y,z all have the same type //------------------------------------------------------------------------------ GB_PUBLIC GrB_BinaryOp // operators for all 13 types (including complex): // GxB_PAIR_T and GrB_ONEB_T are identical; the latter was added to the // v2.0 C API Specification. 
// z = x z = y z = 1 z = pow (x,y) GrB_FIRST_BOOL, GrB_SECOND_BOOL, GrB_ONEB_BOOL, GxB_POW_BOOL, GrB_FIRST_INT8, GrB_SECOND_INT8, GrB_ONEB_INT8, GxB_POW_INT8, GrB_FIRST_INT16, GrB_SECOND_INT16, GrB_ONEB_INT16, GxB_POW_INT16, GrB_FIRST_INT32, GrB_SECOND_INT32, GrB_ONEB_INT32, GxB_POW_INT32, GrB_FIRST_INT64, GrB_SECOND_INT64, GrB_ONEB_INT64, GxB_POW_INT64, GrB_FIRST_UINT8, GrB_SECOND_UINT8, GrB_ONEB_UINT8, GxB_POW_UINT8, GrB_FIRST_UINT16, GrB_SECOND_UINT16, GrB_ONEB_UINT16, GxB_POW_UINT16, GrB_FIRST_UINT32, GrB_SECOND_UINT32, GrB_ONEB_UINT32, GxB_POW_UINT32, GrB_FIRST_UINT64, GrB_SECOND_UINT64, GrB_ONEB_UINT64, GxB_POW_UINT64, GrB_FIRST_FP32, GrB_SECOND_FP32, GrB_ONEB_FP32, GxB_POW_FP32, GrB_FIRST_FP64, GrB_SECOND_FP64, GrB_ONEB_FP64, GxB_POW_FP64, // complex: GxB_FIRST_FC32, GxB_SECOND_FC32, GxB_ONEB_FC32, GxB_POW_FC32, GxB_FIRST_FC64, GxB_SECOND_FC64, GxB_ONEB_FC64, GxB_POW_FC64, // z = x+y z = x-y z = x*y z = x/y GrB_PLUS_BOOL, GrB_MINUS_BOOL, GrB_TIMES_BOOL, GrB_DIV_BOOL, GrB_PLUS_INT8, GrB_MINUS_INT8, GrB_TIMES_INT8, GrB_DIV_INT8, GrB_PLUS_INT16, GrB_MINUS_INT16, GrB_TIMES_INT16, GrB_DIV_INT16, GrB_PLUS_INT32, GrB_MINUS_INT32, GrB_TIMES_INT32, GrB_DIV_INT32, GrB_PLUS_INT64, GrB_MINUS_INT64, GrB_TIMES_INT64, GrB_DIV_INT64, GrB_PLUS_UINT8, GrB_MINUS_UINT8, GrB_TIMES_UINT8, GrB_DIV_UINT8, GrB_PLUS_UINT16, GrB_MINUS_UINT16, GrB_TIMES_UINT16, GrB_DIV_UINT16, GrB_PLUS_UINT32, GrB_MINUS_UINT32, GrB_TIMES_UINT32, GrB_DIV_UINT32, GrB_PLUS_UINT64, GrB_MINUS_UINT64, GrB_TIMES_UINT64, GrB_DIV_UINT64, GrB_PLUS_FP32, GrB_MINUS_FP32, GrB_TIMES_FP32, GrB_DIV_FP32, GrB_PLUS_FP64, GrB_MINUS_FP64, GrB_TIMES_FP64, GrB_DIV_FP64, // complex: GxB_PLUS_FC32, GxB_MINUS_FC32, GxB_TIMES_FC32, GxB_DIV_FC32, GxB_PLUS_FC64, GxB_MINUS_FC64, GxB_TIMES_FC64, GxB_DIV_FC64, // z = y-x z = y/x z = 1 z = any(x,y) GxB_RMINUS_BOOL, GxB_RDIV_BOOL, GxB_PAIR_BOOL, GxB_ANY_BOOL, GxB_RMINUS_INT8, GxB_RDIV_INT8, GxB_PAIR_INT8, GxB_ANY_INT8, GxB_RMINUS_INT16, GxB_RDIV_INT16, GxB_PAIR_INT16, GxB_ANY_INT16, 
GxB_RMINUS_INT32, GxB_RDIV_INT32, GxB_PAIR_INT32, GxB_ANY_INT32, GxB_RMINUS_INT64, GxB_RDIV_INT64, GxB_PAIR_INT64, GxB_ANY_INT64, GxB_RMINUS_UINT8, GxB_RDIV_UINT8, GxB_PAIR_UINT8, GxB_ANY_UINT8, GxB_RMINUS_UINT16, GxB_RDIV_UINT16, GxB_PAIR_UINT16, GxB_ANY_UINT16, GxB_RMINUS_UINT32, GxB_RDIV_UINT32, GxB_PAIR_UINT32, GxB_ANY_UINT32, GxB_RMINUS_UINT64, GxB_RDIV_UINT64, GxB_PAIR_UINT64, GxB_ANY_UINT64, GxB_RMINUS_FP32, GxB_RDIV_FP32, GxB_PAIR_FP32, GxB_ANY_FP32, GxB_RMINUS_FP64, GxB_RDIV_FP64, GxB_PAIR_FP64, GxB_ANY_FP64, // complex: GxB_RMINUS_FC32, GxB_RDIV_FC32, GxB_PAIR_FC32, GxB_ANY_FC32, GxB_RMINUS_FC64, GxB_RDIV_FC64, GxB_PAIR_FC64, GxB_ANY_FC64, // The GxB_IS* comparators z=f(x,y) return the same type as their // inputs. Each of them compute z = (x OP y), where x, y, and z all have // the same type. The value z is either 1 for true or 0 for false, but it // is a value with the same type as x and y. // z = (x == y) z = (x != y) GxB_ISEQ_BOOL, GxB_ISNE_BOOL, GxB_ISEQ_INT8, GxB_ISNE_INT8, GxB_ISEQ_INT16, GxB_ISNE_INT16, GxB_ISEQ_INT32, GxB_ISNE_INT32, GxB_ISEQ_INT64, GxB_ISNE_INT64, GxB_ISEQ_UINT8, GxB_ISNE_UINT8, GxB_ISEQ_UINT16, GxB_ISNE_UINT16, GxB_ISEQ_UINT32, GxB_ISNE_UINT32, GxB_ISEQ_UINT64, GxB_ISNE_UINT64, GxB_ISEQ_FP32, GxB_ISNE_FP32, GxB_ISEQ_FP64, GxB_ISNE_FP64, // complex: GxB_ISEQ_FC32, GxB_ISNE_FC32, GxB_ISEQ_FC64, GxB_ISNE_FC64, // z = (x > y) z = (x < y) z = (x >= y) z = (x <= y) GxB_ISGT_BOOL, GxB_ISLT_BOOL, GxB_ISGE_BOOL, GxB_ISLE_BOOL, GxB_ISGT_INT8, GxB_ISLT_INT8, GxB_ISGE_INT8, GxB_ISLE_INT8, GxB_ISGT_INT16, GxB_ISLT_INT16, GxB_ISGE_INT16, GxB_ISLE_INT16, GxB_ISGT_INT32, GxB_ISLT_INT32, GxB_ISGE_INT32, GxB_ISLE_INT32, GxB_ISGT_INT64, GxB_ISLT_INT64, GxB_ISGE_INT64, GxB_ISLE_INT64, GxB_ISGT_UINT8, GxB_ISLT_UINT8, GxB_ISGE_UINT8, GxB_ISLE_UINT8, GxB_ISGT_UINT16, GxB_ISLT_UINT16, GxB_ISGE_UINT16, GxB_ISLE_UINT16, GxB_ISGT_UINT32, GxB_ISLT_UINT32, GxB_ISGE_UINT32, GxB_ISLE_UINT32, GxB_ISGT_UINT64, GxB_ISLT_UINT64, GxB_ISGE_UINT64, GxB_ISLE_UINT64, 
GxB_ISGT_FP32, GxB_ISLT_FP32, GxB_ISGE_FP32, GxB_ISLE_FP32, GxB_ISGT_FP64, GxB_ISLT_FP64, GxB_ISGE_FP64, GxB_ISLE_FP64, // z = min(x,y) z = max (x,y) GrB_MIN_BOOL, GrB_MAX_BOOL, GrB_MIN_INT8, GrB_MAX_INT8, GrB_MIN_INT16, GrB_MAX_INT16, GrB_MIN_INT32, GrB_MAX_INT32, GrB_MIN_INT64, GrB_MAX_INT64, GrB_MIN_UINT8, GrB_MAX_UINT8, GrB_MIN_UINT16, GrB_MAX_UINT16, GrB_MIN_UINT32, GrB_MAX_UINT32, GrB_MIN_UINT64, GrB_MAX_UINT64, GrB_MIN_FP32, GrB_MAX_FP32, GrB_MIN_FP64, GrB_MAX_FP64, // Binary operators for each of the 11 real types: // The operators convert non-boolean types internally to boolean and return // a value 1 or 0 in the same type, for true or false. Each computes z = // ((x != 0) OP (y != 0)), where x, y, and z all the same type. These // operators are useful as multiplicative operators when combined with // non-boolean monoids of the same type. // z = (x || y) z = (x && y) z = (x != y) GxB_LOR_BOOL, GxB_LAND_BOOL, GxB_LXOR_BOOL, GxB_LOR_INT8, GxB_LAND_INT8, GxB_LXOR_INT8, GxB_LOR_INT16, GxB_LAND_INT16, GxB_LXOR_INT16, GxB_LOR_INT32, GxB_LAND_INT32, GxB_LXOR_INT32, GxB_LOR_INT64, GxB_LAND_INT64, GxB_LXOR_INT64, GxB_LOR_UINT8, GxB_LAND_UINT8, GxB_LXOR_UINT8, GxB_LOR_UINT16, GxB_LAND_UINT16, GxB_LXOR_UINT16, GxB_LOR_UINT32, GxB_LAND_UINT32, GxB_LXOR_UINT32, GxB_LOR_UINT64, GxB_LAND_UINT64, GxB_LXOR_UINT64, GxB_LOR_FP32, GxB_LAND_FP32, GxB_LXOR_FP32, GxB_LOR_FP64, GxB_LAND_FP64, GxB_LXOR_FP64, // Binary operators that operate only on boolean types: LOR, LAND, LXOR, // and LXNOR. The naming convention differs (_BOOL is not appended to the // name). They are the same as GxB_LOR_BOOL, GxB_LAND_BOOL, and // GxB_LXOR_BOOL, and GrB_EQ_BOOL, respectively. 
// z = (x || y) z = (x && y) z = (x != y) z = (x == y) GrB_LOR, GrB_LAND, GrB_LXOR, GrB_LXNOR, // Operators for floating-point reals: // z = atan2(x,y) z = hypot(x,y) z = fmod(x,y) z = remainder(x,y) GxB_ATAN2_FP32, GxB_HYPOT_FP32, GxB_FMOD_FP32, GxB_REMAINDER_FP32, GxB_ATAN2_FP64, GxB_HYPOT_FP64, GxB_FMOD_FP64, GxB_REMAINDER_FP64, // z = ldexp(x,y) z = copysign (x,y) GxB_LDEXP_FP32, GxB_COPYSIGN_FP32, GxB_LDEXP_FP64, GxB_COPYSIGN_FP64, // Bitwise operations on signed and unsigned integers: note that // bitwise operations on signed integers can lead to different results, // depending on your compiler; results are implementation-defined. // z = (x | y) z = (x & y) z = (x ^ y) z = ~(x ^ y) GrB_BOR_INT8, GrB_BAND_INT8, GrB_BXOR_INT8, GrB_BXNOR_INT8, GrB_BOR_INT16, GrB_BAND_INT16, GrB_BXOR_INT16, GrB_BXNOR_INT16, GrB_BOR_INT32, GrB_BAND_INT32, GrB_BXOR_INT32, GrB_BXNOR_INT32, GrB_BOR_INT64, GrB_BAND_INT64, GrB_BXOR_INT64, GrB_BXNOR_INT64, GrB_BOR_UINT8, GrB_BAND_UINT8, GrB_BXOR_UINT8, GrB_BXNOR_UINT8, GrB_BOR_UINT16, GrB_BAND_UINT16, GrB_BXOR_UINT16, GrB_BXNOR_UINT16, GrB_BOR_UINT32, GrB_BAND_UINT32, GrB_BXOR_UINT32, GrB_BXNOR_UINT32, GrB_BOR_UINT64, GrB_BAND_UINT64, GrB_BXOR_UINT64, GrB_BXNOR_UINT64, // z = bitget(x,y) z = bitset(x,y) z = bitclr(x,y) GxB_BGET_INT8, GxB_BSET_INT8, GxB_BCLR_INT8, GxB_BGET_INT16, GxB_BSET_INT16, GxB_BCLR_INT16, GxB_BGET_INT32, GxB_BSET_INT32, GxB_BCLR_INT32, GxB_BGET_INT64, GxB_BSET_INT64, GxB_BCLR_INT64, GxB_BGET_UINT8, GxB_BSET_UINT8, GxB_BCLR_UINT8, GxB_BGET_UINT16, GxB_BSET_UINT16, GxB_BCLR_UINT16, GxB_BGET_UINT32, GxB_BSET_UINT32, GxB_BCLR_UINT32, GxB_BGET_UINT64, GxB_BSET_UINT64, GxB_BCLR_UINT64 ; //------------------------------------------------------------------------------ // z=f(x,y) where z and x have the same type, but y is GrB_INT8 //------------------------------------------------------------------------------ // z = bitshift (x,y) computes z = x left-shifted by y bits if y >= 0, or z // = x right-shifted by (-y) bits if y 
< 0. z is equal to x if y is zero. // z and x have the same type, as given by the suffix on the operator name. // Since y must be signed, it cannot have the same type as x when x is // unsigned; it is always GrB_INT8 for all 8 versions of this operator. // The GxB_BSHIFT_* operators compute the arithmetic shift, and produce the // same results as the bitshift.m function, for all possible inputs. GB_PUBLIC GrB_BinaryOp // z = bitshift(x,y) GxB_BSHIFT_INT8, GxB_BSHIFT_INT16, GxB_BSHIFT_INT32, GxB_BSHIFT_INT64, GxB_BSHIFT_UINT8, GxB_BSHIFT_UINT16, GxB_BSHIFT_UINT32, GxB_BSHIFT_UINT64 ; //------------------------------------------------------------------------------ // z=f(x,y) where z is BOOL and the type of x,y is given by the suffix //------------------------------------------------------------------------------ GB_PUBLIC GrB_BinaryOp // Six comparators z=f(x,y) return their result as boolean, but // where x and y have the same type. The suffix in their names refers to // the type of x and y since z is always boolean. If used as multiply // operators in a semiring, they can only be combined with boolean monoids. // The _BOOL versions of these operators give the same results as their // IS*_BOOL counterparts. GrB_EQ_BOOL and GrB_LXNOR are identical. 
// z = (x == y) z = (x != y) z = (x > y) z = (x < y) GrB_EQ_BOOL, GrB_NE_BOOL, GrB_GT_BOOL, GrB_LT_BOOL, GrB_EQ_INT8, GrB_NE_INT8, GrB_GT_INT8, GrB_LT_INT8, GrB_EQ_INT16, GrB_NE_INT16, GrB_GT_INT16, GrB_LT_INT16, GrB_EQ_INT32, GrB_NE_INT32, GrB_GT_INT32, GrB_LT_INT32, GrB_EQ_INT64, GrB_NE_INT64, GrB_GT_INT64, GrB_LT_INT64, GrB_EQ_UINT8, GrB_NE_UINT8, GrB_GT_UINT8, GrB_LT_UINT8, GrB_EQ_UINT16, GrB_NE_UINT16, GrB_GT_UINT16, GrB_LT_UINT16, GrB_EQ_UINT32, GrB_NE_UINT32, GrB_GT_UINT32, GrB_LT_UINT32, GrB_EQ_UINT64, GrB_NE_UINT64, GrB_GT_UINT64, GrB_LT_UINT64, GrB_EQ_FP32, GrB_NE_FP32, GrB_GT_FP32, GrB_LT_FP32, GrB_EQ_FP64, GrB_NE_FP64, GrB_GT_FP64, GrB_LT_FP64, // complex: GxB_EQ_FC32, GxB_NE_FC32, GxB_EQ_FC64, GxB_NE_FC64, // z = (x >= y) z = (x <= y) GrB_GE_BOOL, GrB_LE_BOOL, GrB_GE_INT8, GrB_LE_INT8, GrB_GE_INT16, GrB_LE_INT16, GrB_GE_INT32, GrB_LE_INT32, GrB_GE_INT64, GrB_LE_INT64, GrB_GE_UINT8, GrB_LE_UINT8, GrB_GE_UINT16, GrB_LE_UINT16, GrB_GE_UINT32, GrB_LE_UINT32, GrB_GE_UINT64, GrB_LE_UINT64, GrB_GE_FP32, GrB_LE_FP32, GrB_GE_FP64, GrB_LE_FP64 ; //------------------------------------------------------------------------------ // z=f(x,y) where z is complex and the type of x,y is given by the suffix //------------------------------------------------------------------------------ GB_PUBLIC GrB_BinaryOp // z = cmplx (x,y) GxB_CMPLX_FP32, GxB_CMPLX_FP64 ; //============================================================================== // positional GrB_UnaryOp and GrB_BinaryOp operators //============================================================================== // Positional operators do not depend on the value of an entry, but its row or // column index in the matrix instead. For example, for an entry A(i,j), // first_i(A(i,j),y) is equal to i. These operators are useful for returning // node id's as the result of a semiring operation. If used as a mask, zero // has a special value, and thus z=first_i1(A(i,j),j) returns i+1 instead of i. 
// This can be useful when using a positional operator to construct a mask // matrix or vector for another GraphBLAS operation. It is also essential for // the @GrB interface, since the user view of matrix indices in @GrB is // 1-based, not 0-based. // When applied to a vector, j is always equal to 0. For a GxB_SCALAR, // both i and j are always zero. // GraphBLAS defines a GrB_Index as uint64_t, but these operators return a // GrB_INT32 or GrB_INT64 type, which is more flexible to use because the // result of this operator can be negated, to flag an entry for example. The // value -1 can be used to denote "no node" or "no position". GrB_INT32 is // useful for graphs smaller than 2^31 nodes. If the row or column index // exceeds INT32_MAX, the result is determined by the typecast from the // 64-bit index to the smaller 32-bit index. // Positional operators cannot be used to construct monoids. They can be used // as multiplicative operators in semirings, and as operators for GrB_eWise*, // and GrB_apply (bind first or second). For the latter, the operator cannot // depend on the bound scalar. // When used as multiplicative operators in a semiring, FIRSTJ and SECONDI // are identical. If C(i,j) += t is computed where t = A(i,k)*B(k,j), then // t = k in both cases. Likewise, FIRSTJ1 and SECONDI1 are identical. 
GB_PUBLIC GrB_BinaryOp GxB_FIRSTI_INT32, GxB_FIRSTI_INT64, // z = first_i(A(i,j),y) == i GxB_FIRSTI1_INT32, GxB_FIRSTI1_INT64, // z = first_i1(A(i,j),y) == i+1 GxB_FIRSTJ_INT32, GxB_FIRSTJ_INT64, // z = first_j(A(i,j),y) == j GxB_FIRSTJ1_INT32, GxB_FIRSTJ1_INT64, // z = first_j1(A(i,j),y) == j+1 GxB_SECONDI_INT32, GxB_SECONDI_INT64, // z = second_i(x,B(i,j)) == i GxB_SECONDI1_INT32, GxB_SECONDI1_INT64, // z = second_i1(x,B(i,j)) == i+1 GxB_SECONDJ_INT32, GxB_SECONDJ_INT64, // z = second_j(x,B(i,j)) == j GxB_SECONDJ1_INT32, GxB_SECONDJ1_INT64 ; // z = second_j1(x,B(i,j)) == j+1 GB_PUBLIC GrB_UnaryOp GxB_POSITIONI_INT32, GxB_POSITIONI_INT64, // z=position_i(A(i,j)) == i GxB_POSITIONI1_INT32, GxB_POSITIONI1_INT64, // z=position_i1(A(i,j)) == i+1 GxB_POSITIONJ_INT32, GxB_POSITIONJ_INT64, // z=position_j(A(i,j)) == j GxB_POSITIONJ1_INT32, GxB_POSITIONJ1_INT64 ;// z=position_j1(A(i,j)) == j+1 //============================================================================== // special GrB_BinaryOp for build methods only //============================================================================== // In GrB*build* methods, passing dup as NULL means that no duplicates are // tolerated. If duplicates appear, an error is returned. If dup is a binary // operator, it is applied to reduce duplicates to a single value. The // GxB_IGNORE_DUP is a special case. It is not an operator, but an indication // that any duplicates are to be ignored. GB_PUBLIC GrB_BinaryOp GxB_IGNORE_DUP ; //============================================================================== // About boolean and bitwise binary operators //============================================================================== // Some of the boolean operators compute the same thing with different names. // For example, x*y and x&&y give the same results for boolean x and y. // Operations such as x < y when x and y are boolean are treated as if true=1 // and false=0. 
Below is the truth table for all binary operators with boolean // inputs. This table is defined by how C typecasts boolean values for // non-boolean operations. For example, if x, y, and z are boolean, x = true, // and y = true, then z = x + y = true + true = true. DIV (x/y) is defined // below. RDIV (y/x) is shown as \ in the table; it is the same as 2nd. // x y 1st 2nd min max + - * / or and xor eq ne > < ge le \ pow pair // 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 0 1 1 // 0 1 0 1 0 1 1 1 0 0 1 0 1 0 1 0 1 0 1 1 0 1 // 1 0 1 0 0 1 1 1 0 1 1 0 1 0 1 1 0 1 0 0 1 1 // 1 1 1 1 1 1 1 0 1 1 1 1 0 1 0 0 0 1 1 1 1 1 // GraphBLAS includes a GrB_DIV_BOOL operator in its specification, but does // not define what boolean "division" means. SuiteSparse:GraphBLAS makes the // following interpretation. // GraphBLAS does not generate exceptions for divide-by-zero. Floating-point // divide-by-zero follows the IEEE 754 standard: 1/0 is +Inf, -1/0 is -Inf, and // 0/0 is NaN. For integer division by zero, if x is positive, x/0 is the // largest integer, -x/0 is the integer minimum (zero for unsigned integers), // and 0/0 is zero. For example, for int8, 1/0 is 127, and -1/0 is -128. For // uint8, 1/0 is 255 and 0/0 is zero. // Boolean division is treated as if it were an unsigned integer type with // true=1 and false=0, and with the max and min value being 1 and 0. As a // result, GrB_IDENTITY_BOOL, GrB_AINV_BOOL, and GrB_MINV_BOOL all give the // same result (z = x). // With this convention for boolean "division", there are 11 unique binary // operators that are purely boolean. Other named *_BOOL operators are // redundant but are included in GraphBLAS so that the name space of operators // is complete. Below is a list of all operators and their equivalents. 
// x: 0 0 1 1 // y: 0 1 0 1 // z: see below // // z = 0 0 0 0 0 (zero function, not predefined) // z = (x && y) 0 0 0 1 AND, MIN, TIMES // z = (x > y) 0 0 1 0 GT, ISGT, and set diff (x\y) // z = x 0 0 1 1 FIRST, DIV // // z = (x < y) 0 1 0 0 LT, ISLT, and set diff (y\x) // z = y 0 1 0 1 SECOND, RDIV // z = (x != y) 0 1 1 0 XOR, MINUS, RMINUS, NE, ISNE // z = (x || y) 0 1 1 1 OR, MAX, PLUS // // z = ~(x || y) 1 0 0 0 (nor(x,y) function, not predefined) // z = (x == y) 1 0 0 1 LXNOR, EQ, ISEQ // z = ~y 1 0 1 0 (not(y), not predefined) // z = (x >= y) 1 0 1 1 GE, ISGE, POW, and "x implies y" // // z = ~x 1 1 0 0 (not(x), not predefined) // z = (x <= y) 1 1 0 1 LE, ISLE, and "y implies x" // z = ~(x && y) 1 1 1 0 (nand(x,y) function, not predefined) // z = 1 1 1 1 1 PAIR, ONEB // // z = any(x,y) 0 . . 1 ANY (pick x or y arbitrarily) // Four more that have no _BOOL suffix are also redundant with the operators // of the form GxB_*_BOOL (GrB_LOR, GrB_LAND, GrB_LXOR, and GrB_LXNOR). // Note that the boolean binary operator space is not complete. Five other // boolean functions could be pre-defined as well: z = 0, nor(x,y), // nand(x,y), not(x), and not(y). // Four of the possible 16 bitwise operators are pre-defined: BOR, BAND, // BXOR, and BXNOR. This assumes that the computations for each bit are // entirely independent (so BSHIFT would not fit in the table above). //------------------------------------------------------------------------------ // methods for binary operators //------------------------------------------------------------------------------ typedef void (*GxB_binary_function) (void *, const void *, const void *) ; // GrB_BinaryOp_new creates a user-defined binary op, with an automatic // detection of the operator name. 
#undef GrB_BinaryOp_new #undef GrM_BinaryOp_new GB_PUBLIC GrB_Info GRB (BinaryOp_new) ( GrB_BinaryOp *binaryop, // handle for the new binary operator GxB_binary_function function, // pointer to the binary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype // type of input y ) ; #define GrB_BinaryOp_new(op,f,z,x,y) \ GxB_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL) #define GrM_BinaryOp_new(op,f,z,x,y) \ GxM_BinaryOp_new(op,f,z,x,y, GB_STR(f), NULL) // GxB_BinaryOp_new creates a named user-defined binary op. GB_PUBLIC GrB_Info GxB_BinaryOp_new ( GrB_BinaryOp *op, // handle for the new binary operator GxB_binary_function function, // pointer to the binary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype, // type of input y const char *binop_name, // name of the user function const char *binop_defn // definition of the user function ) ; // GB_BinaryOp_new is historical: use GxB_BinaryOp_new instead GB_PUBLIC GrB_Info GB_BinaryOp_new // not user-callable ( GrB_BinaryOp *binaryop, // handle for the new binary operator GxB_binary_function function, // pointer to the binary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype, // type of input y const char *binop_name // name of the user function ) ; // NOTE: GxB_BinaryOp_ztype is historical. Use GxB_BinaryOp_ztype_name instead. GB_PUBLIC GrB_Info GxB_BinaryOp_ztype // return the type of z ( GrB_Type *ztype, // return type of output z GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_ztype_name // return the type_name of z ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_BinaryOp binaryop // binary operator to query ) ; // NOTE: GxB_BinaryOp_xtype is historical. Use GxB_BinaryOp_xtype_name instead. 
GB_PUBLIC GrB_Info GxB_BinaryOp_xtype // return the type of x ( GrB_Type *xtype, // return type of input x GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_xtype_name // return the type_name of x ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_BinaryOp binaryop // binary operator to query ) ; // NOTE: GxB_BinaryOp_ytype is historical. Use GxB_BinaryOp_ytype_name instead. GB_PUBLIC GrB_Info GxB_BinaryOp_ytype // return the type of y ( GrB_Type *ytype, // return type of input y GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_ytype_name // return the type_name of y ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_BinaryOp binaryop // binary operator to query ) ; GB_PUBLIC GrB_Info GrB_BinaryOp_free // free a user-created binary operator ( GrB_BinaryOp *binaryop // handle of binary operator to free ) ; //============================================================================== // GxB_SelectOp: select operators (historical) //============================================================================== // GrB_IndexUnaryOp should be used instead of GxB_SelectOp. // GxB_SelectOp is an operator used by GxB_select to select entries from an // input matrix A that are kept in the output C. If an entry A(i,j) in the // matrix A, of size nrows-by-ncols, has the value aij, then it calls the // select function as result = f (i, j, aij, thunk). If the function returns // true, the entry is kept in the output C. If f returns false, the entry is // not kept in C. The type of x for the GxB_SelectOp operator may be any of // the 11 built-in types, or any user-defined type. It may also be GrB_NULL, // to indicate that the function is type-generic and does not depend at all on // the value aij. In this case, x is passed to f as a NULL pointer. // The optional Thunk parameter to GxB_select is a GrB_Scalar. 
For built-in // select operators (TRIL, TRIU, DIAG, and OFFDIAG), Thunk must have any // built-in type, and thunk = (int64_t) Thunk is used to specify the diagonal // for these operators. Thunk may be NULL, in which case its value is treated // as zero, if it has a built-in type. The value of Thunk (if present) is not // modified by any built-in select operator. // For user-defined select operators, Thunk is not typecasted at all. If // the user operator is defined with a non-NULL Thunk input, then it must // be non-NULL and of the same type, when calling GxB_select. // GxB_SelectOp: a function z=f(i,j,x,thunk) for the GxB_Select operation. // The function f must have the signature: // bool f (GrB_Index i, GrB_Index j, const void *x, const void *thunk) ; // The values of i and j are guaranteed to be in the range 0 to // GrB_INDEX_MAX, and they can be safely typecasted to int64_t then negated, // if desired, without any risk of integer overflow. typedef struct GB_SelectOp_opaque *GxB_SelectOp ; //------------------------------------------------------------------------------ // built-in select operators (historical) //------------------------------------------------------------------------------ // GxB_select (C, Mask, accum, op, A, Thunk, desc) always returns a matrix C of // the same size as A (or A' if GrB_TRAN is in the descriptor). 
// Built-in select operators (historical; use GrB_IndexUnaryOp instead).
GB_PUBLIC GxB_SelectOp
    GxB_TRIL,       // C=tril(A,thunk):   returns true if ((j-i) <= thunk)
    GxB_TRIU,       // C=triu(A,thunk):   returns true if ((j-i) >= thunk)
    GxB_DIAG,       // C=diag(A,thunk):   returns true if ((j-i) == thunk)
    GxB_OFFDIAG,    // C=A-diag(A,thunk): returns true if ((j-i) != thunk)

    GxB_NONZERO,    // C=A(A ~= 0)
    GxB_EQ_ZERO,    // C=A(A == 0)
    GxB_GT_ZERO,    // C=A(A >  0)
    GxB_GE_ZERO,    // C=A(A >= 0)
    GxB_LT_ZERO,    // C=A(A <  0)
    GxB_LE_ZERO,    // C=A(A <= 0)

    GxB_NE_THUNK,   // C=A(A ~= thunk)
    GxB_EQ_THUNK,   // C=A(A == thunk)
    GxB_GT_THUNK,   // C=A(A >  thunk)
    GxB_GE_THUNK,   // C=A(A >= thunk)
    GxB_LT_THUNK,   // C=A(A <  thunk)
    GxB_LE_THUNK ;  // C=A(A <= thunk)

// For GxB_TRIL, GxB_TRIU, GxB_DIAG, and GxB_OFFDIAG, the parameter Thunk is a
// GrB_Scalar of any built-in type.  If GrB_NULL, or empty, Thunk is treated as
// zero.  Otherwise, the single entry is typecasted as (int64_t) Thunk.
// These select operators do not depend on the values of A, but just their
// position, and they work on matrices of any type.

// For GxB_*ZERO, the result depends only on the value of A(i,j).  The Thunk
// parameter to GxB_select is ignored and may be GrB_NULL.

// The operators GxB_TRIL, GxB_TRIU, GxB_DIAG, GxB_OFFDIAG, GxB_NONZERO,
// GxB_EQ_ZERO, GxB_NE_THUNK, and GxB_EQ_THUNK work on all built-in types and
// all user-defined types.

// GxB_GT_*, GxB_GE_*, GxB_LT_*, and GxB_LE_* only work on the 11 built-in
// types (not complex).  They cannot be used for user-defined types.

//------------------------------------------------------------------------------
// select operators: (historical)
//------------------------------------------------------------------------------

// User-defined GxB_SelectOps are historical.  New code should use
// GrB_IndexUnaryOp_new instead.
// Pointer to a user-defined select function: result = f (i, j, aij, thunk).
typedef bool (*GxB_select_function)      // return true if A(i,j) is kept
(
    GrB_Index i,                // row index of A(i,j)
    GrB_Index j,                // column index of A(i,j)
    const void *x,              // value of A(i,j)
    const void *thunk           // optional input for select function
) ;

#undef GxB_SelectOp_new
#undef GxM_SelectOp_new

GB_PUBLIC
GrB_Info GXB (SelectOp_new)     // create a new user-defined select operator
(
    GxB_SelectOp *selectop,     // handle for the new select operator
    GxB_select_function function,// pointer to the select function
    GrB_Type xtype,             // type of input x, or NULL if type-generic
    GrB_Type ttype              // type of thunk, or NULL if not used
) ;

// The macro forms pass GB_STR(f) so the function name is recorded
// automatically in the new operator.
#define GxB_SelectOp_new(op,f,x,t) GB_SelectOp_new (op,f,x,t, GB_STR(f))
#define GxM_SelectOp_new(op,f,x,t) GM_SelectOp_new (op,f,x,t, GB_STR(f))

// GB_SelectOp_new should not be called directly, but only through the
// GxB_SelectOp_new macro (but use GrB_IndexUnaryOp_new instead).
GB_PUBLIC
GrB_Info GB_SelectOp_new        // not user-callable
(
    GxB_SelectOp *selectop,     // handle for the new select operator
    GxB_select_function function,// pointer to the select function
    GrB_Type xtype,             // type of input x
    GrB_Type ttype,             // type of thunk, or NULL if not used
    const char *name            // name of the underlying function
) ;

// GxB_SelectOp_xtype is historical.  Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_xtype     // return the type of x
(
    GrB_Type *xtype,            // return type of input x
    GxB_SelectOp selectop       // select operator
) ;

// GxB_SelectOp_ttype is historical.  Use a GrB_IndexUnaryOp instead.
GB_PUBLIC
GrB_Info GxB_SelectOp_ttype     // return the type of thunk
(
    GrB_Type *ttype,            // return type of input thunk
    GxB_SelectOp selectop       // select operator
) ;

GB_PUBLIC
GrB_Info GxB_SelectOp_free      // free a user-created select operator
(
    GxB_SelectOp *selectop      // handle of select operator to free
) ;

//==============================================================================
// GrB_IndexUnaryOp: a unary operator that depends on the row/col indices
//==============================================================================

// The indexop has the form z = f(aij, i, j, y) where aij is the numerical
// value of the A(i,j) entry, i and j are its row and column index, and y
// is a scalar.  For vectors, it has the form z = f(vi, i, 0, y).

typedef struct GB_IndexUnaryOp_opaque *GrB_IndexUnaryOp ;

// Pointer to a user-defined index-unary function.
typedef void (*GxB_index_unary_function)
(
    void *z,            // output value z, of type ztype
    const void *x,      // input value x of type xtype; value of v(i) or A(i,j)
    GrB_Index i,        // row index of A(i,j)
    GrB_Index j,        // column index of A(i,j), or zero for v(i)
    const void *y       // input scalar y
) ;

// GrB_IndexUnaryOp_new creates a user-defined unary op, with an automatic
// detection of the operator name.
#undef GrB_IndexUnaryOp_new #undef GrM_IndexUnaryOp_new GB_PUBLIC GrB_Info GRB (IndexUnaryOp_new) // create a new user-defined IndexUnary op ( GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator GxB_index_unary_function function, // pointer to IndexUnary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x (the A(i,j) entry) GrB_Type ytype // type of input y (the scalar) ) ; #define GrB_IndexUnaryOp_new(op,f,z,x,y) \ GxB_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL) #define GrM_IndexUnaryOp_new(op,f,z,x,y) \ GxM_IndexUnaryOp_new(op,f,z,x,y, GB_STR(f), NULL) GB_PUBLIC GrB_Info GxB_IndexUnaryOp_new // create a named user-created IndexUnaryOp ( GrB_IndexUnaryOp *op, // handle for the new IndexUnary operator GxB_index_unary_function function, // pointer to index_unary function GrB_Type ztype, // type of output z GrB_Type xtype, // type of input x GrB_Type ytype, // type of input y const char *idxop_name, // name of the user function const char *idxop_defn // definition of the user function ) ; GB_PUBLIC GrB_Info GxB_IndexUnaryOp_ztype_name // return the type_name of z ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_IndexUnaryOp op // IndexUnary operator ) ; // For TRIL, TRIU, DIAG, OFFDIAG, COLLE, COLGT, ROWLE, and ROWGT, // the xtype_name is an empty string (""), since these functions do not depend // on the type of the matrix input. 
GB_PUBLIC GrB_Info GxB_IndexUnaryOp_xtype_name // return the type_name of x ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_IndexUnaryOp op // select operator ) ; GB_PUBLIC GrB_Info GxB_IndexUnaryOp_ytype_name // return the type_name of the scalary y ( char *type_name, // user array of size GxB_MAX_NAME_LEN const GrB_IndexUnaryOp op // select operator ) ; GB_PUBLIC GrB_Info GrB_IndexUnaryOp_free // free a user-created IndexUnaryOp ( GrB_IndexUnaryOp *op // handle of IndexUnary to free ) ; //------------------------------------------------------------------------------ // built-in IndexUnaryOps //------------------------------------------------------------------------------ // To facilitate computations with negative integers, the indices i and j are // of type int64_t. The scalar y has the type corresponding to the suffix // of the name of the operator. GB_PUBLIC GrB_IndexUnaryOp //-------------------------------------------------------------------------- // Result has the integer type INT32 or INT64, the same as the suffix //-------------------------------------------------------------------------- // These operators work on any data type, including user-defined. // ROWINDEX: (i+y): row index plus y GrB_ROWINDEX_INT32, GrB_ROWINDEX_INT64, // COLINDEX: (j+y): col index plus y GrB_COLINDEX_INT32, GrB_COLINDEX_INT64, // DIAGINDEX: (j-(i+y)): diagonal index plus y GrB_DIAGINDEX_INT32, GrB_DIAGINDEX_INT64, //-------------------------------------------------------------------------- // Result is bool, depending only on the indices i,j, and y //-------------------------------------------------------------------------- // These operators work on any data type, including user-defined. // The scalar y is int64. 
// TRIL: (j <= (i+y)): lower triangular part GrB_TRIL, // TRIU: (j >= (i+y)): upper triangular part GrB_TRIU, // DIAG: (j == (i+y)): diagonal GrB_DIAG, // OFFDIAG: (j != (i+y)): offdiagonal GrB_OFFDIAG, // COLLE: (j <= y): columns 0:y GrB_COLLE, // COLGT: (j > y): columns y+1:ncols-1 GrB_COLGT, // ROWLE: (i <= y): rows 0:y GrB_ROWLE, // ROWGT: (i > y): rows y+1:nrows-1 GrB_ROWGT, //-------------------------------------------------------------------------- // Result is bool, depending only on the value aij //-------------------------------------------------------------------------- // These operators work on matrices and vectors of any built-in type, // including complex types. aij and the scalar y have the same type as the // operator suffix. // VALUEEQ: (aij == y) GrB_VALUEEQ_INT8, GrB_VALUEEQ_UINT8, GrB_VALUEEQ_FP32, GrB_VALUEEQ_BOOL, GrB_VALUEEQ_INT16, GrB_VALUEEQ_UINT16, GrB_VALUEEQ_FP64, GrB_VALUEEQ_INT32, GrB_VALUEEQ_UINT32, GxB_VALUEEQ_FC32, GrB_VALUEEQ_INT64, GrB_VALUEEQ_UINT64, GxB_VALUEEQ_FC64, // VALUENE: (aij != y) GrB_VALUENE_INT8, GrB_VALUENE_UINT8, GrB_VALUENE_FP32, GrB_VALUENE_BOOL, GrB_VALUENE_INT16, GrB_VALUENE_UINT16, GrB_VALUENE_FP64, GrB_VALUENE_INT32, GrB_VALUENE_UINT32, GxB_VALUENE_FC32, GrB_VALUENE_INT64, GrB_VALUENE_UINT64, GxB_VALUENE_FC64, // These operators work on matrices and vectors of any real (non-complex) // built-in type. 
// VALUELT: (aij < y) GrB_VALUELT_INT8, GrB_VALUELT_UINT8, GrB_VALUELT_FP32, GrB_VALUELT_BOOL, GrB_VALUELT_INT16, GrB_VALUELT_UINT16, GrB_VALUELT_FP64, GrB_VALUELT_INT32, GrB_VALUELT_UINT32, GrB_VALUELT_INT64, GrB_VALUELT_UINT64, // VALUELE: (aij <= y) GrB_VALUELE_INT8, GrB_VALUELE_UINT8, GrB_VALUELE_FP32, GrB_VALUELE_BOOL, GrB_VALUELE_INT16, GrB_VALUELE_UINT16, GrB_VALUELE_FP64, GrB_VALUELE_INT32, GrB_VALUELE_UINT32, GrB_VALUELE_INT64, GrB_VALUELE_UINT64, // VALUEGT: (aij > y) GrB_VALUEGT_INT8, GrB_VALUEGT_UINT8, GrB_VALUEGT_FP32, GrB_VALUEGT_BOOL, GrB_VALUEGT_INT16, GrB_VALUEGT_UINT16, GrB_VALUEGT_FP64, GrB_VALUEGT_INT32, GrB_VALUEGT_UINT32, GrB_VALUEGT_INT64, GrB_VALUEGT_UINT64, // VALUEGE: (aij >= y) GrB_VALUEGE_INT8, GrB_VALUEGE_UINT8, GrB_VALUEGE_FP32, GrB_VALUEGE_BOOL, GrB_VALUEGE_INT16, GrB_VALUEGE_UINT16, GrB_VALUEGE_FP64, GrB_VALUEGE_INT32, GrB_VALUEGE_UINT32, GrB_VALUEGE_INT64, GrB_VALUEGE_UINT64 ; //============================================================================== // GrB_Monoid //============================================================================== // A monoid is an associative operator z=op(x,y) where all three types of z, x, // and y are identical. The monoid also has an identity element, such that // op(x,identity) = op(identity,x) = x. 
typedef struct GB_Monoid_opaque *GrB_Monoid ; GB_PUBLIC GrB_Info GrB_Monoid_new_BOOL // create a new boolean monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid bool identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_INT8 // create a new int8 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid int8_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_UINT8 // create a new uint8 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid uint8_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_INT16 // create a new int16 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid int16_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_UINT16 // create a new uint16 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid uint16_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_INT32 // create a new int32 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid int32_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_UINT32 // create a new uint32 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid uint32_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_INT64 // create a new int64 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid int64_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_UINT64 // create a new uint64 monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator 
of the monoid uint64_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_FP32 // create a new float monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid float identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_FP64 // create a new double monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid double identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GxB_Monoid_new_FC32 // create a new float complex monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid GxB_FC32_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GxB_Monoid_new_FC64 // create a new double complex monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid GxB_FC64_t identity // identity value of the monoid ) ; GB_PUBLIC GrB_Info GrB_Monoid_new_UDT // create a monoid with a user-defined type ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid void *identity // identity value of the monoid ) ; // Type-generic method for creating a new monoid: /* GB_PUBLIC GrB_Info GrB_Monoid_new // create a monoid ( GrB_Monoid *monoid, // handle of monoid to create GrB_BinaryOp op, // binary operator of the monoid <type> identity // identity value of the monoid ) ; */ #if GxB_STDC_VERSION >= 201112L #define GrB_Monoid_new(monoid,op,identity) \ _Generic \ ( \ (identity), \ GB_CASES (, GrB, Monoid_new) \ ) \ (monoid, op, identity) #endif // GxB_Monoid_terminal_new is identical to GrB_Monoid_new, except that a // terminal value can be specified. The terminal may be NULL, which indicates // no terminal value (and in this case, it is identical to GrB_Monoid_new). // The terminal value, if not NULL, must have the same type as the identity. 
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_BOOL     // create a new boolean monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    bool identity,          // identity value of the monoid
    bool terminal           // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT8     // create a new int8 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    int8_t identity,        // identity value of the monoid
    int8_t terminal         // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT8    // create a new uint8 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    uint8_t identity,       // identity value of the monoid
    uint8_t terminal        // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT16    // create a new int16 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    int16_t identity,       // identity value of the monoid
    int16_t terminal        // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT16   // create a new uint16 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    uint16_t identity,      // identity value of the monoid
    uint16_t terminal       // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT32    // create a new int32 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    int32_t identity,       // identity value of the monoid
    int32_t terminal        // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT32   // create a new uint32 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    uint32_t identity,      // identity value of the monoid
    uint32_t terminal       // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_INT64    // create a new int64 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    int64_t identity,       // identity value of the monoid
    int64_t terminal        // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UINT64   // create a new uint64 monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    uint64_t identity,      // identity value of the monoid
    uint64_t terminal       // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FP32     // create a new float monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    float identity,         // identity value of the monoid
    float terminal          // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FP64     // create a new double monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    double identity,        // identity value of the monoid
    double terminal         // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FC32     // create a new float complex monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    GxB_FC32_t identity,    // identity value of the monoid
    GxB_FC32_t terminal     // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_FC64     // create a new double complex monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    GxB_FC64_t identity,    // identity value of the monoid
    GxB_FC64_t terminal     // terminal value of the monoid
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal_new_UDT      // create a monoid with a user type
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    void *identity,         // identity value of the monoid
    void *terminal          // terminal value of the monoid
) ;

// Type-generic method for creating a new monoid with a terminal value:

/*
GB_PUBLIC GrB_Info GxB_Monoid_terminal_new          // create a monoid
(
    GrB_Monoid *monoid,     // handle of monoid to create
    GrB_BinaryOp op,        // binary operator of the monoid
    <type> identity,        // identity value of the monoid
    <type> terminal         // terminal value of the monoid
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GxB_Monoid_terminal_new(monoid,op,identity,terminal)    \
    _Generic                                                    \
    (                                                           \
        (identity),                                             \
        GB_CASES (, GxB, Monoid_terminal_new)                   \
    )                                                           \
    (monoid, op, identity, terminal)
#endif

GB_PUBLIC GrB_Info GxB_Monoid_operator      // return the monoid operator
(
    GrB_BinaryOp *op,       // returns the binary op of the monoid
    GrB_Monoid monoid       // monoid to query
) ;

GB_PUBLIC GrB_Info GxB_Monoid_identity      // return the monoid identity
(
    void *identity,         // returns the identity of the monoid
    GrB_Monoid monoid       // monoid to query
) ;

GB_PUBLIC GrB_Info GxB_Monoid_terminal      // return the monoid terminal
(
    bool *has_terminal,     // true if the monoid has a terminal value
    void *terminal,         // returns the terminal of the monoid,
                            // unmodified if has_terminal is false
    GrB_Monoid monoid       // monoid to query
) ;

GB_PUBLIC GrB_Info GrB_Monoid_free          // free a user-created monoid
(
    GrB_Monoid *monoid      // handle of monoid to free
) ;

//==============================================================================
// GrB_Semiring
//==============================================================================

typedef struct GB_Semiring_opaque *GrB_Semiring ;

GB_PUBLIC GrB_Info GrB_Semiring_new         // create a semiring
(
    GrB_Semiring *semiring,     // handle of semiring to create
    GrB_Monoid add,             // add monoid of the semiring
    GrB_BinaryOp multiply       // multiply operator of the semiring
) ;

GB_PUBLIC GrB_Info GxB_Semiring_add         // return the add monoid of a semiring
(
    GrB_Monoid *add,            // returns add monoid of the semiring
    GrB_Semiring semiring       // semiring to query
) ;

GB_PUBLIC GrB_Info GxB_Semiring_multiply    // return multiply operator of a semiring
(
    GrB_BinaryOp *multiply,     // returns multiply operator of the semiring
    GrB_Semiring semiring       // semiring to query
) ;

GB_PUBLIC GrB_Info GrB_Semiring_free        // free a user-created semiring
(
    GrB_Semiring *semiring      // handle of semiring to free
) ;

//==============================================================================
// GrB_Scalar: a GraphBLAS scalar
//==============================================================================

// GxB_Scalar has become GrB_Scalar.  The older name GxB_Scalar is kept as
// historical, but GrB_Scalar should be used instead.

typedef struct GB_Scalar_opaque *GxB_Scalar ;   // historical: use GrB_Scalar
typedef struct GB_Scalar_opaque *GrB_Scalar ;   // use this instead

// These methods create, free, copy, and clear a GrB_Scalar.  The nvals,
// and type methods return basic information about a GrB_Scalar.

GB_PUBLIC GrB_Info GrB_Scalar_new       // create a new GrB_Scalar with no entry
(
    GrB_Scalar *s,          // handle of GrB_Scalar to create
    GrB_Type type           // type of GrB_Scalar to create
) ;

GB_PUBLIC GrB_Info GrB_Scalar_dup       // make an exact copy of a GrB_Scalar
(
    GrB_Scalar *s,          // handle of output GrB_Scalar to create
    const GrB_Scalar t      // input GrB_Scalar to copy
) ;

GB_PUBLIC GrB_Info GrB_Scalar_clear     // clear a GrB_Scalar of its entry
(                                       // type remains unchanged.
    GrB_Scalar s            // GrB_Scalar to clear
) ;

GB_PUBLIC GrB_Info GrB_Scalar_nvals     // get the number of entries in a GrB_Scalar
(
    GrB_Index *nvals,       // GrB_Scalar has nvals entries (0 or 1)
    const GrB_Scalar s      // GrB_Scalar to query
) ;

// NOTE: GxB_Scalar_type is historical.  Use GxB_Scalar_type_name instead.
GB_PUBLIC GrB_Info GxB_Scalar_type      // get the type of a GrB_Scalar
(
    GrB_Type *type,         // returns the type of the GrB_Scalar
    const GrB_Scalar s      // GrB_Scalar to query
) ;

GB_PUBLIC GrB_Info GxB_Scalar_type_name     // return the name of the type of a scalar
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_Scalar s      // GrB_Scalar to query
) ;

GB_PUBLIC GrB_Info GxB_Scalar_memoryUsage   // return # of bytes used for a scalar
(
    size_t *size,           // # of bytes used by the scalar s
    const GrB_Scalar s      // GrB_Scalar to query
) ;

GB_PUBLIC GrB_Info GrB_Scalar_free      // free a GrB_Scalar
(
    GrB_Scalar *s           // handle of GrB_Scalar to free
) ;

// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_new   (GrB_Scalar *s, GrB_Type type) ;
GB_PUBLIC GrB_Info GxB_Scalar_dup   (GrB_Scalar *s, const GrB_Scalar t) ;
GB_PUBLIC GrB_Info GxB_Scalar_clear (GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_nvals (GrB_Index *nvals, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_free  (GrB_Scalar *s) ;

//------------------------------------------------------------------------------
// GrB_Scalar_setElement
//------------------------------------------------------------------------------

// Set a single GrB_Scalar s, from a user scalar x: s = x, typecasting from the
// type of x to the type of s as needed.
GB_PUBLIC GrB_Info GrB_Scalar_setElement_BOOL       // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    bool x                  // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT8       // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    int8_t x                // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT8      // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    uint8_t x               // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT16      // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    int16_t x               // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT16     // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    uint16_t x              // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT32      // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    int32_t x               // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT32     // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    uint32_t x              // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_INT64      // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    int64_t x               // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_UINT64     // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    uint64_t x              // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_FP32       // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    float x                 // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_FP64       // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    double x                // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GxB_Scalar_setElement_FC32       // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    GxB_FC32_t x            // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GxB_Scalar_setElement_FC64       // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    GxB_FC64_t x            // user scalar to assign to s
) ;

GB_PUBLIC GrB_Info GrB_Scalar_setElement_UDT        // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    void *x                 // user scalar to assign to s
) ;

// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_setElement_BOOL   (GrB_Scalar s, bool x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT8   (GrB_Scalar s, int8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT16  (GrB_Scalar s, int16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT32  (GrB_Scalar s, int32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_INT64  (GrB_Scalar s, int64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT8  (GrB_Scalar s, uint8_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT16 (GrB_Scalar s, uint16_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT32 (GrB_Scalar s, uint32_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UINT64 (GrB_Scalar s, uint64_t x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP32   (GrB_Scalar s, float x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_FP64   (GrB_Scalar s, double x) ;
GB_PUBLIC GrB_Info GxB_Scalar_setElement_UDT    (GrB_Scalar s, void *x) ;

// Type-generic version:  x can be any supported C type or void * for a
// user-defined type.

/*
GB_PUBLIC GrB_Info GrB_Scalar_setElement            // s = x
(
    GrB_Scalar s,           // GrB_Scalar to modify
    <type> x                // user scalar to assign to s
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Scalar_setElement(s,x)          \
    _Generic                                \
    (                                       \
        (x),                                \
        GB_CASES (, GrB, Scalar_setElement) \
    )                                       \
    (s, x)
#define GxB_Scalar_setElement(s,x) GrB_Scalar_setElement (s, x)
#endif

//------------------------------------------------------------------------------
// GrB_Scalar_extractElement
//------------------------------------------------------------------------------

// Extract a single entry from a GrB_Scalar, x = s, typecasting from the type
// of s to the type of x as needed.
GB_PUBLIC GrB_Info GrB_Scalar_extractElement_BOOL   // x = s
(
    bool *x,                // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT8   // x = s
(
    int8_t *x,              // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT8  // x = s
(
    uint8_t *x,             // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT16  // x = s
(
    int16_t *x,             // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT16 // x = s
(
    uint16_t *x,            // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT32  // x = s
(
    int32_t *x,             // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT32 // x = s
(
    uint32_t *x,            // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_INT64  // x = s
(
    int64_t *x,             // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UINT64 // x = s
(
    uint64_t *x,            // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_FP32   // x = s
(
    float *x,               // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_FP64   // x = s
(
    double *x,              // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FC32   // x = s
(
    GxB_FC32_t *x,          // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FC64   // x = s
(
    GxB_FC64_t *x,          // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

GB_PUBLIC GrB_Info GrB_Scalar_extractElement_UDT    // x = s
(
    void *x,                // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;

// historical names identical to GrB_Scalar_methods above:
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_BOOL   (bool *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT8   (int8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT16  (int16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT32  (int32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_INT64  (int64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT8  (uint8_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT16 (uint16_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT32 (uint32_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UINT64 (uint64_t *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP32   (float *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_FP64   (double *x, const GrB_Scalar s) ;
GB_PUBLIC GrB_Info GxB_Scalar_extractElement_UDT    (void *x, const GrB_Scalar s) ;

// Type-generic version:  x can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC GrB_Info GrB_Scalar_extractElement        // x = s
(
    <type> *x,              // user scalar extracted
    const GrB_Scalar s      // GrB_Scalar to extract an entry from
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Scalar_extractElement(x,s)              \
    _Generic                                        \
    (                                               \
        (x),                                        \
        GB_CASES (*, GrB, Scalar_extractElement)    \
    )                                               \
    (x, s)
#define GxB_Scalar_extractElement(x,s) GrB_Scalar_extractElement (x, s)
#endif

//==============================================================================
// GrB_Vector: a GraphBLAS vector
//==============================================================================

typedef struct GB_Vector_opaque *GrB_Vector ;

// These methods create, free, copy, and clear a vector.  The size, nvals,
// and type methods return basic information about a vector.

GB_PUBLIC GrB_Info GrB_Vector_new       // create a new vector with no entries
(
    GrB_Vector *v,          // handle of vector to create
    GrB_Type type,          // type of vector to create
    GrB_Index n             // vector dimension is n-by-1
                            // (n must be <= GrB_INDEX_MAX+1)
) ;

GB_PUBLIC GrB_Info GrB_Vector_dup       // make an exact copy of a vector
(
    GrB_Vector *w,          // handle of output vector to create
    const GrB_Vector u      // input vector to copy
) ;

GB_PUBLIC GrB_Info GrB_Vector_clear     // clear a vector of all entries;
(                                       // type and dimension remain unchanged.
    GrB_Vector v            // vector to clear
) ;

GB_PUBLIC GrB_Info GrB_Vector_size      // get the dimension of a vector
(
    GrB_Index *n,           // vector dimension is n-by-1
    const GrB_Vector v      // vector to query
) ;

GB_PUBLIC GrB_Info GrB_Vector_nvals     // get the number of entries in a vector
(
    GrB_Index *nvals,       // vector has nvals entries
    const GrB_Vector v      // vector to query
) ;

// NOTE: GxB_Vector_type is historical.  Use GxB_Vector_type_name instead.
GB_PUBLIC GrB_Info GxB_Vector_type      // get the type of a vector
(
    GrB_Type *type,         // returns the type of the vector
    const GrB_Vector v      // vector to query
) ;

GB_PUBLIC GrB_Info GxB_Vector_type_name     // return the name of the type of a vector
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_Vector v      // vector to query
) ;

GB_PUBLIC GrB_Info GxB_Vector_memoryUsage   // return # of bytes used for a vector
(
    size_t *size,           // # of bytes used by the vector v
    const GrB_Vector v      // vector to query
) ;

GB_PUBLIC GrB_Info GxB_Vector_iso       // return iso status of a vector
(
    bool *iso,              // true if the vector is iso-valued
    const GrB_Vector v      // vector to query
) ;

GB_PUBLIC GrB_Info GrB_Vector_free      // free a vector
(
    GrB_Vector *v           // handle of vector to free
) ;

//------------------------------------------------------------------------------
// GrB_Vector_build
//------------------------------------------------------------------------------

// GrB_Vector_build:  w = sparse (I,1,X), but using any
// associative operator to assemble duplicate entries.
GB_PUBLIC GrB_Info GrB_Vector_build_BOOL    // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const bool *X,          // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_INT8    // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const int8_t *X,        // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_UINT8   // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const uint8_t *X,       // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_INT16   // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const int16_t *X,       // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_UINT16  // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const uint16_t *X,      // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_INT32   // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const int32_t *X,       // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_UINT32  // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const uint32_t *X,      // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_INT64   // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const int64_t *X,       // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_UINT64  // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const uint64_t *X,      // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_FP32    // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const float *X,         // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_FP64    // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const double *X,        // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GxB_Vector_build_FC32    // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const GxB_FC32_t *X,    // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GxB_Vector_build_FC64    // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const GxB_FC64_t *X,    // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GrB_Vector_build_UDT     // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const void *X,          // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;

GB_PUBLIC GrB_Info GxB_Vector_build_Scalar  // build a vector from (i,scalar) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    GrB_Scalar scalar,      // value for all tuples
    GrB_Index nvals         // number of tuples
) ;

// Type-generic version:  X can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC GrB_Info GrB_Vector_build         // build a vector from (I,X) tuples
(
    GrB_Vector w,           // vector to build
    const GrB_Index *I,     // array of row indices of tuples
    const <type> *X,        // array of values of tuples
    GrB_Index nvals,        // number of tuples
    const GrB_BinaryOp dup  // binary function to assemble duplicates
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Vector_build(w,I,X,nvals,dup)   \
    _Generic                                \
    (                                       \
        (X),                                \
        GB_CASES (*, GrB, Vector_build)     \
    )                                       \
    (w, I, ((const void *) (X)), nvals, dup)
#endif

//------------------------------------------------------------------------------
// GrB_Vector_setElement
//------------------------------------------------------------------------------

// Set a single scalar in a vector, w(i) = x, typecasting from the type of x to
// the type of w as needed.
GB_PUBLIC GrB_Info GrB_Vector_setElement_BOOL       // w(i) = x
(
    GrB_Vector w,           // vector to modify
    bool x,                 // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_INT8       // w(i) = x
(
    GrB_Vector w,           // vector to modify
    int8_t x,               // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT8      // w(i) = x
(
    GrB_Vector w,           // vector to modify
    uint8_t x,              // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_INT16      // w(i) = x
(
    GrB_Vector w,           // vector to modify
    int16_t x,              // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT16     // w(i) = x
(
    GrB_Vector w,           // vector to modify
    uint16_t x,             // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_INT32      // w(i) = x
(
    GrB_Vector w,           // vector to modify
    int32_t x,              // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT32     // w(i) = x
(
    GrB_Vector w,           // vector to modify
    uint32_t x,             // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_INT64      // w(i) = x
(
    GrB_Vector w,           // vector to modify
    int64_t x,              // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_UINT64     // w(i) = x
(
    GrB_Vector w,           // vector to modify
    uint64_t x,             // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_FP32       // w(i) = x
(
    GrB_Vector w,           // vector to modify
    float x,                // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_FP64       // w(i) = x
(
    GrB_Vector w,           // vector to modify
    double x,               // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GxB_Vector_setElement_FC32       // w(i) = x
(
    GrB_Vector w,           // vector to modify
    GxB_FC32_t x,           // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GxB_Vector_setElement_FC64       // w(i) = x
(
    GrB_Vector w,           // vector to modify
    GxB_FC64_t x,           // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_UDT        // w(i) = x
(
    GrB_Vector w,           // vector to modify
    void *x,                // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_setElement_Scalar     // w(i) = x
(
    GrB_Vector w,           // vector to modify
    GrB_Scalar x,           // scalar to assign to w(i)
    GrB_Index i             // row index
) ;

// Type-generic version:  x can be any supported C type or void * for a
// user-defined type.

/*
GB_PUBLIC GrB_Info GrB_Vector_setElement            // w(i) = x
(
    GrB_Vector w,           // vector to modify
    <type> x,               // scalar to assign to w(i)
    GrB_Index i             // row index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Vector_setElement(w,x,i)            \
    _Generic                                    \
    (                                           \
        (x),                                    \
        GB_CASES (, GrB, Vector_setElement),    \
        default: GrB_Vector_setElement_Scalar   \
    )                                           \
    (w, x, i)
#endif

//------------------------------------------------------------------------------
// GrB_Vector_extractElement
//------------------------------------------------------------------------------

// Extract a single entry from a vector, x = v(i), typecasting from the type of
// v to the type of x as needed.
GB_PUBLIC GrB_Info GrB_Vector_extractElement_BOOL   // x = v(i)
(
    bool *x,                // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_INT8   // x = v(i)
(
    int8_t *x,              // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_UINT8  // x = v(i)
(
    uint8_t *x,             // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_INT16  // x = v(i)
(
    int16_t *x,             // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_UINT16 // x = v(i)
(
    uint16_t *x,            // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_INT32  // x = v(i)
(
    int32_t *x,             // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_UINT32 // x = v(i)
(
    uint32_t *x,            // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_INT64  // x = v(i)
(
    int64_t *x,             // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_UINT64 // x = v(i)
(
    uint64_t *x,            // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_FP32   // x = v(i)
(
    float *x,               // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_FP64   // x = v(i)
(
    double *x,              // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GxB_Vector_extractElement_FC32   // x = v(i)
(
    GxB_FC32_t *x,          // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GxB_Vector_extractElement_FC64   // x = v(i)
(
    GxB_FC64_t *x,          // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_UDT    // x = v(i)
(
    void *x,                // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractElement_Scalar // x = v(i)
(
    GrB_Scalar x,           // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;

// Type-generic version:  x can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC GrB_Info GrB_Vector_extractElement        // x = v(i)
(
    <type> *x,              // scalar extracted
    const GrB_Vector v,     // vector to extract an entry from
    GrB_Index i             // row index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Vector_extractElement(x,v,i)            \
    _Generic                                        \
    (                                               \
        (x),                                        \
        GB_CASES (*, GrB, Vector_extractElement),   \
        default: GrB_Vector_extractElement_Scalar   \
    )                                               \
    (x, v, i)
#endif

//------------------------------------------------------------------------------
// GrB_Vector_removeElement
//------------------------------------------------------------------------------

// GrB_Vector_removeElement (v,i) removes the element v(i) from the vector v.

GB_PUBLIC GrB_Info GrB_Vector_removeElement
(
    GrB_Vector v,           // vector to remove an element from
    GrB_Index i             // index
) ;

//------------------------------------------------------------------------------
// GrB_Vector_extractTuples
//------------------------------------------------------------------------------

// Extracts all tuples from a vector, like [I,~,X] = find (v).  If
// any parameter I and/or X is NULL, then that component is not extracted.  For
// example, to extract just the row indices, pass I as non-NULL, and X as NULL.
// This is like [I,~,~] = find (v).

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_BOOL    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    bool *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_INT8    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int8_t *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_UINT8   // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint8_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_INT16   // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int16_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_UINT16  // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint16_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_INT32   // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int32_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_UINT32  // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint32_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_INT64   // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    int64_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_UINT64  // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    uint64_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_FP32    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    float *X,               // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_FP64    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    double *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GxB_Vector_extractTuples_FC32    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GxB_FC32_t *X,          // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GxB_Vector_extractTuples_FC64    // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GxB_FC64_t *X,          // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

GB_PUBLIC GrB_Info GrB_Vector_extractTuples_UDT     // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    void *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;

// Type-generic version:  X can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC GrB_Info GrB_Vector_extractTuples         // [I,~,X] = find (v)
(
    GrB_Index *I,           // array for returning row indices of tuples
    <type> *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I, X size on input; # tuples on output
    const GrB_Vector v      // vector to extract tuples from
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Vector_extractTuples(I,X,nvals,v)   \
    _Generic                                    \
    (                                           \
        (X),                                    \
        GB_CASES (*, GrB, Vector_extractTuples) \
    )                                           \
    (I, X, nvals, v)
#endif

//==============================================================================
// GrB_Matrix: a GraphBLAS matrix
//==============================================================================

typedef struct GB_Matrix_opaque *GrB_Matrix ;

// These methods create, free, copy, and clear a matrix.  The nrows, ncols,
// nvals, and type methods return basic information about a matrix.
GB_PUBLIC
GrB_Info GrB_Matrix_new     // create a new matrix with no entries
(
    GrB_Matrix *A,          // handle of matrix to create
    GrB_Type type,          // type of matrix to create
    GrB_Index nrows,        // matrix dimension is nrows-by-ncols
    GrB_Index ncols         // (nrows and ncols must be <= GrB_INDEX_MAX+1)
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_dup     // make an exact copy of a matrix
(
    GrB_Matrix *C,          // handle of output matrix to create
    const GrB_Matrix A      // input matrix to copy
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_clear   // clear a matrix of all entries;
(                           // type and dimensions remain unchanged
    GrB_Matrix A            // matrix to clear
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_nrows   // get the number of rows of a matrix
(
    GrB_Index *nrows,       // matrix has nrows rows
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_ncols   // get the number of columns of a matrix
(
    GrB_Index *ncols,       // matrix has ncols columns
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_nvals   // get the number of entries in a matrix
(
    GrB_Index *nvals,       // matrix has nvals entries
    const GrB_Matrix A      // matrix to query
) ;

// NOTE: GxB_Matrix_type is historical.  Use GxB_Matrix_type_name instead.
GB_PUBLIC
GrB_Info GxB_Matrix_type    // get the type of a matrix
(
    GrB_Type *type,         // returns the type of the matrix
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_type_name      // return the name of the type of a matrix
(
    char *type_name,        // name of the type (char array of size at least
                            // GxB_MAX_NAME_LEN, owned by the user application).
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_memoryUsage  // return # of bytes used for a matrix
(
    size_t *size,           // # of bytes used by the matrix A
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_iso     // return iso status of a matrix
(
    bool *iso,              // true if the matrix is iso-valued
    const GrB_Matrix A      // matrix to query
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_free    // free a matrix
(
    GrB_Matrix *A           // handle of matrix to free
) ;

//------------------------------------------------------------------------------
// GrB_Matrix_build
//------------------------------------------------------------------------------

// GrB_Matrix_build:  C = sparse (I,J,X), but using any
// associative operator to assemble duplicate entries.

GB_PUBLIC
GrB_Info GrB_Matrix_build_BOOL      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const bool *X,              // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT8      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const int8_t *X,            // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT8     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const uint8_t *X,           // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT16     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const int16_t *X,           // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT16    // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const uint16_t *X,          // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT32     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const int32_t *X,           // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT32    // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const uint32_t *X,          // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_INT64     // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const int64_t *X,           // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UINT64    // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const uint64_t *X,          // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_FP32      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const float *X,             // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_FP64      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const double *X,            // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_build_FC32      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const GxB_FC32_t *X,        // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_build_FC64      // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const GxB_FC64_t *X,        // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_build_UDT       // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const void *X,              // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_build_Scalar    // build a matrix from (I,J,scalar) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    GrB_Scalar scalar,          // value for all tuples
    GrB_Index nvals             // number of tuples
) ;

// Type-generic version:  X can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Matrix_build           // build a matrix from (I,J,X) tuples
(
    GrB_Matrix C,               // matrix to build
    const GrB_Index *I,         // array of row indices of tuples
    const GrB_Index *J,         // array of column indices of tuples
    const <type> *X,            // array of values of tuples
    GrB_Index nvals,            // number of tuples
    const GrB_BinaryOp dup     // binary function to assemble duplicates
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_build(C,I,J,X,nvals,dup)     \
    _Generic                                    \
    (                                           \
        (X),                                    \
            GB_CASES (*, GrB, Matrix_build)     \
    )                                           \
    (C, I, J, ((const void *) (X)), nvals, dup)
#endif

//------------------------------------------------------------------------------
// GrB_Matrix_setElement
//------------------------------------------------------------------------------

// Set a single entry in a matrix, C(i,j) = x, typecasting
// from the type of x to the type of C, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_setElement_BOOL     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    bool x,                             // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT8     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int8_t x,                           // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT8    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint8_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT16    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int16_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT16   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint16_t x,                         // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT32    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int32_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT32   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint32_t x,                         // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_INT64    // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    int64_t x,                          // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UINT64   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    uint64_t x,                         // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP32     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    float x,                            // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_FP64     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    double x,                           // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC32     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    GxB_FC32_t x,                       // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_setElement_FC64     // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    GxB_FC64_t x,                       // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_UDT      // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    void *x,                            // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_setElement_Scalar   // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    GrB_Scalar x,                       // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;

// Type-generic version:  x can be any supported C type or void * for a
// user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Matrix_setElement          // C (i,j) = x
(
    GrB_Matrix C,                       // matrix to modify
    <type> x,                           // scalar to assign to C(i,j)
    GrB_Index i,                        // row index
    GrB_Index j                         // column index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_setElement(C,x,i,j)                  \
    _Generic                                            \
    (                                                   \
        (x),                                            \
            GB_CASES (, GrB, Matrix_setElement),        \
            default:  GrB_Matrix_setElement_Scalar      \
    )                                                   \
    (C, x, i, j)
#endif

//------------------------------------------------------------------------------
// GrB_Matrix_extractElement
//------------------------------------------------------------------------------

// Extract a single entry from a matrix, x = A(i,j), typecasting from the type
// of A to the type of x, as needed.
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_BOOL     // x = A(i,j)
(
    bool *x,                                // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT8     // x = A(i,j)
(
    int8_t *x,                              // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT8    // x = A(i,j)
(
    uint8_t *x,                             // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT16    // x = A(i,j)
(
    int16_t *x,                             // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT16   // x = A(i,j)
(
    uint16_t *x,                            // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT32    // x = A(i,j)
(
    int32_t *x,                             // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT32   // x = A(i,j)
(
    uint32_t *x,                            // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_INT64    // x = A(i,j)
(
    int64_t *x,                             // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UINT64   // x = A(i,j)
(
    uint64_t *x,                            // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP32     // x = A(i,j)
(
    float *x,                               // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_FP64     // x = A(i,j)
(
    double *x,                              // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC32     // x = A(i,j)
(
    GxB_FC32_t *x,                          // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_extractElement_FC64     // x = A(i,j)
(
    GxB_FC64_t *x,                          // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_UDT      // x = A(i,j)
(
    void *x,                                // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractElement_Scalar   // x = A(i,j)
(
    GrB_Scalar x,                           // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;

// Type-generic version:  x can be a pointer to any supported C type or void *
// for a user-defined type.

/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractElement          // x = A(i,j)
(
    <type> *x,                              // extracted scalar
    const GrB_Matrix A,                     // matrix to extract a scalar from
    GrB_Index i,                            // row index
    GrB_Index j                             // column index
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_extractElement(x,A,i,j)              \
    _Generic                                            \
    (                                                   \
        (x),                                            \
            GB_CASES (*, GrB, Matrix_extractElement),   \
            default:  GrB_Matrix_extractElement_Scalar  \
    )                                                   \
    (x, A, i, j)
#endif

//------------------------------------------------------------------------------
// GrB_Matrix_removeElement
//------------------------------------------------------------------------------

// GrB_Matrix_removeElement (A,i,j) removes the entry A(i,j) from the matrix A.
GB_PUBLIC
GrB_Info GrB_Matrix_removeElement
(
    GrB_Matrix C,               // matrix to remove entry from
    GrB_Index i,                // row index
    GrB_Index j                 // column index
) ;

//------------------------------------------------------------------------------
// GrB_Matrix_extractTuples
//------------------------------------------------------------------------------

// Extracts all tuples from a matrix, like [I,J,X] = find (A).  If
// any parameter I, J and/or X is NULL, then that component is not extracted.
// For example, to extract just the row and col indices, pass I and J as
// non-NULL, and X as NULL.  This is like [I,J,~] = find (A).

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_BOOL      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    bool *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT8      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int8_t *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT8     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint8_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT16     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int16_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT16    // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint16_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT32     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int32_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT32    // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint32_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_INT64     // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    int64_t *X,             // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UINT64    // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    uint64_t *X,            // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP32      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    float *X,               // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_FP64      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    double *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC32      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    GxB_FC32_t *X,          // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_extractTuples_FC64      // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    GxB_FC64_t *X,          // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples_UDT       // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    void *X,                // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;

// Type-generic version:  X can be a pointer to any supported C type or void *
// for a user-defined type.
/*
GB_PUBLIC
GrB_Info GrB_Matrix_extractTuples           // [I,J,X] = find (A)
(
    GrB_Index *I,           // array for returning row indices of tuples
    GrB_Index *J,           // array for returning col indices of tuples
    <type> *X,              // array for returning values of tuples
    GrB_Index *nvals,       // I,J,X size on input; # tuples on output
    const GrB_Matrix A      // matrix to extract tuples from
) ;
*/

#if GxB_STDC_VERSION >= 201112L
#define GrB_Matrix_extractTuples(I,J,X,nvals,A)         \
    _Generic                                            \
    (                                                   \
        (X),                                            \
            GB_CASES (*, GrB, Matrix_extractTuples)     \
    )                                                   \
    (I, J, X, nvals, A)
#endif

//------------------------------------------------------------------------------
// GxB_Matrix_concat and GxB_Matrix_split
//------------------------------------------------------------------------------

// GxB_Matrix_concat concatenates an array of matrices (Tiles) into a single
// GrB_Matrix C.

// Tiles is an m-by-n dense array of matrices held in row-major format, where
// Tiles [i*n+j] is the (i,j)th tile, and where m > 0 and n > 0 must hold.  Let
// A{i,j} denote the (i,j)th tile.  The matrix C is constructed by
// concatenating these tiles together, as:

//  C = [ A{0,0}   A{0,1}   A{0,2}   ... A{0,n-1}
//        A{1,0}   A{1,1}   A{1,2}   ... A{1,n-1}
//        ...
//        A{m-1,0} A{m-1,1} A{m-1,2} ... A{m-1,n-1} ]

// On input, the matrix C must already exist.  Any existing entries in C are
// discarded.  C must have dimensions nrows by ncols where nrows is the sum of
// # of rows in the matrices A{i,0} for all i, and ncols is the sum of the # of
// columns in the matrices A{0,j} for all j.  All matrices in any given tile
// row i must have the same number of rows (that is, nrows(A{i,0}) must equal
// nrows(A{i,j}) for all j), and all matrices in any given tile column j must
// have the same number of columns (that is, ncols(A{0,j}) must equal
// ncols(A{i,j}) for all i).

// The type of C is unchanged, and all matrices A{i,j} are typecasted into the
// type of C.  Any settings made to C by GxB_Matrix_Option_set (format by row
// or by column, bitmap switch, hyper switch, and sparsity control) are
// unchanged.

GB_PUBLIC
GrB_Info GxB_Matrix_concat          // concatenate a 2D array of matrices
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix *Tiles,        // 2D row-major array of size m-by-n
    const GrB_Index m,
    const GrB_Index n,
    const GrB_Descriptor desc       // unused, except threading control
) ;

// GxB_Matrix_split does the opposite of GxB_Matrix_concat.  It splits a single
// input matrix A into a 2D array of tiles.  On input, the Tiles array must be
// a non-NULL pointer to a previously allocated array of size at least m*n
// where both m and n must be > 0.  The Tile_nrows array has size m, and
// Tile_ncols has size n.  The (i,j)th tile has dimension
// Tile_nrows[i]-by-Tile_ncols[j].  The sum of Tile_nrows [0:m-1] must equal
// the number of rows of A, and the sum of Tile_ncols [0:n-1] must equal the
// number of columns of A.  The type of each tile is the same as the type of A;
// no typecasting is done.

GB_PUBLIC
GrB_Info GxB_Matrix_split           // split a matrix into 2D array of matrices
(
    GrB_Matrix *Tiles,              // 2D row-major array of size m-by-n
    const GrB_Index m,
    const GrB_Index n,
    const GrB_Index *Tile_nrows,    // array of size m
    const GrB_Index *Tile_ncols,    // array of size n
    const GrB_Matrix A,             // input matrix to split
    const GrB_Descriptor desc       // unused, except threading control
) ;

//------------------------------------------------------------------------------
// GxB_Matrix_diag, GxB_Vector_diag, GrB_Matrix_diag
//------------------------------------------------------------------------------

// GxB_Matrix_diag constructs a matrix from a vector.  Let n be the length of
// the v vector, from GrB_Vector_size (&n, v).  If k = 0, then C is an n-by-n
// diagonal matrix with the entries from v along the main diagonal of C, with
// C(i,i) = v(i).  If k is nonzero, C is square with dimension n+abs(k).  If k
// is positive, it denotes diagonals above the main diagonal, with C(i,i+k) =
// v(i).  If k is negative, it denotes diagonals below the main diagonal of C,
// with C(i-k,i) = v(i).

// C must already exist on input, of the correct size.  Any existing entries in
// C are discarded.  The type of C is preserved, so that if the type of C and v
// differ, the entries are typecasted into the type of C.  Any settings made to
// C by GxB_Matrix_Option_set (format by row or by column, bitmap switch, hyper
// switch, and sparsity control) are unchanged.

GB_PUBLIC
GrB_Info GxB_Matrix_diag    // construct a diagonal matrix from a vector
(
    GrB_Matrix C,                   // output matrix
    const GrB_Vector v,             // input vector
    int64_t k,
    const GrB_Descriptor desc       // to specify # of threads
) ;

// GrB_Matrix_diag is identical to GxB_Matrix_diag (C, v, k, NULL),
// using the default # of threads from the global setting.

GB_PUBLIC
GrB_Info GrB_Matrix_diag    // construct a diagonal matrix from a vector
(
    GrB_Matrix C,                   // output matrix
    const GrB_Vector v,             // input vector
    int64_t k
) ;

// GxB_Vector_diag extracts a vector v from an input matrix A, which may be
// rectangular.  If k = 0, the main diagonal of A is extracted; k > 0 denotes
// diagonals above the main diagonal of A, and k < 0 denotes diagonals below
// the main diagonal of A.  Let A have dimension m-by-n.  If k is in the range
// 0 to n-1, then v has length min(m,n-k).  If k is negative and in the range
// -1 to -m+1, then v has length min(m+k,n).  If k is outside these ranges,
// v has length 0 (this is not an error).

// v must already exist on input, of the correct length; that is
// GrB_Vector_size (&len,v) must return len = 0 if k >= n or k <= -m, len =
// min(m,n-k) if k is in the range 0 to n-1, and len = min(m+k,n) if k is in
// the range -1 to -m+1.  Any existing entries in v are discarded.  The type of
// v is preserved, so that if the type of A and v differ, the entries are
// typecasted into the type of v.
// Any settings made to v by GxB_Vector_Option_set (bitmap switch and sparsity
// control) are unchanged.

GB_PUBLIC
GrB_Info GxB_Vector_diag    // extract a diagonal from a matrix, as a vector
(
    GrB_Vector v,                   // output vector
    const GrB_Matrix A,             // input matrix
    int64_t k,
    const GrB_Descriptor desc       // unused, except threading control
) ;

//==============================================================================
// SuiteSparse:GraphBLAS options
//==============================================================================

// The following options modify how SuiteSparse:GraphBLAS stores and operates
// on its matrices.  The GxB_*Option* methods allow the user to suggest how the
// internal representation of a matrix, or all matrices, should be held.  These
// options have no effect on the result (except for minor roundoff differences
// for floating-point types).  They only affect the time and memory usage of
// the computations.

//  GxB_Matrix_Option_set:  sets an option for a specific matrix
//  GxB_Matrix_Option_get:  queries the current option of a specific matrix
//  GxB_Vector_Option_set:  sets an option for a specific vector
//  GxB_Vector_Option_get:  queries the current option of a specific vector
//  GxB_Global_Option_set:  sets an option for all future matrices
//  GxB_Global_Option_get:  queries current option for all future matrices

#define GxB_HYPER 0     // (historical, use GxB_HYPER_SWITCH)

typedef enum            // for global options or matrix options
{

    //------------------------------------------------------------
    // for GxB_Matrix_Option_get/set and GxB_Global_Option_get/set:
    //------------------------------------------------------------

    GxB_HYPER_SWITCH = 0,   // defines switch to hypersparse (a double value)
    GxB_BITMAP_SWITCH = 34, // defines switch to bitmap (a double value)
    GxB_FORMAT = 1,         // defines CSR/CSC format: GxB_BY_ROW or GxB_BY_COL

    //------------------------------------------------------------
    // for GxB_Global_Option_get only:
    //------------------------------------------------------------

    GxB_MODE = 2,       // mode passed to GrB_init (blocking or non-blocking)

    GxB_LIBRARY_NAME = 8,           // name of the library (char *)
    GxB_LIBRARY_VERSION = 9,        // library version (3 int's)
    GxB_LIBRARY_DATE = 10,          // date of the library (char *)
    GxB_LIBRARY_ABOUT = 11,         // about the library (char *)
    GxB_LIBRARY_URL = 12,           // URL for the library (char *)
    GxB_LIBRARY_LICENSE = 13,       // license of the library (char *)
    GxB_LIBRARY_COMPILE_DATE = 14,  // date library was compiled (char *)
    GxB_LIBRARY_COMPILE_TIME = 15,  // time library was compiled (char *)
    GxB_API_VERSION = 16,           // API version (3 int's)
    GxB_API_DATE = 17,              // date of the API (char *)
    GxB_API_ABOUT = 18,             // about the API (char *)
    GxB_API_URL = 19,               // URL for the API (char *)
    GxB_COMPILER_VERSION = 23,      // compiler version (3 int's)
    GxB_COMPILER_NAME = 24,         // compiler name (char *)

    //------------------------------------------------------------
    // for GxB_Global_Option_get/set only:
    //------------------------------------------------------------

    GxB_GLOBAL_NTHREADS = GxB_NTHREADS,  // max number of threads to use
                        // If <= GxB_DEFAULT, then GraphBLAS selects the number
                        // of threads automatically.

    GxB_GLOBAL_CHUNK = GxB_CHUNK,   // chunk size for small problems.
                        // If <= GxB_DEFAULT, then the default is used.

    GxB_BURBLE = 99,            // diagnostic output (bool *)
    GxB_PRINTF = 101,           // printf function diagnostic output
    GxB_FLUSH = 102,            // flush function diagnostic output
    GxB_MEMORY_POOL = 103,      // memory pool control
    GxB_PRINT_1BASED = 104,     // print matrices as 0-based or 1-based

    //------------------------------------------------------------
    // for GxB_Matrix_Option_get only:
    //------------------------------------------------------------

    GxB_SPARSITY_STATUS = 33,   // hyper, sparse, bitmap or full (1,2,4,8)
    GxB_IS_HYPER = 6,           // historical; use GxB_SPARSITY_STATUS

    //------------------------------------------------------------
    // for GxB_Matrix_Option_get/set only:
    //------------------------------------------------------------

    GxB_SPARSITY_CONTROL = 32,  // sparsity control: 0 to 15; see below

    //------------------------------------------------------------
    // GPU and options (DRAFT: do not use)
    //------------------------------------------------------------

    GxB_GLOBAL_GPU_CONTROL = GxB_GPU_CONTROL,
    GxB_GLOBAL_GPU_CHUNK = GxB_GPU_CHUNK,

} GxB_Option_Field ;

// GxB_FORMAT can be by row or by column:
typedef enum
{
    GxB_BY_ROW = 0,     // CSR: compressed sparse row format
    GxB_BY_COL = 1,     // CSC: compressed sparse column format
    GxB_NO_FORMAT = -1  // format not defined
}
GxB_Format_Value ;

// The default format is by row.  These constants are defined as GB_PUBLIC
// const, so that if SuiteSparse:GraphBLAS is recompiled with a different
// default format, and the application is relinked but not recompiled, it will
// acquire the new default values.
GB_PUBLIC const GxB_Format_Value GxB_FORMAT_DEFAULT ; // the default hyper_switch parameter GB_PUBLIC const double GxB_HYPER_DEFAULT ; // GxB_SPARSITY_CONTROL can be any sum or bitwise OR of these 4 values: #define GxB_HYPERSPARSE 1 // store matrix in hypersparse form #define GxB_SPARSE 2 // store matrix as sparse form (compressed vector) #define GxB_BITMAP 4 // store matrix as a bitmap #define GxB_FULL 8 // store matrix as full; all entries must be present // size of b array for GxB_set/get (GxB_BITMAP_SWITCH, b) #define GxB_NBITMAP_SWITCH 8 // size of bitmap_switch parameter array // any sparsity value: #define GxB_ANY_SPARSITY (GxB_HYPERSPARSE + GxB_SPARSE + GxB_BITMAP + GxB_FULL) // the default sparsity control is any format: #define GxB_AUTO_SPARSITY GxB_ANY_SPARSITY // GxB_Matrix_Option_set (A, GxB_SPARSITY_CONTROL, scontrol) provides hints // about which data structure GraphBLAS should use for the matrix A: // // GxB_AUTO_SPARSITY: GraphBLAS selects automatically. // GxB_HYPERSPARSE: always hypersparse, taking O(nvals(A)) space. // GxB_SPARSE: always in a sparse struture: compressed-sparse row/column, // taking O(nrows+nvals(A)) space if stored by row, or // O(ncols+nvals(A)) if stored by column. // GxB_BITMAP: always in a bitmap struture, taking O(nrows*ncols) space. // GxB_FULL: always in a full structure, taking O(nrows*ncols) space, // unless not all entries are present, in which case the bitmap // storage is used. // // These options can be summed. For example, to allow a matrix to be sparse // or hypersparse, but not bitmap or full, use GxB_SPARSE + GxB_HYPERSPARSE. // Since GxB_FULL can only be used when all entries are present, matrices with // the just GxB_FULL control setting are stored in bitmap form if any entries // are not present. // // Only the least 4 bits of the sparsity control are considered, so the // formats can be bitwise negated. For example, to allow for any format // except full, use ~GxB_FULL. 
// // GxB_Matrix_Option_get (A, GxB_SPARSITY_STATUS, &sparsity) returns the // current data structure currently used for the matrix A (either hypersparse, // sparse, bitmap, or full). // // GxB_Matrix_Option_get (A, GxB_SPARSITY_CONTROL, &scontrol) returns the hint // for how A should be stored (hypersparse, sparse, bitmap, or full, or any // combination). // GxB_HYPER_SWITCH: // If the matrix or vector structure can be sparse or hypersparse, the // GxB_HYPER_SWITCH parameter controls when each of these structures are // used. The parameter is not used if the matrix or vector is full or // bitmap. // // Let k be the actual number of non-empty vectors (with at least one // entry). This value k is not dependent on whether or not the matrix is // stored in hypersparse structure. Let n be the number of vectors (the # // of columns if CSC, or rows if CSR). Let h be the value of the // GxB_HYPER_SWITCH setting of the matrix. // // If a matrix is currently hypersparse, it can be converted to // non-hypersparse if (n <= 1 || k > 2*n*h). Otherwise it stays // hypersparse. If (n <= 1) the matrix is always stored as // non-hypersparse. // // If currently non-hypersparse, it can be converted to hypersparse if (n // > 1 && k <= n*h). Otherwise, it stays non-hypersparse. If (n <= 1) // the matrix always remains non-hypersparse. // // Setting GxB_HYPER_SWITCH to GxB_ALWAYS_HYPER or GxB_NEVER_HYPER ensures // a matrix always stays hypersparse, or always stays non-hypersparse, // respectively. GB_PUBLIC const double GxB_ALWAYS_HYPER, GxB_NEVER_HYPER ; GB_PUBLIC GrB_Info GxB_Matrix_Option_set // set an option in a matrix ( GrB_Matrix A, // matrix to modify GxB_Option_Field field, // option to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Matrix_Option_get // gets the current option of a matrix ( GrB_Matrix A, // matrix to query GxB_Option_Field field, // option to query ... 
// return value of the matrix option ) ; GB_PUBLIC GrB_Info GxB_Vector_Option_set // set an option in a vector ( GrB_Vector A, // vector to modify GxB_Option_Field field, // option to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Vector_Option_get // gets the current option of a vector ( GrB_Vector A, // vector to query GxB_Option_Field field, // option to query ... // return value of the vector option ) ; // GxB_Global_Option_set controls the global defaults used when a new matrix is // created. GrB_init defines the following initial settings: // // GxB_Global_Option_set (GxB_HYPER_SWITCH, GxB_HYPER_DEFAULT) ; // GxB_Global_Option_set (GxB_BITMAP_SWITCH, NULL) ; // GxB_Global_Option_set (GxB_FORMAT, GxB_FORMAT_DEFAULT) ; // // The compile-time constants GxB_HYPER_DEFAULT and GxB_FORMAT_DEFAULT are // equal to 0.0625 and GxB_BY_ROW, by default. That is, by default, all new // matrices are held by row in CSR format. If a matrix has fewer than n/16 // columns, it can be converted to hypersparse structure. If it has more than // n/8 columns, it can be converted to a sparse structure. Modifying these // global settings via GxB_Global_Option_set has no effect on matrices already // created. GB_PUBLIC GrB_Info GxB_Global_Option_set // set a global default option ( GxB_Option_Field field, // option to change ... // value to change it to ) ; GB_PUBLIC GrB_Info GxB_Global_Option_get // gets the current global default option ( GxB_Option_Field field, // option to query ... 
// return value of the global option ) ; //------------------------------------------------------------------------------ // GxB_set and GxB_get //------------------------------------------------------------------------------ // The simplest way to set/get a value of a GrB_Descriptor is with // the generic GxB_set and GxB_get functions: // GxB_set (desc, field, value) ; // GxB_get (desc, field, &value) ; // GxB_set and GxB_get are generic methods that and set or query the options in // a GrB_Matrix, a GrB_Descriptor, or in the global options. They can be used // with the following syntax. Note that GxB_NTHREADS can be used for both the // global nthreads_max, and for the # of threads in the descriptor. // To set/get the global options: // // GxB_set (GxB_HYPER_SWITCH, double h) ; // GxB_set (GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ; // GxB_set (GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ; // GxB_get (GxB_HYPER_SWITCH, double *h) ; // // double b [GxB_NBITMAP_SWITCH] ; // GxB_set (GxB_BITMAP_SWITCH, b) ; // GxB_set (GxB_BITMAP_SWITCH, NULL) ; // set defaults // GxB_get (GxB_BITMAP_SWITCH, b) ; // // GxB_set (GxB_FORMAT, GxB_BY_ROW) ; // GxB_set (GxB_FORMAT, GxB_BY_COL) ; // GxB_get (GxB_FORMAT, GxB_Format_Value *s) ; // // GxB_set (GxB_NTHREADS, nthreads_max) ; // GxB_get (GxB_NTHREADS, int *nthreads_max) ; // // GxB_set (GxB_CHUNK, double chunk) ; // GxB_get (GxB_CHUNK, double *chunk) ; // // GxB_set (GxB_BURBLE, bool burble) ; // GxB_get (GxB_BURBLE, bool *burble) ; // // GxB_set (GxB_PRINTF, void *printf_function) ; // GxB_get (GxB_PRINTF, void **printf_function) ; // // GxB_set (GxB_FLUSH, void *flush_function) ; // GxB_get (GxB_FLUSH, void **flush_function) ; // // int64_t free_pool_limit [64] ; // GxB_set (GxB_MEMORY_POOL, free_pool_limit) ; // GxB_set (GxB_MEMORY_POOL, NULL) ; // set defaults // GxB_get (GxB_MEMORY_POOL, free_pool_limit) ; // To get global options that can be queried but not modified: // // GxB_get (GxB_MODE, GrB_Mode *mode) ; // To set/get a matrix 
option: // // GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, double h) ; // GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_ALWAYS_HYPER) ; // GxB_set (GrB_Matrix A, GxB_HYPER_SWITCH, GxB_NEVER_HYPER) ; // GxB_get (GrB_Matrix A, GxB_HYPER_SWITCH, double *h) ; // // GxB_set (GrB_Matrix A, GxB_BITMAP_SWITCH, double b) ; // GxB_get (GrB_Matrix A, GxB_BITMAP_SWITCH, double *b) ; // // GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_ROW) ; // GxB_set (GrB_Matrix A, GxB_FORMAT, GxB_BY_COL) ; // GxB_get (GrB_Matrix A, GxB_FORMAT, GxB_Format_Value *s) ; // // GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ; // GxB_set (GrB_Matrix A, GxB_SPARSITY_CONTROL, scontrol) ; // GxB_get (GrB_Matrix A, GxB_SPARSITY_CONTROL, int *scontrol) ; // // GxB_get (GrB_Matrix A, GxB_SPARSITY_STATUS, int *sparsity) ; // To set/get a vector option or status: // // GxB_set (GrB_Vector v, GxB_BITMAP_SWITCH, double b) ; // GxB_get (GrB_Vector v, GxB_BITMAP_SWITCH, double *b) ; // // GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_ROW) ; // GxB_set (GrB_Vector v, GxB_FORMAT, GxB_BY_COL) ; // GxB_get (GrB_Vector v, GxB_FORMAT, GxB_Format_Value *s) ; // // GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, GxB_AUTO_SPARSITY) ; // GxB_set (GrB_Vector v, GxB_SPARSITY_CONTROL, scontrol) ; // GxB_get (GrB_Vector v, GxB_SPARSITY_CONTROL, int *scontrol) ; // // GxB_get (GrB_Vector v, GxB_SPARSITY_STATUS, int *sparsity) ; // To set/get a descriptor field: // // GxB_set (GrB_Descriptor d, GrB_OUTP, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_OUTP, GrB_REPLACE) ; // GxB_get (GrB_Descriptor d, GrB_OUTP, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GrB_MASK, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP) ; // GxB_set (GrB_Descriptor d, GrB_MASK, GrB_STRUCTURE) ; // GxB_set (GrB_Descriptor d, GrB_MASK, GrB_COMP + GrB_STRUCTURE) ; // GxB_get (GrB_Descriptor d, GrB_MASK, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GrB_INP0, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_INP0, 
GrB_TRAN) ; // GxB_get (GrB_Descriptor d, GrB_INP0, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GrB_INP1, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GrB_INP1, GrB_TRAN) ; // GxB_get (GrB_Descriptor d, GrB_INP1, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_DEFAULT) ; // GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_GUSTAVSON) ; // GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_HASH) ; // GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_SAXPY) ; // GxB_set (GrB_Descriptor d, GxB_AxB_METHOD, GxB_AxB_DOT) ; // GxB_get (GrB_Descriptor d, GrB_AxB_METHOD, GrB_Desc_Value *v) ; // // GxB_set (GrB_Descriptor d, GxB_NTHREADS, nthreads) ; // GxB_get (GrB_Descriptor d, GxB_NTHREADS, int *nthreads) ; // // GxB_set (GrB_Descriptor d, GxB_CHUNK, double chunk) ; // GxB_get (GrB_Descriptor d, GxB_CHUNK, double *chunk) ; // // GxB_set (GrB_Descriptor d, GxB_SORT, int sort) ; // GxB_get (GrB_Descriptor d, GxB_SORT, int *sort) ; // // GxB_set (GrB_Descriptor d, GxB_COMPRESSION, int method) ; // GxB_get (GrB_Descriptor d, GxB_COMPRESSION, int *method) ; // // GxB_set (GrB_Descriptor d, GxB_IMPORT, int method) ; // GxB_get (GrB_Descriptor d, GxB_IMPORT, int *method) ; #if GxB_STDC_VERSION >= 201112L #define GxB_set(arg1,...) \ _Generic \ ( \ (arg1), \ int : GxB_Global_Option_set , \ GxB_Option_Field : GxB_Global_Option_set , \ GrB_Vector : GxB_Vector_Option_set , \ GrB_Matrix : GxB_Matrix_Option_set , \ GrB_Descriptor : GxB_Desc_set \ ) \ (arg1, __VA_ARGS__) #define GxB_get(arg1,...) 
\ _Generic \ ( \ (arg1), \ const int : GxB_Global_Option_get , \ int : GxB_Global_Option_get , \ const GxB_Option_Field : GxB_Global_Option_get , \ GxB_Option_Field : GxB_Global_Option_get , \ const GrB_Vector : GxB_Vector_Option_get , \ GrB_Vector : GxB_Vector_Option_get , \ const GrB_Matrix : GxB_Matrix_Option_get , \ GrB_Matrix : GxB_Matrix_Option_get , \ const GrB_Descriptor : GxB_Desc_get , \ GrB_Descriptor : GxB_Desc_get \ ) \ (arg1, __VA_ARGS__) #endif //============================================================================== // GrB_free: free any GraphBLAS object //============================================================================== // for null and invalid objects #define GrB_NULL NULL #define GrB_INVALID_HANDLE NULL #if GxB_STDC_VERSION >= 201112L #define GrB_free(object) \ _Generic \ ( \ (object), \ GrB_Type *: GrB_Type_free , \ GrB_UnaryOp *: GrB_UnaryOp_free , \ GrB_BinaryOp *: GrB_BinaryOp_free , \ GxB_SelectOp *: GxB_SelectOp_free , \ GrB_IndexUnaryOp *: GrB_IndexUnaryOp_free , \ GrB_Monoid *: GrB_Monoid_free , \ GrB_Semiring *: GrB_Semiring_free , \ GrB_Scalar *: GrB_Scalar_free , \ GrB_Vector *: GrB_Vector_free , \ GrB_Matrix *: GrB_Matrix_free , \ GrB_Descriptor *: GrB_Descriptor_free , \ GxB_Iterator *: GxB_Iterator_free \ ) \ (object) #endif //============================================================================== // GrB_wait: finish computations //============================================================================== typedef enum { GrB_COMPLETE = 0, // establishes a happens-before relation GrB_MATERIALIZE = 1 // object is complete } GrB_WaitMode ; // Finish all pending work in a specific object. 
GB_PUBLIC GrB_Info GrB_Type_wait (GrB_Type type , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_UnaryOp_wait (GrB_UnaryOp op , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_BinaryOp_wait (GrB_BinaryOp op , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GxB_SelectOp_wait (GxB_SelectOp op , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_IndexUnaryOp_wait (GrB_IndexUnaryOp op , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_Monoid_wait (GrB_Monoid monoid , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_Semiring_wait (GrB_Semiring semiring, GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_Descriptor_wait (GrB_Descriptor desc , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_Scalar_wait (GrB_Scalar s , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_Vector_wait (GrB_Vector v , GrB_WaitMode waitmode) ; GB_PUBLIC GrB_Info GrB_Matrix_wait (GrB_Matrix A , GrB_WaitMode waitmode) ; // GrB_wait (object,waitmode) polymorphic function: #if GxB_STDC_VERSION >= 201112L #define GrB_wait(object,waitmode) \ _Generic \ ( \ (object), \ GrB_Type : GrB_Type_wait , \ GrB_UnaryOp : GrB_UnaryOp_wait , \ GrB_BinaryOp : GrB_BinaryOp_wait , \ GxB_SelectOp : GxB_SelectOp_wait , \ GrB_IndexUnaryOp : GrB_IndexUnaryOp_wait , \ GrB_Monoid : GrB_Monoid_wait , \ GrB_Semiring : GrB_Semiring_wait , \ GrB_Scalar : GrB_Scalar_wait , \ GrB_Vector : GrB_Vector_wait , \ GrB_Matrix : GrB_Matrix_wait , \ GrB_Descriptor : GrB_Descriptor_wait \ ) \ (object, waitmode) #endif // NOTE: GxB_Scalar_wait is historical; use GrB_Scalar_wait instead GB_PUBLIC GrB_Info GxB_Scalar_wait (GrB_Scalar *s) ; //============================================================================== // GrB_error: error handling //============================================================================== // Each GraphBLAS method and operation returns a GrB_Info error code. // GrB_error returns additional information on the error in a thread-safe // null-terminated string. 
The string returned by GrB_error is owned by // the GraphBLAS library and must not be free'd. GB_PUBLIC GrB_Info GrB_Type_error (const char **error, const GrB_Type type) ; GB_PUBLIC GrB_Info GrB_UnaryOp_error (const char **error, const GrB_UnaryOp op) ; GB_PUBLIC GrB_Info GrB_BinaryOp_error (const char **error, const GrB_BinaryOp op) ; GB_PUBLIC GrB_Info GxB_SelectOp_error (const char **error, const GxB_SelectOp op) ; GB_PUBLIC GrB_Info GrB_IndexUnaryOp_error (const char **error, const GrB_IndexUnaryOp op) ; GB_PUBLIC GrB_Info GrB_Monoid_error (const char **error, const GrB_Monoid monoid) ; GB_PUBLIC GrB_Info GrB_Semiring_error (const char **error, const GrB_Semiring semiring) ; GB_PUBLIC GrB_Info GrB_Scalar_error (const char **error, const GrB_Scalar s) ; GB_PUBLIC GrB_Info GrB_Vector_error (const char **error, const GrB_Vector v) ; GB_PUBLIC GrB_Info GrB_Matrix_error (const char **error, const GrB_Matrix A) ; GB_PUBLIC GrB_Info GrB_Descriptor_error (const char **error, const GrB_Descriptor d) ; // GxB_Scalar_error is historical: use GrB_Scalar_error instead GB_PUBLIC GrB_Info GxB_Scalar_error (const char **error, const GrB_Scalar s) ; // GrB_error (error,object) polymorphic function: #if GxB_STDC_VERSION >= 201112L #define GrB_error(error,object) \ _Generic \ ( \ (object), \ const GrB_Type : GrB_Type_error , \ GrB_Type : GrB_Type_error , \ const GrB_UnaryOp : GrB_UnaryOp_error , \ GrB_UnaryOp : GrB_UnaryOp_error , \ const GrB_BinaryOp : GrB_BinaryOp_error , \ GrB_BinaryOp : GrB_BinaryOp_error , \ const GxB_SelectOp : GxB_SelectOp_error , \ GxB_SelectOp : GxB_SelectOp_error , \ const GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \ GrB_IndexUnaryOp : GrB_IndexUnaryOp_error , \ const GrB_Monoid : GrB_Monoid_error , \ GrB_Monoid : GrB_Monoid_error , \ const GrB_Semiring : GrB_Semiring_error , \ GrB_Semiring : GrB_Semiring_error , \ const GrB_Scalar : GrB_Scalar_error , \ GrB_Scalar : GrB_Scalar_error , \ const GrB_Vector : GrB_Vector_error , \ GrB_Vector : 
GrB_Vector_error , \ const GrB_Matrix : GrB_Matrix_error , \ GrB_Matrix : GrB_Matrix_error , \ const GrB_Descriptor : GrB_Descriptor_error , \ GrB_Descriptor : GrB_Descriptor_error \ ) \ (error, object) #endif //============================================================================== // GrB_mxm, vxm, mxv: matrix multiplication over a semiring //============================================================================== GB_PUBLIC GrB_Info GrB_mxm // C<Mask> = accum (C, A*B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Semiring semiring, // defines '+' and '*' for A*B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_vxm // w'<Mask> = accum (w, u'*A) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Semiring semiring, // defines '+' and '*' for u'*A const GrB_Vector u, // first input: vector u const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for w, mask, and A ) ; GB_PUBLIC GrB_Info GrB_mxv // w<Mask> = accum (w, A*u) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Semiring semiring, // defines '+' and '*' for A*B const GrB_Matrix A, // first input: matrix A const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w, mask, and A ) ; //============================================================================== // GrB_eWiseMult: element-wise matrix and vector operations, set intersection 
//============================================================================== // GrB_eWiseMult computes C<Mask> = accum (C, A.*B), where ".*" is the Hadamard // product, and where pairs of elements in two matrices (or vectors) are // pairwise "multiplied" with C(i,j) = mult (A(i,j),B(i,j)). GB_PUBLIC GrB_Info GrB_Vector_eWiseMult_Semiring // w<Mask> = accum (w, u.*v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Semiring semiring, // defines '.*' for t=u.*v const GrB_Vector u, // first input: vector u const GrB_Vector v, // second input: vector v const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_eWiseMult_Monoid // w<Mask> = accum (w, u.*v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Monoid monoid, // defines '.*' for t=u.*v const GrB_Vector u, // first input: vector u const GrB_Vector v, // second input: vector v const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_eWiseMult_BinaryOp // w<Mask> = accum (w, u.*v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp mult, // defines '.*' for t=u.*v const GrB_Vector u, // first input: vector u const GrB_Vector v, // second input: vector v const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_eWiseMult_Semiring // C<Mask> = accum (C, A.*B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Semiring semiring, // defines '.*' for T=A.*B const GrB_Matrix A, // first 
input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_eWiseMult_Monoid // C<Mask> = accum (C, A.*B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Monoid monoid, // defines '.*' for T=A.*B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_eWiseMult_BinaryOp // C<Mask> = accum (C, A.*B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp mult, // defines '.*' for T=A.*B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; // All 6 of the above type-specific functions are captured in a single // type-generic function, GrB_eWiseMult: #if GxB_STDC_VERSION >= 201112L #define GrB_eWiseMult(C,Mask,accum,op,A,B,desc) \ _Generic \ ( \ (C), \ GrB_Matrix : \ _Generic \ ( \ (op), \ const GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \ GrB_Semiring : GrB_Matrix_eWiseMult_Semiring , \ const GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \ GrB_Monoid : GrB_Matrix_eWiseMult_Monoid , \ const GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp , \ GrB_BinaryOp : GrB_Matrix_eWiseMult_BinaryOp \ ), \ GrB_Vector : \ _Generic \ ( \ (op), \ const GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \ GrB_Semiring : GrB_Vector_eWiseMult_Semiring , \ const GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \ GrB_Monoid : GrB_Vector_eWiseMult_Monoid , \ const GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp , \ GrB_BinaryOp : GrB_Vector_eWiseMult_BinaryOp \ ) \ ) \ (C, Mask, accum, op, A, B, desc) #endif 
//============================================================================== // GrB_eWiseAdd: element-wise matrix and vector operations, set union //============================================================================== // GrB_eWiseAdd computes C<Mask> = accum (C, A+B), where pairs of elements in // two matrices (or two vectors) are pairwise "added". GB_PUBLIC GrB_Info GrB_Vector_eWiseAdd_Semiring // w<mask> = accum (w, u+v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Semiring semiring, // defines '+' for t=u+v const GrB_Vector u, // first input: vector u const GrB_Vector v, // second input: vector v const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_eWiseAdd_Monoid // w<mask> = accum (w, u+v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Monoid monoid, // defines '+' for t=u+v const GrB_Vector u, // first input: vector u const GrB_Vector v, // second input: vector v const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_eWiseAdd_BinaryOp // w<mask> = accum (w, u+v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp add, // defines '+' for t=u+v const GrB_Vector u, // first input: vector u const GrB_Vector v, // second input: vector v const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_eWiseAdd_Semiring // C<Mask> = accum (C, A+B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Semiring semiring, // 
defines '+' for T=A+B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_eWiseAdd_Monoid // C<Mask> = accum (C, A+B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Monoid monoid, // defines '+' for T=A+B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_eWiseAdd_BinaryOp // C<Mask> = accum (C, A+B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp add, // defines '+' for T=A+B const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; #if GxB_STDC_VERSION >= 201112L #define GrB_eWiseAdd(C,Mask,accum,op,A,B,desc) \ _Generic \ ( \ (C), \ GrB_Matrix : \ _Generic \ ( \ (op), \ const GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \ GrB_Semiring : GrB_Matrix_eWiseAdd_Semiring , \ const GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \ GrB_Monoid : GrB_Matrix_eWiseAdd_Monoid , \ const GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp , \ GrB_BinaryOp : GrB_Matrix_eWiseAdd_BinaryOp \ ), \ GrB_Vector : \ _Generic \ ( \ (op), \ const GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \ GrB_Semiring : GrB_Vector_eWiseAdd_Semiring , \ const GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \ GrB_Monoid : GrB_Vector_eWiseAdd_Monoid , \ const GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp , \ GrB_BinaryOp : GrB_Vector_eWiseAdd_BinaryOp \ ) \ ) \ (C, Mask, accum, op, A, B, desc) #endif //============================================================================== // 
GxB_eWiseUnion: a variant of GrB_eWiseAdd //============================================================================== // GxB_eWiseUnion is a variant of eWiseAdd. They differ when an entry is // present in A but not B, or in B but not A. // eWiseAdd does the following, for a matrix, where "+" is the add binary op: // if A(i,j) and B(i,j) are both present: // C(i,j) = A(i,j) + B(i,j) // else if A(i,j) is present but not B(i,j) // C(i,j) = A(i,j) // else if B(i,j) is present but not A(i,j) // C(i,j) = B(i,j) // by constrast, eWiseUnion always applies the operator: // if A(i,j) and B(i,j) are both present: // C(i,j) = A(i,j) + B(i,j) // else if A(i,j) is present but not B(i,j) // C(i,j) = A(i,j) + beta // else if B(i,j) is present but not A(i,j) // C(i,j) = alpha + B(i,j) GB_PUBLIC GrB_Info GxB_Vector_eWiseUnion // w<mask> = accum (w, u+v) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp add, // defines '+' for t=u+v const GrB_Vector u, // first input: vector u const GrB_Scalar alpha, const GrB_Vector v, // second input: vector v const GrB_Scalar beta, const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_eWiseUnion // C<M> = accum (C, A+B) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp add, // defines '+' for T=A+B const GrB_Matrix A, // first input: matrix A const GrB_Scalar alpha, const GrB_Matrix B, // second input: matrix B const GrB_Scalar beta, const GrB_Descriptor desc // descriptor for C, M, A, and B ) ; #if GxB_STDC_VERSION >= 201112L #define GxB_eWiseUnion(C,Mask,accum,op,A,alpha,B,beta,desc) \ _Generic \ ( \ (C), \ const GrB_Matrix : GxB_Matrix_eWiseUnion , \ GrB_Matrix : GxB_Matrix_eWiseUnion , \ const GrB_Vector : 
GxB_Vector_eWiseUnion , \ GrB_Vector : GxB_Vector_eWiseUnion \ ) \ (C, Mask, accum, op, A, alpha, B, beta, desc) #endif //============================================================================== // GrB_extract: extract a submatrix or subvector //============================================================================== // Extract entries from a matrix or vector; T = A(I,J). This (like most // GraphBLAS methods) is then followed by C<Mask>=accum(C,T). // To extract all rows of a matrix or vector, as in A (:,J), use I=GrB_ALL as // the input argument. For all columns of a matrix, use J=GrB_ALL. GB_PUBLIC const uint64_t *GrB_ALL ; // To extract a range of rows and columns, I and J can be a list of 2 or 3 // indices that defines a range (begin:end) or a strided range (begin:inc:end). // To specify the colon syntax I = begin:end, the array I has size at least 2, // where I [GxB_BEGIN] = begin and I [GxB_END] = end. The parameter ni is then // passed as the special value GxB_RANGE. To specify the colon syntax I = // begin:inc:end, the array I has size at least three, with the values begin, // end, and inc (in that order), and then pass in the value ni = GxB_STRIDE. // The same can be done for the list J and its size, nj. // These special values of ni and nj can be used for GrB_assign, // GrB_extract, and GxB_subassign. #define GxB_RANGE (INT64_MAX) #define GxB_STRIDE (INT64_MAX-1) #define GxB_BACKWARDS (INT64_MAX-2) // for the strided range begin:inc:end, I [GxB_BEGIN] is the value of begin, I // [GxB_END] is the value end, I [GxB_INC] is the magnitude of the stride. If // the stride is negative, use ni = GxB_BACKWARDS. #define GxB_BEGIN (0) #define GxB_END (1) #define GxB_INC (2) // For example, the notation 10:-2:1 defines a sequence [10 8 6 4 2]. // The end point of the sequence (1) need not appear in the sequence, if // the last increment goes past it. 
GrB_Vector w,                   // input/output vector for results
(w,mask,acc,A,I,ni,j,d) // w<m> = acc (w, A(I,j)) // GrB_Matrix_extract (C,Mask,acc,A,I,ni,J,nj,d) // C<Mask> = acc (C, A(I,J)) #if GxB_STDC_VERSION >= 201112L #define GrB_extract(arg1,Mask,accum,arg4,...) \ _Generic \ ( \ (arg1), \ GrB_Vector : \ _Generic \ ( \ (arg4), \ const GrB_Vector : GrB_Vector_extract , \ GrB_Vector : GrB_Vector_extract , \ const GrB_Matrix : GrB_Col_extract , \ GrB_Matrix : GrB_Col_extract \ ), \ GrB_Matrix : GrB_Matrix_extract \ ) \ (arg1, Mask, accum, arg4, __VA_ARGS__) #endif //============================================================================== // GxB_subassign: matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A) //============================================================================== // Assign entries in a matrix or vector; C(I,J) = A. // Each GxB_subassign function is very similar to its corresponding GrB_assign // function in the spec, but they differ in two ways: (1) the mask in // GxB_subassign has the same size as w(I) for vectors and C(I,J) for matrices, // and (2) they differ in the GrB_REPLACE option. See the user guide for // details. // In GraphBLAS notation, the two methods can be described as follows: // matrix and vector subassign: C(I,J)<Mask> = accum (C(I,J), A) // matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A) // --- assign ------------------------------------------------------------------ // // GrB_Matrix_assign C<M>(I,J) += A M same size as matrix C. // A is |I|-by-|J| // // GrB_Vector_assign w<m>(I) += u m same size as column vector w. // u is |I|-by-1 // // GrB_Row_assign C<m'>(i,J) += u' m is a column vector the same // size as a row of C. // u is |J|-by-1, i is a scalar. // // GrB_Col_assign C<m>(I,j) += u m is a column vector the same // size as a column of C. // u is |I|-by-1, j is a scalar. // // --- subassign --------------------------------------------------------------- // // GxB_Matrix_subassign C(I,J)<M> += A M same size as matrix A. 
GrB_Vector w,                   // input/output vector for results
GrB_Vector u, // input vector GrB_Index i, // row index const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C(i,J) and mask ) ; //------------------------------------------------------------------------------ // GxB_Vector_subassign_[SCALAR]: scalar expansion assignment to subvector //------------------------------------------------------------------------------ // Assigns a single scalar to a subvector, w(I)<mask> = accum(w(I),x). The // scalar x is implicitly expanded into a vector u of size ni-by-1, with each // entry in u equal to x, and then w(I)<mask> = accum(w(I),u) is done. GB_PUBLIC GrB_Info GxB_Vector_subassign_BOOL // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w(I),x) bool x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_INT8 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int8_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_UINT8 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint8_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_INT16 // 
w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int16_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_UINT16 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint16_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_INT32 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int32_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_UINT32 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint32_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_INT64 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) int64_t x, // scalar to assign to w(I) const GrB_Index *I, 
// row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_UINT64 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) uint64_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_FP32 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) float x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_FP64 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) double x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_FC32 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) GxB_FC32_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_FC64 // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // 
optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) GxB_FC64_t x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_UDT // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) void *x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_subassign_Scalar // w(I)<mask> = accum (w(I),x) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w(I), unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(w(I),x) GrB_Scalar x, // scalar to assign to w(I) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Descriptor desc // descriptor for w(I) and mask ) ; //------------------------------------------------------------------------------ // GxB_Matrix_subassign_[SCALAR]: scalar expansion assignment to submatrix //------------------------------------------------------------------------------ // Assigns a single scalar to a submatrix, C(I,J)<Mask> = accum(C(I,J),x). The // scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each // entry in A equal to x, and then C(I,J)<Mask> = accum(C(I,J),A) is done. 
// The GxB_Matrix_subassign_[TYPE] variants below are identical except for the
// C type of the scalar x being assigned.

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_BOOL  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    bool x,                         // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT8  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int8_t x,                       // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT8 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint8_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT16 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int16_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT16    // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint16_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT32 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int32_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT32    // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint32_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_INT64 // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    int64_t x,                      // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UINT64    // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    uint64_t x,                     // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP32  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    float x,                        // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FP64  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    double x,                       // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC32  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    GxB_FC32_t x,                   // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_FC64  // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    GxB_FC64_t x,                   // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_UDT   // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    void *x,                        // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

GB_PUBLIC
GrB_Info GxB_Matrix_subassign_Scalar    // C(I,J)<Mask> = accum (C(I,J),x)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C(I,J), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),x)
    GrB_Scalar x,                   // scalar to assign to C(I,J)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(I,J) and Mask
) ;

//------------------------------------------------------------------------------
// GxB_subassign: generic submatrix/subvector assignment
//------------------------------------------------------------------------------

// GxB_subassign is a generic function that provides access to all specific
// GxB_*_subassign* functions:

// GxB_Vector_subassign   (w,m,acc,u,I,ni,d)      // w(I)<m>    = acc(w(I),u)
// GxB_Matrix_subassign   (C,M,acc,A,I,ni,J,nj,d) // C(I,J)<M>  = acc(C(I,J),A)
// GxB_Col_subassign      (C,m,acc,u,I,ni,j,d)    // C(I,j)<m>  = acc(C(I,j),u)
// GxB_Row_subassign      (C,m,acc,u,i,J,nj,d)    // C(i,J)<m'> = acc(C(i,J),u')
// GxB_Vector_subassign_T (w,m,acc,x,I,ni,d)      // w(I)<m>    = acc(w(I),x)
// GxB_Matrix_subassign_T (C,M,acc,x,I,ni,J,nj,d) // C(I,J)<M>  = acc(C(I,J),x)

// Dispatch (C11 _Generic): arg1 selects vector vs matrix forms; arg4 (a scalar
// type, GrB_Scalar, GrB_Vector, or GrB_Matrix) selects the typed-scalar,
// Scalar, column/row, or full-matrix variant; for a matrix target with a
// vector arg4, arg5 (an index array vs a scalar index) distinguishes
// Col_subassign from Row_subassign.
#if GxB_STDC_VERSION >= 201112L
#define GxB_subassign(arg1,Mask,accum,arg4,arg5,...)                \
    _Generic                                                        \
    (                                                               \
        (arg1),                                                     \
            GrB_Vector :                                            \
                _Generic                                            \
                (                                                   \
                    (arg4),                                         \
                        GB_CASES (, GxB, Vector_subassign) ,        \
                        const GrB_Scalar : GxB_Vector_subassign_Scalar, \
                              GrB_Scalar : GxB_Vector_subassign_Scalar, \
                        default: GxB_Vector_subassign               \
                ),                                                  \
            default:                                                \
                _Generic                                            \
                (                                                   \
                    (arg4),                                         \
                        GB_CASES (, GxB, Matrix_subassign) ,        \
                        const GrB_Scalar : GxB_Matrix_subassign_Scalar, \
                              GrB_Scalar : GxB_Matrix_subassign_Scalar, \
                        const GrB_Vector :                          \
                            _Generic                                \
                            (                                       \
                                (arg5),                             \
                                    const GrB_Index *: GxB_Col_subassign , \
                                          GrB_Index *: GxB_Col_subassign , \
                                    default: GxB_Row_subassign      \
                            ),                                      \
                        GrB_Vector :                                \
                            _Generic                                \
                            (                                       \
                                (arg5),                             \
                                    const GrB_Index *: GxB_Col_subassign , \
                                          GrB_Index *: GxB_Col_subassign , \
                                    default: GxB_Row_subassign      \
                            ),                                      \
                        default: GxB_Matrix_subassign               \
                )                                                   \
    )                                                               \
    (arg1, Mask, accum, arg4, arg5, __VA_ARGS__)
#endif

//==============================================================================
// GrB_assign: matrix and vector assign: C<Mask>(I,J) = accum (C(I,J), A)
//==============================================================================

// Assign entries in a matrix or vector; C(I,J) = A.
// Each of these can be used with their generic name, GrB_assign.
GB_PUBLIC
GrB_Info GrB_Vector_assign          // w<mask>(I) = accum (w(I),u)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w(I),t)
    const GrB_Vector u,             // first input:  vector u
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_assign          // C<Mask>(I,J) = accum (C(I,J),A)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C(I,J),T)
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C, Mask, and A
) ;

GB_PUBLIC
GrB_Info GrB_Col_assign             // C<mask>(I,j) = accum (C(I,j),u)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for C(:,j), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C(I,j),t)
    const GrB_Vector u,             // input vector
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    GrB_Index j,                    // column index
    const GrB_Descriptor desc       // descriptor for C(:,j) and mask
) ;

GB_PUBLIC
GrB_Info GrB_Row_assign             // C<mask'>(i,J) = accum (C(i,J),u')
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Vector mask,          // optional mask for C(i,:), unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(C(i,J),t)
    const GrB_Vector u,             // input vector
    GrB_Index i,                    // row index
    const GrB_Index *J,             // column indices
    GrB_Index nj,                   // number of column indices
    const GrB_Descriptor desc       // descriptor for C(i,:) and mask
) ;

//------------------------------------------------------------------------------
// GrB_Vector_assign_[SCALAR]: scalar expansion assignment to subvector
//------------------------------------------------------------------------------

// Assigns a single scalar to a subvector, w<mask>(I) = accum(w(I),x).  The
// scalar x is implicitly expanded into a vector u of size ni-by-1, with each
// entry in u equal to x, and then w<mask>(I) = accum(w(I),u) is done.

// The typed variants below are identical except for the C type of the scalar
// x being assigned.  Complex (FC32/FC64) variants are GxB_ extensions.

GB_PUBLIC
GrB_Info GrB_Vector_assign_BOOL     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w(I),x)
    bool x,                         // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_INT8     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int8_t x,                       // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT8    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint8_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_INT16    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int16_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT16   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint16_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_INT32    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int32_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT32   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint32_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_INT64    // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    int64_t x,                      // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_UINT64   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    uint64_t x,                     // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_FP32     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    float x,                        // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_FP64     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    double x,                       // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_assign_FC32     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GxB_FC32_t x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GxB_Vector_assign_FC64     // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GxB_FC64_t x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_UDT      // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    void *x,                        // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC
GrB_Info GrB_Vector_assign_Scalar   // w<mask>(I) = accum (w(I),x)
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(w(I),x)
    GrB_Scalar x,                   // scalar to assign to w(I)
    const GrB_Index *I,             // row indices
    GrB_Index ni,                   // number of row indices
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

//------------------------------------------------------------------------------
// GrB_Matrix_assign_[SCALAR]: scalar expansion assignment to submatrix
//------------------------------------------------------------------------------

// Assigns a single scalar to a submatrix, C<Mask>(I,J) = accum(C(I,J),x).  The
// scalar x is implicitly expanded into a matrix A of size ni-by-nj, with each
// entry in A equal to x, and then C<Mask>(I,J) = accum(C(I,J),A) is done.
GB_PUBLIC GrB_Info GrB_Matrix_assign_BOOL // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) bool x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT8 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int8_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT8 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint8_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT16 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int16_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor 
desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT16 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint16_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_INT64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) int64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, 
// number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UINT64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) uint64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_FP32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) float x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_FP64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) double x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_assign_FC32 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GxB_FC32_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const 
GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GxB_Matrix_assign_FC64 // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GxB_FC64_t x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_UDT // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) void *x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_assign_Scalar // C<Mask>(I,J) = accum (C(I,J),x) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C(I,J),x) GrB_Scalar x, // scalar to assign to C(I,J) const GrB_Index *I, // row indices GrB_Index ni, // number of row indices const GrB_Index *J, // column indices GrB_Index nj, // number of column indices const GrB_Descriptor desc // descriptor for C and Mask ) ; //------------------------------------------------------------------------------ // GrB_assign: generic submatrix/subvector assignment //------------------------------------------------------------------------------ // GrB_assign is a generic function that provides access to all specific // GrB_*_assign* functions: // 
GrB_Vector_assign_T (w,m,acc,x,I,ni,d) // w<m>(I) = acc(w(I),x) // GrB_Vector_assign (w,m,acc,u,I,ni,d) // w<m>(I) = acc(w(I),u) // GrB_Matrix_assign_T (C,M,acc,x,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),x) // GrB_Col_assign (C,m,acc,u,I,ni,j,d) // C<m>(I,j) = acc(C(I,j),u) // GrB_Row_assign (C,m,acc,u,i,J,nj,d) // C<m'>(i,J) = acc(C(i,J),u') // GrB_Matrix_assign (C,M,acc,A,I,ni,J,nj,d) // C<M>(I,J) = acc(C(I,J),A) #if GxB_STDC_VERSION >= 201112L #define GrB_assign(arg1,Mask,accum,arg4,arg5,...) \ _Generic \ ( \ (arg1), \ GrB_Vector : \ _Generic \ ( \ (arg4), \ GB_CASES (, GrB, Vector_assign) , \ const GrB_Scalar : GrB_Vector_assign_Scalar , \ GrB_Scalar : GrB_Vector_assign_Scalar , \ default: GrB_Vector_assign \ ), \ default: \ _Generic \ ( \ (arg4), \ GB_CASES (, GrB, Matrix_assign) , \ const GrB_Scalar : GrB_Matrix_assign_Scalar , \ GrB_Scalar : GrB_Matrix_assign_Scalar , \ const GrB_Vector : \ _Generic \ ( \ (arg5), \ const GrB_Index *: GrB_Col_assign , \ GrB_Index *: GrB_Col_assign , \ default: GrB_Row_assign \ ), \ GrB_Vector : \ _Generic \ ( \ (arg5), \ const GrB_Index *: GrB_Col_assign , \ GrB_Index *: GrB_Col_assign , \ default: GrB_Row_assign \ ), \ default: GrB_Matrix_assign \ ) \ ) \ (arg1, Mask, accum, arg4, arg5, __VA_ARGS__) #endif //============================================================================== // GrB_apply: matrix and vector apply //============================================================================== // Apply a unary, index_unary, or binary operator to entries in a matrix or // vector, C<M> = accum (C, op (A)). 
GB_PUBLIC GrB_Info GrB_Vector_apply // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_UnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply // C<Mask> = accum (C, op(A)) or op(A') ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_UnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------- // vector apply: binaryop variants (bind 1st) //------------------------------------------- // Apply a binary operator to the entries in a vector, binding the first // input to a scalar x, w<mask> = accum (w, op (x,u)). 
GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_Scalar // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; // historical: identical to GxB_Vector_apply_BinaryOp1st GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp1st // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_BOOL // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries bool x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT8 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int8_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT16 // w<mask> = accum (w, op(x,u)) ( 
GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int16_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int32_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_INT64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries int64_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT8 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint8_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT16 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for 
z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint16_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint32_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UINT64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries uint64_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_FP32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries float x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_FP64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries double x, // first input: scalar x const GrB_Vector u, // second input: vector u const 
GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp1st_FC32 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC32_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp1st_FC64 // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC64_t x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp1st_UDT // w<mask> = accum (w, op(x,u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const void *x, // first input: scalar x const GrB_Vector u, // second input: vector u const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // vector apply: binaryop variants (bind 2nd) //------------------------------------------- // Apply a binary operator to the entries in a vector, binding the second // input to a scalar y, w<mask> = accum (w, op (u,y)). 
GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_Scalar // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; // historical: identical to GrB_Vector_apply_BinaryOp2nd_Scalar GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp2nd // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_BOOL // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT8 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT16 // w<mask> = accum (w, op(u,y)) ( 
GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_INT64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT8 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT16 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for 
z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UINT64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_FP32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u float y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_FP64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u double y, // second input: scalar y const 
GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp2nd_FC32 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_BinaryOp2nd_FC64 // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_BinaryOp2nd_UDT // w<mask> = accum (w, op(u,y)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // vector apply: IndexUnaryOp variants //------------------------------------------- // Apply a GrB_IndexUnaryOp to the entries in a vector GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_Scalar // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const 
GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_BOOL // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_INT64 // w<mask> = accum (w, 
op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UINT64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional 
accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_FP32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u float y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_FP64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u double y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_IndexOp_FC32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_apply_IndexOp_FC64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC64_t y, // second input: scalar y 
const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_apply_IndexOp_UDT // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // matrix apply: binaryop variants (bind 1st) //------------------------------------------- // Apply a binary operator to the entries in a matrix, binding the first input // to a scalar x, C<Mask> = accum (C, op (x,A)), or op(x,A'). GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_Scalar // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; // historical: identical to GrB_Matrix_apply_BinaryOp1st_Scalar GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp1st // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Scalar x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_BOOL // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp 
accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries bool x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT8 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int8_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT16 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int16_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int32_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_INT64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries int64_t x, // first input: scalar x const GrB_Matrix A, // second input: 
matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT8 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint8_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT16 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint16_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint32_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UINT64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries uint64_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_FP32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, 
// input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries float x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_FP64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries double x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp1st_FC32 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC32_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp1st_FC64 // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries GxB_FC64_t x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp1st_UDT // C<M>=accum(C,op(x,A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const 
GrB_BinaryOp op, // operator to apply to the entries const void *x, // first input: scalar x const GrB_Matrix A, // second input: matrix A const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------- // matrix apply: binaryop variants (bind 2nd) //------------------------------------------- // Apply a binary operator to the entries in a matrix, binding the second input // to a scalar y, C<Mask> = accum (C, op (A,y)), or op(A',y). GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_Scalar // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; // historical: identical to GrB_Matrix_apply_BinaryOp2nd_Scalar GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp2nd // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_BOOL // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT8 // 
C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT16 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_INT64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT8 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional 
accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT16 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UINT64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A float y, // second input: scalar y 
const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_FP64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A double y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC32 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_BinaryOp2nd_FC64 // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_BinaryOp2nd_UDT // C<M>=accum(C,op(A,y)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------- // matrix apply: IndexUnaryOp variants 
//------------------------------------------- // Apply a GrB_IndexUnaryOp to the entries in a matrix. GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_Scalar // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_BOOL // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT32 // C<M>=accum(C,op(A)) 
( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_INT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) 
const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UINT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_FP32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A float y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_FP64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A double y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_IndexOp_FC32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // 
descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_apply_IndexOp_FC64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_apply_IndexOp_UDT // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; //------------------------------------------------------------------------------ // GrB_apply: generic matrix/vector apply //------------------------------------------------------------------------------ // GrB_apply is a generic function for applying a unary operator to a matrix // or vector and provides access to these functions: // GrB_Vector_apply (w,mask,acc,op,u,d) // w<mask> = accum (w, op(u)) // GrB_Matrix_apply (C,Mask,acc,op,A,d) // C<Mask> = accum (C, op(A)) // GrB_Vector_apply (w,m,acc,unop ,u,d) // GrB_Vector_apply_BinaryOp1st_TYPE (w,m,acc,binop,x,u,d) // GrB_Vector_apply_BinaryOp2nd_TYPE (w,m,acc,binop,u,y,d) // GrB_Vector_apply_IndexOp_TYPE (w,m,acc,idxop,u,y,d) // GrB_Matrix_apply (C,M,acc,unop ,A,d) // GrB_Matrix_apply_BinaryOp1st_TYPE (C,M,acc,binop,x,A,d) // GrB_Matrix_apply_BinaryOp2nd_TYPE (C,M,acc,binop,A,y,d) // GrB_Matrix_apply_IndexOp_TYPE (C,M,acc,idxop,A,y,d) #if GxB_STDC_VERSION >= 201112L #define GB_BIND(kind,x,y,...) 
\ _Generic \ ( \ (x), \ const GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \ GrB_Scalar: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp1st_Scalar), \ GB_CASES (, GrB, GB_CONCAT ( kind, _apply_BinaryOp1st,, )) , \ default: \ _Generic \ ( \ (y), \ GB_CASES (, GrB, GB_CONCAT ( kind , _apply_BinaryOp2nd,, )), \ default: GB_CONCAT ( GrB,_,kind,_apply_BinaryOp2nd_Scalar) \ ) \ ) #define GB_IDXOP(kind,A,y,...) \ _Generic \ ( \ (y), \ GB_CASES (, GrB, GB_CONCAT ( kind, _apply_IndexOp,, )), \ default: GB_CONCAT ( GrB, _, kind, _apply_IndexOp_Scalar) \ ) #define GrB_apply(C,Mask,accum,op,...) \ _Generic \ ( \ (C), \ GrB_Vector : \ _Generic \ ( \ (op), \ GrB_UnaryOp : GrB_Vector_apply , \ GrB_BinaryOp : GB_BIND (Vector, __VA_ARGS__), \ GrB_IndexUnaryOp : GB_IDXOP (Vector, __VA_ARGS__) \ ), \ GrB_Matrix : \ _Generic \ ( \ (op), \ GrB_UnaryOp : GrB_Matrix_apply , \ GrB_BinaryOp : GB_BIND (Matrix, __VA_ARGS__), \ GrB_IndexUnaryOp : GB_IDXOP (Matrix, __VA_ARGS__) \ ) \ ) \ (C, Mask, accum, op, __VA_ARGS__) #endif //============================================================================== // GrB_select: matrix and vector selection using an IndexUnaryOp //============================================================================== //------------------------------------------- // vector select using an IndexUnaryOp //------------------------------------------- GB_PUBLIC GrB_Info GrB_Vector_select_Scalar // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_BOOL // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // 
optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_INT64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: 
vector u int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT8 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT16 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UINT64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_FP32 // w<mask> = accum (w, op(u)) ( GrB_Vector 
w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u float y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_FP64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u double y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_select_FC32 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GxB_Vector_select_FC64 // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Vector u, // first input: vector u GxB_FC64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; GB_PUBLIC GrB_Info GrB_Vector_select_UDT // w<mask> = accum (w, op(u)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_IndexUnaryOp op, // 
operator to apply to the entries const GrB_Vector u, // first input: vector u const void *y, // second input: scalar y const GrB_Descriptor desc // descriptor for w and mask ) ; //------------------------------------------- // matrix select using an IndexUnaryOp //------------------------------------------- GB_PUBLIC GrB_Info GrB_Matrix_select_Scalar // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A const GrB_Scalar y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_BOOL // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A bool y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: 
matrix A int16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_INT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A int64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT8 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint8_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT16 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint16_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // 
input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint32_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_UINT64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A uint64_t y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_FP32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A float y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_select_FP64 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the entries const GrB_Matrix A, // first input: matrix A double y, // second input: scalar y const GrB_Descriptor desc // descriptor for C, mask, and A ) ; GB_PUBLIC GrB_Info GxB_Matrix_select_FC32 // C<M>=accum(C,op(A)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_IndexUnaryOp op, // operator to apply to the 
// entries
    const GrB_Matrix A,             // first input:  matrix A
    GxB_FC32_t y,                   // second input: scalar y
    const GrB_Descriptor desc       // descriptor for C, mask, and A
) ;

GB_PUBLIC GrB_Info GxB_Matrix_select_FC64   // C<M>=accum(C,op(A))
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_IndexUnaryOp op,      // operator to apply to the entries
    const GrB_Matrix A,             // first input:  matrix A
    GxB_FC64_t y,                   // second input: scalar y
    const GrB_Descriptor desc       // descriptor for C, mask, and A
) ;

GB_PUBLIC GrB_Info GrB_Matrix_select_UDT    // C<M>=accum(C,op(A))
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_IndexUnaryOp op,      // operator to apply to the entries
    const GrB_Matrix A,             // first input:  matrix A
    const void *y,                  // second input: scalar y
    const GrB_Descriptor desc       // descriptor for C, mask, and A
) ;

// GrB_select is a generic method that applies an IndexUnaryOp to
// a matrix or vector, using any type of the scalar y.

//      GrB_Vector_select_TYPE (w,m,acc,idxop,u,y,d)
//      GrB_Matrix_select_TYPE (C,M,acc,idxop,A,y,d)

#if GxB_STDC_VERSION >= 201112L
// dispatch on the first argument (vector vs matrix), then on the type of the
// scalar y (built-in C type vs GrB_Scalar)
#define GrB_select(C,Mask,accum,op,x,y,d)                       \
    _Generic                                                    \
    (                                                           \
        (C),                                                    \
            GrB_Vector :                                        \
                _Generic                                        \
                (                                               \
                    (y),                                        \
                        GB_CASES (, GrB, Vector_select),        \
                        default: GrB_Vector_select_Scalar       \
                ),                                              \
            GrB_Matrix :                                        \
                _Generic                                        \
                (                                               \
                    (y),                                        \
                        GB_CASES (, GrB, Matrix_select),        \
                        default: GrB_Matrix_select_Scalar       \
                )                                               \
    )                                                           \
    (C, Mask, accum, op, x, y, d)
#endif

//==============================================================================
// GxB_select: matrix and vector selection (historical)
//==============================================================================

// GrB_select with the GrB_IndexUnaryOp operators should be used instead.
GB_PUBLIC GrB_Info GxB_Vector_select        // w<mask> = accum (w, op(u,k))
(
    GrB_Vector w,                   // input/output vector for results
    const GrB_Vector mask,          // optional mask for w, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for z=accum(w,t)
    const GxB_SelectOp op,          // operator to apply to the entries
    const GrB_Vector u,             // first input:  vector u
    const GrB_Scalar Thunk,         // optional input for the select operator
    const GrB_Descriptor desc       // descriptor for w and mask
) ;

GB_PUBLIC GrB_Info GxB_Matrix_select        // C<Mask> = accum (C, op(A,k)) or op(A',k)
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GxB_SelectOp op,          // operator to apply to the entries
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Scalar Thunk,         // optional input for the select operator
    const GrB_Descriptor desc       // descriptor for C, mask, and A
) ;

// GxB_select is historical; use GrB_select with a GrB_IndexUnaryOp instead
// (see the section above).

#if GxB_STDC_VERSION >= 201112L
#define GxB_select(C,Mask,accum,op,A,Thunk,desc)    \
    _Generic                                        \
    (                                               \
        (C),                                        \
            GrB_Vector : GxB_Vector_select ,        \
            GrB_Matrix : GxB_Matrix_select          \
    )                                               \
    (C, Mask, accum, op, A, Thunk, desc)
#endif

//==============================================================================
// GrB_reduce: matrix and vector reduction
//==============================================================================

// Reduce the entries in a matrix to a vector, a column vector t such that
// t(i) = sum (A (i,:)), and where "sum" is a commutative and associative
// monoid with an identity value.  A can be transposed, which reduces down the
// columns instead of the rows.
// For GrB_Matrix_reduce_BinaryOp, the GrB_BinaryOp op must correspond to a // known built-in monoid: // // operator data-types (all built-in) // ---------------------- --------------------------- // MIN, MAX INT*, UINT*, FP* // TIMES, PLUS INT*, UINT*, FP*, FC* // ANY INT*, UINT*, FP*, FC*, BOOL // LOR, LAND, LXOR, EQ BOOL // BOR, BAND, BXOR, BXNOR UINT* GB_PUBLIC GrB_Info GrB_Matrix_reduce_Monoid // w<mask> = accum (w,reduce(A)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_Monoid monoid, // reduce operator for t=reduce(A) const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for w, mask, and A ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_BinaryOp // w<mask> = accum (w,reduce(A)) ( GrB_Vector w, // input/output vector for results const GrB_Vector mask, // optional mask for w, unused if NULL const GrB_BinaryOp accum, // optional accum for z=accum(w,t) const GrB_BinaryOp op, // reduce operator for t=reduce(A) const GrB_Matrix A, // first input: matrix A const GrB_Descriptor desc // descriptor for w, mask, and A ) ; //------------------------------------------------------------------------------ // reduce a vector to a scalar //------------------------------------------------------------------------------ // Reduce entries in a vector to a scalar, c = accum (c, reduce_to_scalar(u)) GB_PUBLIC GrB_Info GrB_Vector_reduce_BOOL // c = accum (c, reduce_to_scalar (u)) ( bool *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT8 // c = accum (c, reduce_to_scalar (u)) ( int8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const 
GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT8 // c = accum (c, reduce_to_scalar (u)) ( uint8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT16 // c = accum (c, reduce_to_scalar (u)) ( int16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT16 // c = accum (c, reduce_to_scalar (u)) ( uint16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT32 // c = accum (c, reduce_to_scalar (u)) ( int32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT32 // c = accum (c, reduce_to_scalar (u)) ( uint32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_INT64 // c = accum (c, reduce_to_scalar (u)) ( int64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UINT64 // c = accum (c, reduce_to_scalar (u)) ( uint64_t *c, // result scalar const GrB_BinaryOp 
accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_FP32 // c = accum (c, reduce_to_scalar (u)) ( float *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_FP64 // c = accum (c, reduce_to_scalar (u)) ( double *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_reduce_FC32 // c = accum (c, reduce_to_scalar (u)) ( GxB_FC32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_reduce_FC64 // c = accum (c, reduce_to_scalar (u)) ( GxB_FC64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_UDT // c = accum (c, reduce_to_scalar (u)) ( void *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Vector_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(u)) ( GrB_Scalar c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info 
GrB_Vector_reduce_BinaryOp_Scalar ( GrB_Scalar c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_BinaryOp op, // binary op to do the reduction const GrB_Vector u, // vector to reduce const GrB_Descriptor desc ) ; //------------------------------------------------------------------------------ // reduce a matrix to a scalar //------------------------------------------------------------------------------ // Reduce entries in a matrix to a scalar, c = accum (c, reduce_to_scalar(A)) GB_PUBLIC GrB_Info GrB_Matrix_reduce_BOOL // c = accum (c, reduce_to_scalar (A)) ( bool *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT8 // c = accum (c, reduce_to_scalar (A)) ( int8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT8 // c = accum (c, reduce_to_scalar (A)) ( uint8_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT16 // c = accum (c, reduce_to_scalar (A)) ( int16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT16 // c = accum (c, reduce_to_scalar (A)) ( uint16_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const 
GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT32 // c = accum (c, reduce_to_scalar (A)) ( int32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT32 // c = accum (c, reduce_to_scalar (A)) ( uint32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_INT64 // c = accum (c, reduce_to_scalar (A)) ( int64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UINT64 // c = accum (c, reduce_to_scalar (A)) ( uint64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_FP32 // c = accum (c, reduce_to_scalar (A)) ( float *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_FP64 // c = accum (c, reduce_to_scalar (A)) ( double *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_reduce_FC32 // c = accum (c, reduce_to_scalar (A)) ( GxB_FC32_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const 
GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_reduce_FC64 // c = accum (c, reduce_to_scalar (A)) ( GxB_FC64_t *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_UDT // c = accum (c, reduce_to_scalar (A)) ( void *c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_Monoid_Scalar // c = accum(c,reduce_to_scalar(A)) ( GrB_Scalar c, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_Monoid monoid, // monoid to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GrB_Matrix_reduce_BinaryOp_Scalar ( GrB_Scalar S, // result scalar const GrB_BinaryOp accum, // optional accum for c=accum(c,t) const GrB_BinaryOp op, // binary op to do the reduction const GrB_Matrix A, // matrix to reduce const GrB_Descriptor desc ) ; //------------------------------------------------------------------------------ // GrB_reduce: generic matrix/vector reduction to a vector or scalar //------------------------------------------------------------------------------ // GrB_reduce is a generic function that provides access to all GrB_*reduce* // functions: // reduce matrix to vector: // GrB_Matrix_reduce_Monoid (w,mask,acc,mo,A,d) // w<mask> = acc (w,reduce(A)) // GrB_Matrix_reduce_BinaryOp (w,mask,acc,op,A,d) // w<mask> = acc (w,reduce(A)) // reduce matrix to scalar: // GrB_Vector_reduce_[SCALAR] (c,acc,monoid,u,d) // c = acc (c,reduce(u)) // GrB_Matrix_reduce_[SCALAR] (c,acc,monoid,A,d) // c = acc (c,reduce(A)) // 
//      GrB_Vector_reduce_Monoid_Scalar   (s,acc,monoid,u,d) // s = acc (s,reduce(u))
//      GrB_Matrix_reduce_Monoid_Scalar   (s,acc,monoid,A,d) // s = acc (s,reduce(A))
//      GrB_Vector_reduce_BinaryOp_Scalar (s,acc,op,u,d)     // s = acc (s,reduce(u))
//      GrB_Matrix_reduce_BinaryOp_Scalar (s,acc,op,A,d)     // s = acc (s,reduce(A))

#if GxB_STDC_VERSION >= 201112L

// GB_REDUCE_TO_SCALAR: helper for GrB_reduce.  Dispatches on the type of the
// result c: a pointer to a built-in C type selects the typed reduce methods,
// otherwise (GrB_Scalar result) it dispatches on op (GrB_BinaryOp vs monoid).
#define GB_REDUCE_TO_SCALAR(kind,c,op)                                         \
    _Generic                                                                   \
    (                                                                          \
        (c),                                                                   \
            GB_CASES (*, GrB, GB_CONCAT ( kind, _reduce,, )),                  \
            default:                                                           \
                _Generic                                                       \
                (                                                              \
                    (op),                                                      \
                        const GrB_BinaryOp :                                   \
                            GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),    \
                        GrB_BinaryOp :                                         \
                            GB_CONCAT (GrB,_,kind,_reduce_BinaryOp_Scalar),    \
                        default: GB_CONCAT (GrB,_,kind,_reduce_Monoid_Scalar)  \
                )                                                              \
    )

// dispatch on the 4th argument: a vector or matrix input selects a
// reduce-to-scalar; a monoid or binary op selects a matrix-to-vector reduce
#define GrB_reduce(arg1,arg2,arg3,arg4,...)                                 \
    _Generic                                                                \
    (                                                                       \
        (arg4),                                                             \
            const GrB_Vector   : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
                  GrB_Vector   : GB_REDUCE_TO_SCALAR (Vector, arg1, arg3), \
            const GrB_Matrix   : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
                  GrB_Matrix   : GB_REDUCE_TO_SCALAR (Matrix, arg1, arg3), \
            const GrB_Monoid   : GrB_Matrix_reduce_Monoid   ,              \
                  GrB_Monoid   : GrB_Matrix_reduce_Monoid   ,              \
            const GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp ,              \
                  GrB_BinaryOp : GrB_Matrix_reduce_BinaryOp                \
    )                                                                       \
    (arg1, arg2, arg3, arg4, __VA_ARGS__)
#endif

//==============================================================================
// GrB_transpose: matrix transpose
//==============================================================================

GB_PUBLIC GrB_Info GrB_transpose            // C<Mask> = accum (C, A')
(
    GrB_Matrix C,                   // input/output matrix for results
    const GrB_Matrix Mask,          // optional mask for C, unused if NULL
    const GrB_BinaryOp accum,       // optional accum for Z=accum(C,T)
    const GrB_Matrix A,             // first input:  matrix A
    const GrB_Descriptor desc       // descriptor for C, Mask, and A
) ;

//==============================================================================
// GrB_kronecker: Kronecker product
//==============================================================================

// GxB_kron is historical;
use GrB_kronecker instead GB_PUBLIC GrB_Info GxB_kron // C<Mask> = accum(C,kron(A,B)) (historical) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix Mask, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // defines '*' for T=kron(A,B) const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, Mask, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_kronecker_BinaryOp // C<M> = accum (C, kron(A,B)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix M, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_BinaryOp op, // defines '*' for T=kron(A,B) const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, M, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_kronecker_Monoid // C<M> = accum (C, kron(A,B)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix M, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Monoid monoid, // defines '*' for T=kron(A,B) const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, M, A, and B ) ; GB_PUBLIC GrB_Info GrB_Matrix_kronecker_Semiring // C<M> = accum (C, kron(A,B)) ( GrB_Matrix C, // input/output matrix for results const GrB_Matrix M, // optional mask for C, unused if NULL const GrB_BinaryOp accum, // optional accum for Z=accum(C,T) const GrB_Semiring semiring, // defines '*' for T=kron(A,B) const GrB_Matrix A, // first input: matrix A const GrB_Matrix B, // second input: matrix B const GrB_Descriptor desc // descriptor for C, M, A, and B ) ; #if GxB_STDC_VERSION >= 201112L #define GrB_kronecker(C,Mask,accum,op,A,B,desc) \ _Generic \ ( \ (op), \ const GrB_Semiring : 
GrB_Matrix_kronecker_Semiring ,                              \
            GrB_Semiring   : GrB_Matrix_kronecker_Semiring , \
      const GrB_Monoid     : GrB_Matrix_kronecker_Monoid ,   \
            GrB_Monoid     : GrB_Matrix_kronecker_Monoid ,   \
      const GrB_BinaryOp   : GrB_Matrix_kronecker_BinaryOp , \
            GrB_BinaryOp   : GrB_Matrix_kronecker_BinaryOp   \
    )                                                        \
    (C, Mask, accum, op, A, B, desc)
#endif

//==============================================================================
// GrB_Monoid: built-in monoids
//==============================================================================

GB_PUBLIC GrB_Monoid

    //--------------------------------------------------------------------------
    // 10 MIN monoids: (not for complex types)
    //--------------------------------------------------------------------------

    // GxB_MIN monoids, historical, use GrB_MIN_MONOID_* instead:
    GxB_MIN_INT8_MONOID,          // identity: INT8_MAX     terminal: INT8_MIN
    GxB_MIN_INT16_MONOID,         // identity: INT16_MAX    terminal: INT16_MIN
    GxB_MIN_INT32_MONOID,         // identity: INT32_MAX    terminal: INT32_MIN
    GxB_MIN_INT64_MONOID,         // identity: INT64_MAX    terminal: INT64_MIN
    GxB_MIN_UINT8_MONOID,         // identity: UINT8_MAX    terminal: 0
    GxB_MIN_UINT16_MONOID,        // identity: UINT16_MAX   terminal: 0
    GxB_MIN_UINT32_MONOID,        // identity: UINT32_MAX   terminal: 0
    GxB_MIN_UINT64_MONOID,        // identity: UINT64_MAX   terminal: 0
    GxB_MIN_FP32_MONOID,          // identity: INFINITY     terminal: -INFINITY
    GxB_MIN_FP64_MONOID,          // identity: INFINITY     terminal: -INFINITY

    // preferred names from the v1.3 spec:
    GrB_MIN_MONOID_INT8,          // identity: INT8_MAX     terminal: INT8_MIN
    GrB_MIN_MONOID_INT16,         // identity: INT16_MAX    terminal: INT16_MIN
    GrB_MIN_MONOID_INT32,         // identity: INT32_MAX    terminal: INT32_MIN
    GrB_MIN_MONOID_INT64,         // identity: INT64_MAX    terminal: INT64_MIN
    GrB_MIN_MONOID_UINT8,         // identity: UINT8_MAX    terminal: 0
    GrB_MIN_MONOID_UINT16,        // identity: UINT16_MAX   terminal: 0
    GrB_MIN_MONOID_UINT32,        // identity: UINT32_MAX   terminal: 0
    GrB_MIN_MONOID_UINT64,        // identity: UINT64_MAX   terminal: 0
    GrB_MIN_MONOID_FP32,          // identity: INFINITY     terminal:
-INFINITY GrB_MIN_MONOID_FP64, // identity: INFINITY terminal: -INFINITY //-------------------------------------------------------------------------- // 10 MAX monoids: //-------------------------------------------------------------------------- // GxB_MAX monoids, historical, use GrB_MAX_MONOID_* instead: GxB_MAX_INT8_MONOID, // identity: INT8_MIN terminal: INT8_MAX GxB_MAX_INT16_MONOID, // identity: INT16_MIN terminal: INT16_MAX GxB_MAX_INT32_MONOID, // identity: INT32_MIN terminal: INT32_MAX GxB_MAX_INT64_MONOID, // identity: INT64_MIN terminal: INT64_MAX GxB_MAX_UINT8_MONOID, // identity: 0 terminal: UINT8_MAX GxB_MAX_UINT16_MONOID, // identity: 0 terminal: UINT16_MAX GxB_MAX_UINT32_MONOID, // identity: 0 terminal: UINT32_MAX GxB_MAX_UINT64_MONOID, // identity: 0 terminal: UINT64_MAX GxB_MAX_FP32_MONOID, // identity: -INFINITY terminal: INFINITY GxB_MAX_FP64_MONOID, // identity: -INFINITY terminal: INFINITY // preferred names from the v1.3 spec: GrB_MAX_MONOID_INT8, // identity: INT8_MIN terminal: INT8_MAX GrB_MAX_MONOID_INT16, // identity: INT16_MIN terminal: INT16_MAX GrB_MAX_MONOID_INT32, // identity: INT32_MIN terminal: INT32_MAX GrB_MAX_MONOID_INT64, // identity: INT64_MIN terminal: INT64_MAX GrB_MAX_MONOID_UINT8, // identity: 0 terminal: UINT8_MAX GrB_MAX_MONOID_UINT16, // identity: 0 terminal: UINT16_MAX GrB_MAX_MONOID_UINT32, // identity: 0 terminal: UINT32_MAX GrB_MAX_MONOID_UINT64, // identity: 0 terminal: UINT64_MAX GrB_MAX_MONOID_FP32, // identity: -INFINITY terminal: INFINITY GrB_MAX_MONOID_FP64, // identity: -INFINITY terminal: INFINITY //-------------------------------------------------------------------------- // 12 PLUS monoids: //-------------------------------------------------------------------------- // GxB_PLUS monoids, historical, use GrB_PLUS_MONOID_* instead: GxB_PLUS_INT8_MONOID, // identity: 0 GxB_PLUS_INT16_MONOID, // identity: 0 GxB_PLUS_INT32_MONOID, // identity: 0 GxB_PLUS_INT64_MONOID, // identity: 0 GxB_PLUS_UINT8_MONOID, // 
identity: 0 GxB_PLUS_UINT16_MONOID, // identity: 0 GxB_PLUS_UINT32_MONOID, // identity: 0 GxB_PLUS_UINT64_MONOID, // identity: 0 GxB_PLUS_FP32_MONOID, // identity: 0 GxB_PLUS_FP64_MONOID, // identity: 0 // preferred names from the v1.3 spec: GrB_PLUS_MONOID_INT8, // identity: 0 GrB_PLUS_MONOID_INT16, // identity: 0 GrB_PLUS_MONOID_INT32, // identity: 0 GrB_PLUS_MONOID_INT64, // identity: 0 GrB_PLUS_MONOID_UINT8, // identity: 0 GrB_PLUS_MONOID_UINT16, // identity: 0 GrB_PLUS_MONOID_UINT32, // identity: 0 GrB_PLUS_MONOID_UINT64, // identity: 0 GrB_PLUS_MONOID_FP32, // identity: 0 GrB_PLUS_MONOID_FP64, // identity: 0 // complex monoids: GxB_PLUS_FC32_MONOID, // identity: 0 GxB_PLUS_FC64_MONOID, // identity: 0 //-------------------------------------------------------------------------- // 12 TIMES monoids: identity value is 1, int* and uint* are terminal //-------------------------------------------------------------------------- // GxB_TIMES monoids, historical, use GrB_TIMES_MONOID_* instead: GxB_TIMES_INT8_MONOID, // identity: 1 terminal: 0 GxB_TIMES_INT16_MONOID, // identity: 1 terminal: 0 GxB_TIMES_INT32_MONOID, // identity: 1 terminal: 0 GxB_TIMES_INT64_MONOID, // identity: 1 terminal: 0 GxB_TIMES_UINT8_MONOID, // identity: 1 terminal: 0 GxB_TIMES_UINT16_MONOID, // identity: 1 terminal: 0 GxB_TIMES_UINT32_MONOID, // identity: 1 terminal: 0 GxB_TIMES_UINT64_MONOID, // identity: 1 terminal: 0 GxB_TIMES_FP32_MONOID, // identity: 1 GxB_TIMES_FP64_MONOID, // identity: 1 // preferred names from the v1.3 spec: GrB_TIMES_MONOID_INT8, // identity: 1 terminal: 0 GrB_TIMES_MONOID_INT16, // identity: 1 terminal: 0 GrB_TIMES_MONOID_INT32, // identity: 1 terminal: 0 GrB_TIMES_MONOID_INT64, // identity: 1 terminal: 0 GrB_TIMES_MONOID_UINT8, // identity: 1 terminal: 0 GrB_TIMES_MONOID_UINT16, // identity: 1 terminal: 0 GrB_TIMES_MONOID_UINT32, // identity: 1 terminal: 0 GrB_TIMES_MONOID_UINT64, // identity: 1 terminal: 0 GrB_TIMES_MONOID_FP32, // identity: 1 
GrB_TIMES_MONOID_FP64, // identity: 1 // complex monoids: GxB_TIMES_FC32_MONOID, // identity: 1 GxB_TIMES_FC64_MONOID, // identity: 1 //-------------------------------------------------------------------------- // 13 ANY monoids: //-------------------------------------------------------------------------- GxB_ANY_BOOL_MONOID, // identity: any value terminal: any value GxB_ANY_INT8_MONOID, // identity: any value terminal: any value GxB_ANY_INT16_MONOID, // identity: any value terminal: any value GxB_ANY_INT32_MONOID, // identity: any value terminal: any value GxB_ANY_INT64_MONOID, // identity: any value terminal: any value GxB_ANY_UINT8_MONOID, // identity: any value terminal: any value GxB_ANY_UINT16_MONOID, // identity: any value terminal: any value GxB_ANY_UINT32_MONOID, // identity: any value terminal: any value GxB_ANY_UINT64_MONOID, // identity: any value terminal: any value GxB_ANY_FP32_MONOID, // identity: any value terminal: any value GxB_ANY_FP64_MONOID, // identity: any value terminal: any value GxB_ANY_FC32_MONOID, // identity: any value terminal: any value GxB_ANY_FC64_MONOID, // identity: any value terminal: any value //-------------------------------------------------------------------------- // 4 Boolean monoids: (see also the GxB_ANY_BOOL_MONOID above) //-------------------------------------------------------------------------- // GxB_* boolean monoids, historical, use GrB_* instead: GxB_LOR_BOOL_MONOID, // identity: false terminal: true GxB_LAND_BOOL_MONOID, // identity: true terminal: false GxB_LXOR_BOOL_MONOID, // identity: false GxB_LXNOR_BOOL_MONOID, // identity: true GxB_EQ_BOOL_MONOID, // (alternative name for GrB_LXNOR_MONOID_BOOL) // preferred names from the v1.3 spec: GrB_LOR_MONOID_BOOL, // identity: false terminal: true GrB_LAND_MONOID_BOOL, // identity: true terminal: false GrB_LXOR_MONOID_BOOL, // identity: false GrB_LXNOR_MONOID_BOOL, // identity: true //-------------------------------------------------------------------------- // 16 
Bitwise-or monoids: //-------------------------------------------------------------------------- // BOR monoids (bitwise or): GxB_BOR_UINT8_MONOID, // identity: 0 terminal: 0xFF GxB_BOR_UINT16_MONOID, // identity: 0 terminal: 0xFFFF GxB_BOR_UINT32_MONOID, // identity: 0 terminal: 0xFFFFFFFF GxB_BOR_UINT64_MONOID, // identity: 0 terminal: 0xFFFFFFFFFFFFFFFF // BAND monoids (bitwise and): GxB_BAND_UINT8_MONOID, // identity: 0xFF terminal: 0 GxB_BAND_UINT16_MONOID, // identity: 0xFFFF terminal: 0 GxB_BAND_UINT32_MONOID, // identity: 0xFFFFFFFF terminal: 0 GxB_BAND_UINT64_MONOID, // identity: 0xFFFFFFFFFFFFFFFF terminal: 0 // BXOR monoids (bitwise xor): GxB_BXOR_UINT8_MONOID, // identity: 0 GxB_BXOR_UINT16_MONOID, // identity: 0 GxB_BXOR_UINT32_MONOID, // identity: 0 GxB_BXOR_UINT64_MONOID, // identity: 0 // BXNOR monoids (bitwise xnor): GxB_BXNOR_UINT8_MONOID, // identity: 0xFF GxB_BXNOR_UINT16_MONOID, // identity: 0xFFFF GxB_BXNOR_UINT32_MONOID, // identity: 0xFFFFFFFF GxB_BXNOR_UINT64_MONOID ; // identity: 0xFFFFFFFFFFFFFFFF //============================================================================== // GrB_Semiring: built-in semirings //============================================================================== // Using built-in types and operators, SuiteSparse:GraphBLAS provides // 1553 pre-defined, built-in semirings: // 1000 semirings with a multiply operator TxT -> T where T is non-Boolean, // from the complete cross product of: // 5 monoids: MIN, MAX, PLUS, TIMES, ANY // 20 multiply operators: // FIRST, SECOND, PAIR (=ONEB), MIN, MAX, PLUS, MINUS, TIMES, DIV, // RDIV, RMINUS // ISEQ, ISNE, ISGT, ISLT, ISGE, ISLE, // LOR, LAND, LXOR // 10 non-Boolean real types, T // // Note that min_pair, max_pair, times_pair are all identical to any_pair. // These 30 semirings are named below, but are internally remapped to // their corresponding any_pair semiring. 
// 300 semirings with a comparator TxT -> bool, where T is // non-Boolean, from the complete cross product of: // 5 Boolean monoids: LAND, LOR, LXOR, EQ (=LXNOR), ANY // 6 multiply operators: EQ, NE, GT, LT, GE, LE // 10 non-Boolean real types, T // 55 semirings with purely Boolean types, bool x bool -> bool, from the // complete cross product of: // 5 Boolean monoids LAND, LOR, LXOR, EQ (=LXNOR), ANY // 11 multiply operators: // FIRST, SECOND, LOR, LAND, LXOR, EQ (=LXNOR), GT, LT, GE, LE, // PAIR (=ONEB) // // Note that lor_pair, land_pair, and eq_pair are all identical to // any_pair. These 3 semirings are named below, but are internally // remapped to any_pair_bool semiring. // 54 complex semirings: TxT -> T where T is float complex or double complex: // 3 complex monoids: PLUS, TIMES, ANY // 9 complex multiply operators: // FIRST, SECOND, PAIR (=ONEB), PLUS, MINUS, TIMES, DIV, RDIV, RMINUS // 2 complex types // // Note that times_pair is identical to any_pair. // These 2 semirings are named below, but are internally remapped to // their corresponding any_pair semiring. // 64 bitwise semirings: TxT -> T where T is an unsigned integer: // 4 bitwise monoids: BOR, BAND, BXOR, BXNOR // 4 bitwise multiply operators: BOR, BAND, BXOR, BXNOR // 4 unsigned integer types: UINT8, UINT16, UINT32, UINT64 // 80 positional semirings: XxX -> T where T is int64 or int32, and the type of // X is ignored: // 5 monoids: MIN, MAX, PLUS, TIMES, ANY // 8 multiply operators: // FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, // SECONDI, SECONDI1, SECONDJ, SECONDJ1 // 2 types: int32, int64 // The ANY operator is also valid to use as a multiplicative operator in a // semiring, but serves no purpose in that case. The ANY operator is meant as // a fast additive operator for a monoid, that terminates, or short-circuits, // as soon as any value is found. A valid user semiring can be constructed // with ANY as the multiply operator, but they are not predefined below. 
// Likewise, additional built-in operators can be used as multiplicative
// operators for floating-point semirings (POW, ATAN2, HYPOT, ...) and many
// more semirings can be constructed from bitwise monoids and many integer
// binary (non-bitwise) multiplicative operators, but these are not
// pre-defined.

// In the names below, each semiring has a name of the form GxB_add_mult_T
// where add is the additive monoid, mult is the multiply operator, and T is
// the type.  The type T is always the type of x and y for the z=mult(x,y)
// operator.  The monoid's three types and the ztype of the mult operator are
// always the same.  This is the type T for the first set, and Boolean for
// the second and third sets of semirings.

// 1553 = 1000 + 300 + 55 + 54 + 64 + 80 semirings are named below, but 35 = 30
// + 3 + 2 are identical to the corresponding any_pair semirings of the same
// type.  For positional semirings, the multiply ops FIRSTJ and SECONDI are
// identical, as are FIRSTJ1 and SECONDI1.  These semirings still appear as
// predefined, for convenience.
GB_PUBLIC GrB_Semiring //------------------------------------------------------------------------------ // 1000 non-Boolean semirings where all types are the same, given by suffix _T //------------------------------------------------------------------------------ // semirings with multiply op: z = FIRST (x,y), all types x,y,z the same: GxB_MIN_FIRST_INT8 , GxB_MAX_FIRST_INT8 , GxB_PLUS_FIRST_INT8 , GxB_TIMES_FIRST_INT8 , GxB_ANY_FIRST_INT8 , GxB_MIN_FIRST_INT16 , GxB_MAX_FIRST_INT16 , GxB_PLUS_FIRST_INT16 , GxB_TIMES_FIRST_INT16 , GxB_ANY_FIRST_INT16 , GxB_MIN_FIRST_INT32 , GxB_MAX_FIRST_INT32 , GxB_PLUS_FIRST_INT32 , GxB_TIMES_FIRST_INT32 , GxB_ANY_FIRST_INT32 , GxB_MIN_FIRST_INT64 , GxB_MAX_FIRST_INT64 , GxB_PLUS_FIRST_INT64 , GxB_TIMES_FIRST_INT64 , GxB_ANY_FIRST_INT64 , GxB_MIN_FIRST_UINT8 , GxB_MAX_FIRST_UINT8 , GxB_PLUS_FIRST_UINT8 , GxB_TIMES_FIRST_UINT8 , GxB_ANY_FIRST_UINT8 , GxB_MIN_FIRST_UINT16 , GxB_MAX_FIRST_UINT16 , GxB_PLUS_FIRST_UINT16 , GxB_TIMES_FIRST_UINT16 , GxB_ANY_FIRST_UINT16 , GxB_MIN_FIRST_UINT32 , GxB_MAX_FIRST_UINT32 , GxB_PLUS_FIRST_UINT32 , GxB_TIMES_FIRST_UINT32 , GxB_ANY_FIRST_UINT32 , GxB_MIN_FIRST_UINT64 , GxB_MAX_FIRST_UINT64 , GxB_PLUS_FIRST_UINT64 , GxB_TIMES_FIRST_UINT64 , GxB_ANY_FIRST_UINT64 , GxB_MIN_FIRST_FP32 , GxB_MAX_FIRST_FP32 , GxB_PLUS_FIRST_FP32 , GxB_TIMES_FIRST_FP32 , GxB_ANY_FIRST_FP32 , GxB_MIN_FIRST_FP64 , GxB_MAX_FIRST_FP64 , GxB_PLUS_FIRST_FP64 , GxB_TIMES_FIRST_FP64 , GxB_ANY_FIRST_FP64 , // semirings with multiply op: z = SECOND (x,y), all types x,y,z the same: GxB_MIN_SECOND_INT8 , GxB_MAX_SECOND_INT8 , GxB_PLUS_SECOND_INT8 , GxB_TIMES_SECOND_INT8 , GxB_ANY_SECOND_INT8 , GxB_MIN_SECOND_INT16 , GxB_MAX_SECOND_INT16 , GxB_PLUS_SECOND_INT16 , GxB_TIMES_SECOND_INT16 , GxB_ANY_SECOND_INT16 , GxB_MIN_SECOND_INT32 , GxB_MAX_SECOND_INT32 , GxB_PLUS_SECOND_INT32 , GxB_TIMES_SECOND_INT32 , GxB_ANY_SECOND_INT32 , GxB_MIN_SECOND_INT64 , GxB_MAX_SECOND_INT64 , GxB_PLUS_SECOND_INT64 , GxB_TIMES_SECOND_INT64 , 
GxB_ANY_SECOND_INT64 , GxB_MIN_SECOND_UINT8 , GxB_MAX_SECOND_UINT8 , GxB_PLUS_SECOND_UINT8 , GxB_TIMES_SECOND_UINT8 , GxB_ANY_SECOND_UINT8 , GxB_MIN_SECOND_UINT16 , GxB_MAX_SECOND_UINT16 , GxB_PLUS_SECOND_UINT16 , GxB_TIMES_SECOND_UINT16, GxB_ANY_SECOND_UINT16 , GxB_MIN_SECOND_UINT32 , GxB_MAX_SECOND_UINT32 , GxB_PLUS_SECOND_UINT32 , GxB_TIMES_SECOND_UINT32, GxB_ANY_SECOND_UINT32 , GxB_MIN_SECOND_UINT64 , GxB_MAX_SECOND_UINT64 , GxB_PLUS_SECOND_UINT64 , GxB_TIMES_SECOND_UINT64, GxB_ANY_SECOND_UINT64 , GxB_MIN_SECOND_FP32 , GxB_MAX_SECOND_FP32 , GxB_PLUS_SECOND_FP32 , GxB_TIMES_SECOND_FP32 , GxB_ANY_SECOND_FP32 , GxB_MIN_SECOND_FP64 , GxB_MAX_SECOND_FP64 , GxB_PLUS_SECOND_FP64 , GxB_TIMES_SECOND_FP64 , GxB_ANY_SECOND_FP64 , // semirings with multiply op: z = PAIR (x,y), all types x,y,z the same: // (note that min_pair, max_pair, times_pair are all identical to any_pair, and are marked below) GxB_MIN_PAIR_INT8 /**/, GxB_MAX_PAIR_INT8 /**/, GxB_PLUS_PAIR_INT8 , GxB_TIMES_PAIR_INT8 /**/, GxB_ANY_PAIR_INT8 , GxB_MIN_PAIR_INT16 /**/, GxB_MAX_PAIR_INT16 /**/, GxB_PLUS_PAIR_INT16 , GxB_TIMES_PAIR_INT16 /**/, GxB_ANY_PAIR_INT16 , GxB_MIN_PAIR_INT32 /**/, GxB_MAX_PAIR_INT32 /**/, GxB_PLUS_PAIR_INT32 , GxB_TIMES_PAIR_INT32 /**/, GxB_ANY_PAIR_INT32 , GxB_MIN_PAIR_INT64 /**/, GxB_MAX_PAIR_INT64 /**/, GxB_PLUS_PAIR_INT64 , GxB_TIMES_PAIR_INT64 /**/, GxB_ANY_PAIR_INT64 , GxB_MIN_PAIR_UINT8 /**/, GxB_MAX_PAIR_UINT8 /**/, GxB_PLUS_PAIR_UINT8 , GxB_TIMES_PAIR_UINT8 /**/, GxB_ANY_PAIR_UINT8 , GxB_MIN_PAIR_UINT16/**/, GxB_MAX_PAIR_UINT16/**/, GxB_PLUS_PAIR_UINT16 , GxB_TIMES_PAIR_UINT16/**/, GxB_ANY_PAIR_UINT16 , GxB_MIN_PAIR_UINT32/**/, GxB_MAX_PAIR_UINT32/**/, GxB_PLUS_PAIR_UINT32 , GxB_TIMES_PAIR_UINT32/**/, GxB_ANY_PAIR_UINT32 , GxB_MIN_PAIR_UINT64/**/, GxB_MAX_PAIR_UINT64/**/, GxB_PLUS_PAIR_UINT64 , GxB_TIMES_PAIR_UINT64/**/, GxB_ANY_PAIR_UINT64 , GxB_MIN_PAIR_FP32 /**/, GxB_MAX_PAIR_FP32 /**/, GxB_PLUS_PAIR_FP32 , GxB_TIMES_PAIR_FP32 /**/, GxB_ANY_PAIR_FP32 , GxB_MIN_PAIR_FP64 
/**/, GxB_MAX_PAIR_FP64 /**/, GxB_PLUS_PAIR_FP64 , GxB_TIMES_PAIR_FP64 /**/, GxB_ANY_PAIR_FP64 , // semirings with multiply op: z = MIN (x,y), all types x,y,z the same: GxB_MIN_MIN_INT8 , GxB_MAX_MIN_INT8 , GxB_PLUS_MIN_INT8 , GxB_TIMES_MIN_INT8 , GxB_ANY_MIN_INT8 , GxB_MIN_MIN_INT16 , GxB_MAX_MIN_INT16 , GxB_PLUS_MIN_INT16 , GxB_TIMES_MIN_INT16 , GxB_ANY_MIN_INT16 , GxB_MIN_MIN_INT32 , GxB_MAX_MIN_INT32 , GxB_PLUS_MIN_INT32 , GxB_TIMES_MIN_INT32 , GxB_ANY_MIN_INT32 , GxB_MIN_MIN_INT64 , GxB_MAX_MIN_INT64 , GxB_PLUS_MIN_INT64 , GxB_TIMES_MIN_INT64 , GxB_ANY_MIN_INT64 , GxB_MIN_MIN_UINT8 , GxB_MAX_MIN_UINT8 , GxB_PLUS_MIN_UINT8 , GxB_TIMES_MIN_UINT8 , GxB_ANY_MIN_UINT8 , GxB_MIN_MIN_UINT16 , GxB_MAX_MIN_UINT16 , GxB_PLUS_MIN_UINT16 , GxB_TIMES_MIN_UINT16 , GxB_ANY_MIN_UINT16 , GxB_MIN_MIN_UINT32 , GxB_MAX_MIN_UINT32 , GxB_PLUS_MIN_UINT32 , GxB_TIMES_MIN_UINT32 , GxB_ANY_MIN_UINT32 , GxB_MIN_MIN_UINT64 , GxB_MAX_MIN_UINT64 , GxB_PLUS_MIN_UINT64 , GxB_TIMES_MIN_UINT64 , GxB_ANY_MIN_UINT64 , GxB_MIN_MIN_FP32 , GxB_MAX_MIN_FP32 , GxB_PLUS_MIN_FP32 , GxB_TIMES_MIN_FP32 , GxB_ANY_MIN_FP32 , GxB_MIN_MIN_FP64 , GxB_MAX_MIN_FP64 , GxB_PLUS_MIN_FP64 , GxB_TIMES_MIN_FP64 , GxB_ANY_MIN_FP64 , // semirings with multiply op: z = MAX (x,y), all types x,y,z the same: GxB_MIN_MAX_INT8 , GxB_MAX_MAX_INT8 , GxB_PLUS_MAX_INT8 , GxB_TIMES_MAX_INT8 , GxB_ANY_MAX_INT8 , GxB_MIN_MAX_INT16 , GxB_MAX_MAX_INT16 , GxB_PLUS_MAX_INT16 , GxB_TIMES_MAX_INT16 , GxB_ANY_MAX_INT16 , GxB_MIN_MAX_INT32 , GxB_MAX_MAX_INT32 , GxB_PLUS_MAX_INT32 , GxB_TIMES_MAX_INT32 , GxB_ANY_MAX_INT32 , GxB_MIN_MAX_INT64 , GxB_MAX_MAX_INT64 , GxB_PLUS_MAX_INT64 , GxB_TIMES_MAX_INT64 , GxB_ANY_MAX_INT64 , GxB_MIN_MAX_UINT8 , GxB_MAX_MAX_UINT8 , GxB_PLUS_MAX_UINT8 , GxB_TIMES_MAX_UINT8 , GxB_ANY_MAX_UINT8 , GxB_MIN_MAX_UINT16 , GxB_MAX_MAX_UINT16 , GxB_PLUS_MAX_UINT16 , GxB_TIMES_MAX_UINT16 , GxB_ANY_MAX_UINT16 , GxB_MIN_MAX_UINT32 , GxB_MAX_MAX_UINT32 , GxB_PLUS_MAX_UINT32 , GxB_TIMES_MAX_UINT32 , GxB_ANY_MAX_UINT32 , 
GxB_MIN_MAX_UINT64 , GxB_MAX_MAX_UINT64 , GxB_PLUS_MAX_UINT64 , GxB_TIMES_MAX_UINT64 , GxB_ANY_MAX_UINT64 , GxB_MIN_MAX_FP32 , GxB_MAX_MAX_FP32 , GxB_PLUS_MAX_FP32 , GxB_TIMES_MAX_FP32 , GxB_ANY_MAX_FP32 , GxB_MIN_MAX_FP64 , GxB_MAX_MAX_FP64 , GxB_PLUS_MAX_FP64 , GxB_TIMES_MAX_FP64 , GxB_ANY_MAX_FP64 , // semirings with multiply op: z = PLUS (x,y), all types x,y,z the same: GxB_MIN_PLUS_INT8 , GxB_MAX_PLUS_INT8 , GxB_PLUS_PLUS_INT8 , GxB_TIMES_PLUS_INT8 , GxB_ANY_PLUS_INT8 , GxB_MIN_PLUS_INT16 , GxB_MAX_PLUS_INT16 , GxB_PLUS_PLUS_INT16 , GxB_TIMES_PLUS_INT16 , GxB_ANY_PLUS_INT16 , GxB_MIN_PLUS_INT32 , GxB_MAX_PLUS_INT32 , GxB_PLUS_PLUS_INT32 , GxB_TIMES_PLUS_INT32 , GxB_ANY_PLUS_INT32 , GxB_MIN_PLUS_INT64 , GxB_MAX_PLUS_INT64 , GxB_PLUS_PLUS_INT64 , GxB_TIMES_PLUS_INT64 , GxB_ANY_PLUS_INT64 , GxB_MIN_PLUS_UINT8 , GxB_MAX_PLUS_UINT8 , GxB_PLUS_PLUS_UINT8 , GxB_TIMES_PLUS_UINT8 , GxB_ANY_PLUS_UINT8 , GxB_MIN_PLUS_UINT16 , GxB_MAX_PLUS_UINT16 , GxB_PLUS_PLUS_UINT16 , GxB_TIMES_PLUS_UINT16 , GxB_ANY_PLUS_UINT16 , GxB_MIN_PLUS_UINT32 , GxB_MAX_PLUS_UINT32 , GxB_PLUS_PLUS_UINT32 , GxB_TIMES_PLUS_UINT32 , GxB_ANY_PLUS_UINT32 , GxB_MIN_PLUS_UINT64 , GxB_MAX_PLUS_UINT64 , GxB_PLUS_PLUS_UINT64 , GxB_TIMES_PLUS_UINT64 , GxB_ANY_PLUS_UINT64 , GxB_MIN_PLUS_FP32 , GxB_MAX_PLUS_FP32 , GxB_PLUS_PLUS_FP32 , GxB_TIMES_PLUS_FP32 , GxB_ANY_PLUS_FP32 , GxB_MIN_PLUS_FP64 , GxB_MAX_PLUS_FP64 , GxB_PLUS_PLUS_FP64 , GxB_TIMES_PLUS_FP64 , GxB_ANY_PLUS_FP64 , // semirings with multiply op: z = MINUS (x,y), all types x,y,z the same: GxB_MIN_MINUS_INT8 , GxB_MAX_MINUS_INT8 , GxB_PLUS_MINUS_INT8 , GxB_TIMES_MINUS_INT8 , GxB_ANY_MINUS_INT8 , GxB_MIN_MINUS_INT16 , GxB_MAX_MINUS_INT16 , GxB_PLUS_MINUS_INT16 , GxB_TIMES_MINUS_INT16 , GxB_ANY_MINUS_INT16 , GxB_MIN_MINUS_INT32 , GxB_MAX_MINUS_INT32 , GxB_PLUS_MINUS_INT32 , GxB_TIMES_MINUS_INT32 , GxB_ANY_MINUS_INT32 , GxB_MIN_MINUS_INT64 , GxB_MAX_MINUS_INT64 , GxB_PLUS_MINUS_INT64 , GxB_TIMES_MINUS_INT64 , GxB_ANY_MINUS_INT64 , GxB_MIN_MINUS_UINT8 , 
GxB_MAX_MINUS_UINT8 , GxB_PLUS_MINUS_UINT8 , GxB_TIMES_MINUS_UINT8 , GxB_ANY_MINUS_UINT8 , GxB_MIN_MINUS_UINT16 , GxB_MAX_MINUS_UINT16 , GxB_PLUS_MINUS_UINT16 , GxB_TIMES_MINUS_UINT16 , GxB_ANY_MINUS_UINT16 , GxB_MIN_MINUS_UINT32 , GxB_MAX_MINUS_UINT32 , GxB_PLUS_MINUS_UINT32 , GxB_TIMES_MINUS_UINT32 , GxB_ANY_MINUS_UINT32 , GxB_MIN_MINUS_UINT64 , GxB_MAX_MINUS_UINT64 , GxB_PLUS_MINUS_UINT64 , GxB_TIMES_MINUS_UINT64 , GxB_ANY_MINUS_UINT64 , GxB_MIN_MINUS_FP32 , GxB_MAX_MINUS_FP32 , GxB_PLUS_MINUS_FP32 , GxB_TIMES_MINUS_FP32 , GxB_ANY_MINUS_FP32 , GxB_MIN_MINUS_FP64 , GxB_MAX_MINUS_FP64 , GxB_PLUS_MINUS_FP64 , GxB_TIMES_MINUS_FP64 , GxB_ANY_MINUS_FP64 , // semirings with multiply op: z = TIMES (x,y), all types x,y,z the same: GxB_MIN_TIMES_INT8 , GxB_MAX_TIMES_INT8 , GxB_PLUS_TIMES_INT8 , GxB_TIMES_TIMES_INT8 , GxB_ANY_TIMES_INT8 , GxB_MIN_TIMES_INT16 , GxB_MAX_TIMES_INT16 , GxB_PLUS_TIMES_INT16 , GxB_TIMES_TIMES_INT16 , GxB_ANY_TIMES_INT16 , GxB_MIN_TIMES_INT32 , GxB_MAX_TIMES_INT32 , GxB_PLUS_TIMES_INT32 , GxB_TIMES_TIMES_INT32 , GxB_ANY_TIMES_INT32 , GxB_MIN_TIMES_INT64 , GxB_MAX_TIMES_INT64 , GxB_PLUS_TIMES_INT64 , GxB_TIMES_TIMES_INT64 , GxB_ANY_TIMES_INT64 , GxB_MIN_TIMES_UINT8 , GxB_MAX_TIMES_UINT8 , GxB_PLUS_TIMES_UINT8 , GxB_TIMES_TIMES_UINT8 , GxB_ANY_TIMES_UINT8 , GxB_MIN_TIMES_UINT16 , GxB_MAX_TIMES_UINT16 , GxB_PLUS_TIMES_UINT16 , GxB_TIMES_TIMES_UINT16 , GxB_ANY_TIMES_UINT16 , GxB_MIN_TIMES_UINT32 , GxB_MAX_TIMES_UINT32 , GxB_PLUS_TIMES_UINT32 , GxB_TIMES_TIMES_UINT32 , GxB_ANY_TIMES_UINT32 , GxB_MIN_TIMES_UINT64 , GxB_MAX_TIMES_UINT64 , GxB_PLUS_TIMES_UINT64 , GxB_TIMES_TIMES_UINT64 , GxB_ANY_TIMES_UINT64 , GxB_MIN_TIMES_FP32 , GxB_MAX_TIMES_FP32 , GxB_PLUS_TIMES_FP32 , GxB_TIMES_TIMES_FP32 , GxB_ANY_TIMES_FP32 , GxB_MIN_TIMES_FP64 , GxB_MAX_TIMES_FP64 , GxB_PLUS_TIMES_FP64 , GxB_TIMES_TIMES_FP64 , GxB_ANY_TIMES_FP64 , // semirings with multiply op: z = DIV (x,y), all types x,y,z the same: GxB_MIN_DIV_INT8 , GxB_MAX_DIV_INT8 , GxB_PLUS_DIV_INT8 , 
GxB_TIMES_DIV_INT8 , GxB_ANY_DIV_INT8 , GxB_MIN_DIV_INT16 , GxB_MAX_DIV_INT16 , GxB_PLUS_DIV_INT16 , GxB_TIMES_DIV_INT16 , GxB_ANY_DIV_INT16 , GxB_MIN_DIV_INT32 , GxB_MAX_DIV_INT32 , GxB_PLUS_DIV_INT32 , GxB_TIMES_DIV_INT32 , GxB_ANY_DIV_INT32 , GxB_MIN_DIV_INT64 , GxB_MAX_DIV_INT64 , GxB_PLUS_DIV_INT64 , GxB_TIMES_DIV_INT64 , GxB_ANY_DIV_INT64 , GxB_MIN_DIV_UINT8 , GxB_MAX_DIV_UINT8 , GxB_PLUS_DIV_UINT8 , GxB_TIMES_DIV_UINT8 , GxB_ANY_DIV_UINT8 , GxB_MIN_DIV_UINT16 , GxB_MAX_DIV_UINT16 , GxB_PLUS_DIV_UINT16 , GxB_TIMES_DIV_UINT16 , GxB_ANY_DIV_UINT16 , GxB_MIN_DIV_UINT32 , GxB_MAX_DIV_UINT32 , GxB_PLUS_DIV_UINT32 , GxB_TIMES_DIV_UINT32 , GxB_ANY_DIV_UINT32 , GxB_MIN_DIV_UINT64 , GxB_MAX_DIV_UINT64 , GxB_PLUS_DIV_UINT64 , GxB_TIMES_DIV_UINT64 , GxB_ANY_DIV_UINT64 , GxB_MIN_DIV_FP32 , GxB_MAX_DIV_FP32 , GxB_PLUS_DIV_FP32 , GxB_TIMES_DIV_FP32 , GxB_ANY_DIV_FP32 , GxB_MIN_DIV_FP64 , GxB_MAX_DIV_FP64 , GxB_PLUS_DIV_FP64 , GxB_TIMES_DIV_FP64 , GxB_ANY_DIV_FP64 , // semirings with multiply op: z = RDIV (x,y), all types x,y,z the same: GxB_MIN_RDIV_INT8 , GxB_MAX_RDIV_INT8 , GxB_PLUS_RDIV_INT8 , GxB_TIMES_RDIV_INT8 , GxB_ANY_RDIV_INT8 , GxB_MIN_RDIV_INT16 , GxB_MAX_RDIV_INT16 , GxB_PLUS_RDIV_INT16 , GxB_TIMES_RDIV_INT16 , GxB_ANY_RDIV_INT16 , GxB_MIN_RDIV_INT32 , GxB_MAX_RDIV_INT32 , GxB_PLUS_RDIV_INT32 , GxB_TIMES_RDIV_INT32 , GxB_ANY_RDIV_INT32 , GxB_MIN_RDIV_INT64 , GxB_MAX_RDIV_INT64 , GxB_PLUS_RDIV_INT64 , GxB_TIMES_RDIV_INT64 , GxB_ANY_RDIV_INT64 , GxB_MIN_RDIV_UINT8 , GxB_MAX_RDIV_UINT8 , GxB_PLUS_RDIV_UINT8 , GxB_TIMES_RDIV_UINT8 , GxB_ANY_RDIV_UINT8 , GxB_MIN_RDIV_UINT16 , GxB_MAX_RDIV_UINT16 , GxB_PLUS_RDIV_UINT16 , GxB_TIMES_RDIV_UINT16 , GxB_ANY_RDIV_UINT16 , GxB_MIN_RDIV_UINT32 , GxB_MAX_RDIV_UINT32 , GxB_PLUS_RDIV_UINT32 , GxB_TIMES_RDIV_UINT32 , GxB_ANY_RDIV_UINT32 , GxB_MIN_RDIV_UINT64 , GxB_MAX_RDIV_UINT64 , GxB_PLUS_RDIV_UINT64 , GxB_TIMES_RDIV_UINT64 , GxB_ANY_RDIV_UINT64 , GxB_MIN_RDIV_FP32 , GxB_MAX_RDIV_FP32 , GxB_PLUS_RDIV_FP32 , GxB_TIMES_RDIV_FP32 
, GxB_ANY_RDIV_FP32 , GxB_MIN_RDIV_FP64 , GxB_MAX_RDIV_FP64 , GxB_PLUS_RDIV_FP64 , GxB_TIMES_RDIV_FP64 , GxB_ANY_RDIV_FP64 , // semirings with multiply op: z = RMINUS (x,y), all types x,y,z the same: GxB_MIN_RMINUS_INT8 , GxB_MAX_RMINUS_INT8 , GxB_PLUS_RMINUS_INT8 , GxB_TIMES_RMINUS_INT8 , GxB_ANY_RMINUS_INT8 , GxB_MIN_RMINUS_INT16 , GxB_MAX_RMINUS_INT16 , GxB_PLUS_RMINUS_INT16 , GxB_TIMES_RMINUS_INT16 , GxB_ANY_RMINUS_INT16 , GxB_MIN_RMINUS_INT32 , GxB_MAX_RMINUS_INT32 , GxB_PLUS_RMINUS_INT32 , GxB_TIMES_RMINUS_INT32 , GxB_ANY_RMINUS_INT32 , GxB_MIN_RMINUS_INT64 , GxB_MAX_RMINUS_INT64 , GxB_PLUS_RMINUS_INT64 , GxB_TIMES_RMINUS_INT64 , GxB_ANY_RMINUS_INT64 , GxB_MIN_RMINUS_UINT8 , GxB_MAX_RMINUS_UINT8 , GxB_PLUS_RMINUS_UINT8 , GxB_TIMES_RMINUS_UINT8 , GxB_ANY_RMINUS_UINT8 , GxB_MIN_RMINUS_UINT16 , GxB_MAX_RMINUS_UINT16 , GxB_PLUS_RMINUS_UINT16 , GxB_TIMES_RMINUS_UINT16, GxB_ANY_RMINUS_UINT16 , GxB_MIN_RMINUS_UINT32 , GxB_MAX_RMINUS_UINT32 , GxB_PLUS_RMINUS_UINT32 , GxB_TIMES_RMINUS_UINT32, GxB_ANY_RMINUS_UINT32 , GxB_MIN_RMINUS_UINT64 , GxB_MAX_RMINUS_UINT64 , GxB_PLUS_RMINUS_UINT64 , GxB_TIMES_RMINUS_UINT64, GxB_ANY_RMINUS_UINT64 , GxB_MIN_RMINUS_FP32 , GxB_MAX_RMINUS_FP32 , GxB_PLUS_RMINUS_FP32 , GxB_TIMES_RMINUS_FP32 , GxB_ANY_RMINUS_FP32 , GxB_MIN_RMINUS_FP64 , GxB_MAX_RMINUS_FP64 , GxB_PLUS_RMINUS_FP64 , GxB_TIMES_RMINUS_FP64 , GxB_ANY_RMINUS_FP64 , // semirings with multiply op: z = ISEQ (x,y), all types x,y,z the same: GxB_MIN_ISEQ_INT8 , GxB_MAX_ISEQ_INT8 , GxB_PLUS_ISEQ_INT8 , GxB_TIMES_ISEQ_INT8 , GxB_ANY_ISEQ_INT8 , GxB_MIN_ISEQ_INT16 , GxB_MAX_ISEQ_INT16 , GxB_PLUS_ISEQ_INT16 , GxB_TIMES_ISEQ_INT16 , GxB_ANY_ISEQ_INT16 , GxB_MIN_ISEQ_INT32 , GxB_MAX_ISEQ_INT32 , GxB_PLUS_ISEQ_INT32 , GxB_TIMES_ISEQ_INT32 , GxB_ANY_ISEQ_INT32 , GxB_MIN_ISEQ_INT64 , GxB_MAX_ISEQ_INT64 , GxB_PLUS_ISEQ_INT64 , GxB_TIMES_ISEQ_INT64 , GxB_ANY_ISEQ_INT64 , GxB_MIN_ISEQ_UINT8 , GxB_MAX_ISEQ_UINT8 , GxB_PLUS_ISEQ_UINT8 , GxB_TIMES_ISEQ_UINT8 , GxB_ANY_ISEQ_UINT8 , 
GxB_MIN_ISEQ_UINT16 , GxB_MAX_ISEQ_UINT16 , GxB_PLUS_ISEQ_UINT16 , GxB_TIMES_ISEQ_UINT16 , GxB_ANY_ISEQ_UINT16 , GxB_MIN_ISEQ_UINT32 , GxB_MAX_ISEQ_UINT32 , GxB_PLUS_ISEQ_UINT32 , GxB_TIMES_ISEQ_UINT32 , GxB_ANY_ISEQ_UINT32 , GxB_MIN_ISEQ_UINT64 , GxB_MAX_ISEQ_UINT64 , GxB_PLUS_ISEQ_UINT64 , GxB_TIMES_ISEQ_UINT64 , GxB_ANY_ISEQ_UINT64 , GxB_MIN_ISEQ_FP32 , GxB_MAX_ISEQ_FP32 , GxB_PLUS_ISEQ_FP32 , GxB_TIMES_ISEQ_FP32 , GxB_ANY_ISEQ_FP32 , GxB_MIN_ISEQ_FP64 , GxB_MAX_ISEQ_FP64 , GxB_PLUS_ISEQ_FP64 , GxB_TIMES_ISEQ_FP64 , GxB_ANY_ISEQ_FP64 , // semirings with multiply op: z = ISNE (x,y), all types x,y,z the same: GxB_MIN_ISNE_INT8 , GxB_MAX_ISNE_INT8 , GxB_PLUS_ISNE_INT8 , GxB_TIMES_ISNE_INT8 , GxB_ANY_ISNE_INT8 , GxB_MIN_ISNE_INT16 , GxB_MAX_ISNE_INT16 , GxB_PLUS_ISNE_INT16 , GxB_TIMES_ISNE_INT16 , GxB_ANY_ISNE_INT16 , GxB_MIN_ISNE_INT32 , GxB_MAX_ISNE_INT32 , GxB_PLUS_ISNE_INT32 , GxB_TIMES_ISNE_INT32 , GxB_ANY_ISNE_INT32 , GxB_MIN_ISNE_INT64 , GxB_MAX_ISNE_INT64 , GxB_PLUS_ISNE_INT64 , GxB_TIMES_ISNE_INT64 , GxB_ANY_ISNE_INT64 , GxB_MIN_ISNE_UINT8 , GxB_MAX_ISNE_UINT8 , GxB_PLUS_ISNE_UINT8 , GxB_TIMES_ISNE_UINT8 , GxB_ANY_ISNE_UINT8 , GxB_MIN_ISNE_UINT16 , GxB_MAX_ISNE_UINT16 , GxB_PLUS_ISNE_UINT16 , GxB_TIMES_ISNE_UINT16 , GxB_ANY_ISNE_UINT16 , GxB_MIN_ISNE_UINT32 , GxB_MAX_ISNE_UINT32 , GxB_PLUS_ISNE_UINT32 , GxB_TIMES_ISNE_UINT32 , GxB_ANY_ISNE_UINT32 , GxB_MIN_ISNE_UINT64 , GxB_MAX_ISNE_UINT64 , GxB_PLUS_ISNE_UINT64 , GxB_TIMES_ISNE_UINT64 , GxB_ANY_ISNE_UINT64 , GxB_MIN_ISNE_FP32 , GxB_MAX_ISNE_FP32 , GxB_PLUS_ISNE_FP32 , GxB_TIMES_ISNE_FP32 , GxB_ANY_ISNE_FP32 , GxB_MIN_ISNE_FP64 , GxB_MAX_ISNE_FP64 , GxB_PLUS_ISNE_FP64 , GxB_TIMES_ISNE_FP64 , GxB_ANY_ISNE_FP64 , // semirings with multiply op: z = ISGT (x,y), all types x,y,z the same: GxB_MIN_ISGT_INT8 , GxB_MAX_ISGT_INT8 , GxB_PLUS_ISGT_INT8 , GxB_TIMES_ISGT_INT8 , GxB_ANY_ISGT_INT8 , GxB_MIN_ISGT_INT16 , GxB_MAX_ISGT_INT16 , GxB_PLUS_ISGT_INT16 , GxB_TIMES_ISGT_INT16 , GxB_ANY_ISGT_INT16 , 
GxB_MIN_ISGT_INT32 , GxB_MAX_ISGT_INT32 , GxB_PLUS_ISGT_INT32 , GxB_TIMES_ISGT_INT32 , GxB_ANY_ISGT_INT32 , GxB_MIN_ISGT_INT64 , GxB_MAX_ISGT_INT64 , GxB_PLUS_ISGT_INT64 , GxB_TIMES_ISGT_INT64 , GxB_ANY_ISGT_INT64 , GxB_MIN_ISGT_UINT8 , GxB_MAX_ISGT_UINT8 , GxB_PLUS_ISGT_UINT8 , GxB_TIMES_ISGT_UINT8 , GxB_ANY_ISGT_UINT8 , GxB_MIN_ISGT_UINT16 , GxB_MAX_ISGT_UINT16 , GxB_PLUS_ISGT_UINT16 , GxB_TIMES_ISGT_UINT16 , GxB_ANY_ISGT_UINT16 , GxB_MIN_ISGT_UINT32 , GxB_MAX_ISGT_UINT32 , GxB_PLUS_ISGT_UINT32 , GxB_TIMES_ISGT_UINT32 , GxB_ANY_ISGT_UINT32 , GxB_MIN_ISGT_UINT64 , GxB_MAX_ISGT_UINT64 , GxB_PLUS_ISGT_UINT64 , GxB_TIMES_ISGT_UINT64 , GxB_ANY_ISGT_UINT64 , GxB_MIN_ISGT_FP32 , GxB_MAX_ISGT_FP32 , GxB_PLUS_ISGT_FP32 , GxB_TIMES_ISGT_FP32 , GxB_ANY_ISGT_FP32 , GxB_MIN_ISGT_FP64 , GxB_MAX_ISGT_FP64 , GxB_PLUS_ISGT_FP64 , GxB_TIMES_ISGT_FP64 , GxB_ANY_ISGT_FP64 , // semirings with multiply op: z = ISLT (x,y), all types x,y,z the same: GxB_MIN_ISLT_INT8 , GxB_MAX_ISLT_INT8 , GxB_PLUS_ISLT_INT8 , GxB_TIMES_ISLT_INT8 , GxB_ANY_ISLT_INT8 , GxB_MIN_ISLT_INT16 , GxB_MAX_ISLT_INT16 , GxB_PLUS_ISLT_INT16 , GxB_TIMES_ISLT_INT16 , GxB_ANY_ISLT_INT16 , GxB_MIN_ISLT_INT32 , GxB_MAX_ISLT_INT32 , GxB_PLUS_ISLT_INT32 , GxB_TIMES_ISLT_INT32 , GxB_ANY_ISLT_INT32 , GxB_MIN_ISLT_INT64 , GxB_MAX_ISLT_INT64 , GxB_PLUS_ISLT_INT64 , GxB_TIMES_ISLT_INT64 , GxB_ANY_ISLT_INT64 , GxB_MIN_ISLT_UINT8 , GxB_MAX_ISLT_UINT8 , GxB_PLUS_ISLT_UINT8 , GxB_TIMES_ISLT_UINT8 , GxB_ANY_ISLT_UINT8 , GxB_MIN_ISLT_UINT16 , GxB_MAX_ISLT_UINT16 , GxB_PLUS_ISLT_UINT16 , GxB_TIMES_ISLT_UINT16 , GxB_ANY_ISLT_UINT16 , GxB_MIN_ISLT_UINT32 , GxB_MAX_ISLT_UINT32 , GxB_PLUS_ISLT_UINT32 , GxB_TIMES_ISLT_UINT32 , GxB_ANY_ISLT_UINT32 , GxB_MIN_ISLT_UINT64 , GxB_MAX_ISLT_UINT64 , GxB_PLUS_ISLT_UINT64 , GxB_TIMES_ISLT_UINT64 , GxB_ANY_ISLT_UINT64 , GxB_MIN_ISLT_FP32 , GxB_MAX_ISLT_FP32 , GxB_PLUS_ISLT_FP32 , GxB_TIMES_ISLT_FP32 , GxB_ANY_ISLT_FP32 , GxB_MIN_ISLT_FP64 , GxB_MAX_ISLT_FP64 , GxB_PLUS_ISLT_FP64 , GxB_TIMES_ISLT_FP64 
, GxB_ANY_ISLT_FP64 , // semirings with multiply op: z = ISGE (x,y), all types x,y,z the same: GxB_MIN_ISGE_INT8 , GxB_MAX_ISGE_INT8 , GxB_PLUS_ISGE_INT8 , GxB_TIMES_ISGE_INT8 , GxB_ANY_ISGE_INT8 , GxB_MIN_ISGE_INT16 , GxB_MAX_ISGE_INT16 , GxB_PLUS_ISGE_INT16 , GxB_TIMES_ISGE_INT16 , GxB_ANY_ISGE_INT16 , GxB_MIN_ISGE_INT32 , GxB_MAX_ISGE_INT32 , GxB_PLUS_ISGE_INT32 , GxB_TIMES_ISGE_INT32 , GxB_ANY_ISGE_INT32 , GxB_MIN_ISGE_INT64 , GxB_MAX_ISGE_INT64 , GxB_PLUS_ISGE_INT64 , GxB_TIMES_ISGE_INT64 , GxB_ANY_ISGE_INT64 , GxB_MIN_ISGE_UINT8 , GxB_MAX_ISGE_UINT8 , GxB_PLUS_ISGE_UINT8 , GxB_TIMES_ISGE_UINT8 , GxB_ANY_ISGE_UINT8 , GxB_MIN_ISGE_UINT16 , GxB_MAX_ISGE_UINT16 , GxB_PLUS_ISGE_UINT16 , GxB_TIMES_ISGE_UINT16 , GxB_ANY_ISGE_UINT16 , GxB_MIN_ISGE_UINT32 , GxB_MAX_ISGE_UINT32 , GxB_PLUS_ISGE_UINT32 , GxB_TIMES_ISGE_UINT32 , GxB_ANY_ISGE_UINT32 , GxB_MIN_ISGE_UINT64 , GxB_MAX_ISGE_UINT64 , GxB_PLUS_ISGE_UINT64 , GxB_TIMES_ISGE_UINT64 , GxB_ANY_ISGE_UINT64 , GxB_MIN_ISGE_FP32 , GxB_MAX_ISGE_FP32 , GxB_PLUS_ISGE_FP32 , GxB_TIMES_ISGE_FP32 , GxB_ANY_ISGE_FP32 , GxB_MIN_ISGE_FP64 , GxB_MAX_ISGE_FP64 , GxB_PLUS_ISGE_FP64 , GxB_TIMES_ISGE_FP64 , GxB_ANY_ISGE_FP64 , // semirings with multiply op: z = ISLE (x,y), all types x,y,z the same: GxB_MIN_ISLE_INT8 , GxB_MAX_ISLE_INT8 , GxB_PLUS_ISLE_INT8 , GxB_TIMES_ISLE_INT8 , GxB_ANY_ISLE_INT8 , GxB_MIN_ISLE_INT16 , GxB_MAX_ISLE_INT16 , GxB_PLUS_ISLE_INT16 , GxB_TIMES_ISLE_INT16 , GxB_ANY_ISLE_INT16 , GxB_MIN_ISLE_INT32 , GxB_MAX_ISLE_INT32 , GxB_PLUS_ISLE_INT32 , GxB_TIMES_ISLE_INT32 , GxB_ANY_ISLE_INT32 , GxB_MIN_ISLE_INT64 , GxB_MAX_ISLE_INT64 , GxB_PLUS_ISLE_INT64 , GxB_TIMES_ISLE_INT64 , GxB_ANY_ISLE_INT64 , GxB_MIN_ISLE_UINT8 , GxB_MAX_ISLE_UINT8 , GxB_PLUS_ISLE_UINT8 , GxB_TIMES_ISLE_UINT8 , GxB_ANY_ISLE_UINT8 , GxB_MIN_ISLE_UINT16 , GxB_MAX_ISLE_UINT16 , GxB_PLUS_ISLE_UINT16 , GxB_TIMES_ISLE_UINT16 , GxB_ANY_ISLE_UINT16 , GxB_MIN_ISLE_UINT32 , GxB_MAX_ISLE_UINT32 , GxB_PLUS_ISLE_UINT32 , GxB_TIMES_ISLE_UINT32 , 
GxB_ANY_ISLE_UINT32 , GxB_MIN_ISLE_UINT64 , GxB_MAX_ISLE_UINT64 , GxB_PLUS_ISLE_UINT64 , GxB_TIMES_ISLE_UINT64 , GxB_ANY_ISLE_UINT64 , GxB_MIN_ISLE_FP32 , GxB_MAX_ISLE_FP32 , GxB_PLUS_ISLE_FP32 , GxB_TIMES_ISLE_FP32 , GxB_ANY_ISLE_FP32 , GxB_MIN_ISLE_FP64 , GxB_MAX_ISLE_FP64 , GxB_PLUS_ISLE_FP64 , GxB_TIMES_ISLE_FP64 , GxB_ANY_ISLE_FP64 , // semirings with multiply op: z = LOR (x,y), all types x,y,z the same: GxB_MIN_LOR_INT8 , GxB_MAX_LOR_INT8 , GxB_PLUS_LOR_INT8 , GxB_TIMES_LOR_INT8 , GxB_ANY_LOR_INT8 , GxB_MIN_LOR_INT16 , GxB_MAX_LOR_INT16 , GxB_PLUS_LOR_INT16 , GxB_TIMES_LOR_INT16 , GxB_ANY_LOR_INT16 , GxB_MIN_LOR_INT32 , GxB_MAX_LOR_INT32 , GxB_PLUS_LOR_INT32 , GxB_TIMES_LOR_INT32 , GxB_ANY_LOR_INT32 , GxB_MIN_LOR_INT64 , GxB_MAX_LOR_INT64 , GxB_PLUS_LOR_INT64 , GxB_TIMES_LOR_INT64 , GxB_ANY_LOR_INT64 , GxB_MIN_LOR_UINT8 , GxB_MAX_LOR_UINT8 , GxB_PLUS_LOR_UINT8 , GxB_TIMES_LOR_UINT8 , GxB_ANY_LOR_UINT8 , GxB_MIN_LOR_UINT16 , GxB_MAX_LOR_UINT16 , GxB_PLUS_LOR_UINT16 , GxB_TIMES_LOR_UINT16 , GxB_ANY_LOR_UINT16 , GxB_MIN_LOR_UINT32 , GxB_MAX_LOR_UINT32 , GxB_PLUS_LOR_UINT32 , GxB_TIMES_LOR_UINT32 , GxB_ANY_LOR_UINT32 , GxB_MIN_LOR_UINT64 , GxB_MAX_LOR_UINT64 , GxB_PLUS_LOR_UINT64 , GxB_TIMES_LOR_UINT64 , GxB_ANY_LOR_UINT64 , GxB_MIN_LOR_FP32 , GxB_MAX_LOR_FP32 , GxB_PLUS_LOR_FP32 , GxB_TIMES_LOR_FP32 , GxB_ANY_LOR_FP32 , GxB_MIN_LOR_FP64 , GxB_MAX_LOR_FP64 , GxB_PLUS_LOR_FP64 , GxB_TIMES_LOR_FP64 , GxB_ANY_LOR_FP64 , // semirings with multiply op: z = LAND (x,y), all types x,y,z the same: GxB_MIN_LAND_INT8 , GxB_MAX_LAND_INT8 , GxB_PLUS_LAND_INT8 , GxB_TIMES_LAND_INT8 , GxB_ANY_LAND_INT8 , GxB_MIN_LAND_INT16 , GxB_MAX_LAND_INT16 , GxB_PLUS_LAND_INT16 , GxB_TIMES_LAND_INT16 , GxB_ANY_LAND_INT16 , GxB_MIN_LAND_INT32 , GxB_MAX_LAND_INT32 , GxB_PLUS_LAND_INT32 , GxB_TIMES_LAND_INT32 , GxB_ANY_LAND_INT32 , GxB_MIN_LAND_INT64 , GxB_MAX_LAND_INT64 , GxB_PLUS_LAND_INT64 , GxB_TIMES_LAND_INT64 , GxB_ANY_LAND_INT64 , GxB_MIN_LAND_UINT8 , GxB_MAX_LAND_UINT8 , 
GxB_PLUS_LAND_UINT8 , GxB_TIMES_LAND_UINT8 , GxB_ANY_LAND_UINT8 , GxB_MIN_LAND_UINT16 , GxB_MAX_LAND_UINT16 , GxB_PLUS_LAND_UINT16 , GxB_TIMES_LAND_UINT16 , GxB_ANY_LAND_UINT16 , GxB_MIN_LAND_UINT32 , GxB_MAX_LAND_UINT32 , GxB_PLUS_LAND_UINT32 , GxB_TIMES_LAND_UINT32 , GxB_ANY_LAND_UINT32 , GxB_MIN_LAND_UINT64 , GxB_MAX_LAND_UINT64 , GxB_PLUS_LAND_UINT64 , GxB_TIMES_LAND_UINT64 , GxB_ANY_LAND_UINT64 , GxB_MIN_LAND_FP32 , GxB_MAX_LAND_FP32 , GxB_PLUS_LAND_FP32 , GxB_TIMES_LAND_FP32 , GxB_ANY_LAND_FP32 , GxB_MIN_LAND_FP64 , GxB_MAX_LAND_FP64 , GxB_PLUS_LAND_FP64 , GxB_TIMES_LAND_FP64 , GxB_ANY_LAND_FP64 , // semirings with multiply op: z = LXOR (x,y), all types x,y,z the same: GxB_MIN_LXOR_INT8 , GxB_MAX_LXOR_INT8 , GxB_PLUS_LXOR_INT8 , GxB_TIMES_LXOR_INT8 , GxB_ANY_LXOR_INT8 , GxB_MIN_LXOR_INT16 , GxB_MAX_LXOR_INT16 , GxB_PLUS_LXOR_INT16 , GxB_TIMES_LXOR_INT16 , GxB_ANY_LXOR_INT16 , GxB_MIN_LXOR_INT32 , GxB_MAX_LXOR_INT32 , GxB_PLUS_LXOR_INT32 , GxB_TIMES_LXOR_INT32 , GxB_ANY_LXOR_INT32 , GxB_MIN_LXOR_INT64 , GxB_MAX_LXOR_INT64 , GxB_PLUS_LXOR_INT64 , GxB_TIMES_LXOR_INT64 , GxB_ANY_LXOR_INT64 , GxB_MIN_LXOR_UINT8 , GxB_MAX_LXOR_UINT8 , GxB_PLUS_LXOR_UINT8 , GxB_TIMES_LXOR_UINT8 , GxB_ANY_LXOR_UINT8 , GxB_MIN_LXOR_UINT16 , GxB_MAX_LXOR_UINT16 , GxB_PLUS_LXOR_UINT16 , GxB_TIMES_LXOR_UINT16 , GxB_ANY_LXOR_UINT16 , GxB_MIN_LXOR_UINT32 , GxB_MAX_LXOR_UINT32 , GxB_PLUS_LXOR_UINT32 , GxB_TIMES_LXOR_UINT32 , GxB_ANY_LXOR_UINT32 , GxB_MIN_LXOR_UINT64 , GxB_MAX_LXOR_UINT64 , GxB_PLUS_LXOR_UINT64 , GxB_TIMES_LXOR_UINT64 , GxB_ANY_LXOR_UINT64 , GxB_MIN_LXOR_FP32 , GxB_MAX_LXOR_FP32 , GxB_PLUS_LXOR_FP32 , GxB_TIMES_LXOR_FP32 , GxB_ANY_LXOR_FP32 , GxB_MIN_LXOR_FP64 , GxB_MAX_LXOR_FP64 , GxB_PLUS_LXOR_FP64 , GxB_TIMES_LXOR_FP64 , GxB_ANY_LXOR_FP64 , //------------------------------------------------------------------------------ // 300 semirings with a comparator TxT -> bool, where T is non-Boolean //------------------------------------------------------------------------------ // 
In the 4th column the GxB_EQ_*_* semirings could also be called // GxB_LXNOR_*_*, since the EQ and LXNOR boolean operators are identical // but those names are not included. // semirings with multiply op: z = EQ (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_EQ_INT8 , GxB_LAND_EQ_INT8 , GxB_LXOR_EQ_INT8 , GxB_EQ_EQ_INT8 , GxB_ANY_EQ_INT8 , GxB_LOR_EQ_INT16 , GxB_LAND_EQ_INT16 , GxB_LXOR_EQ_INT16 , GxB_EQ_EQ_INT16 , GxB_ANY_EQ_INT16 , GxB_LOR_EQ_INT32 , GxB_LAND_EQ_INT32 , GxB_LXOR_EQ_INT32 , GxB_EQ_EQ_INT32 , GxB_ANY_EQ_INT32 , GxB_LOR_EQ_INT64 , GxB_LAND_EQ_INT64 , GxB_LXOR_EQ_INT64 , GxB_EQ_EQ_INT64 , GxB_ANY_EQ_INT64 , GxB_LOR_EQ_UINT8 , GxB_LAND_EQ_UINT8 , GxB_LXOR_EQ_UINT8 , GxB_EQ_EQ_UINT8 , GxB_ANY_EQ_UINT8 , GxB_LOR_EQ_UINT16 , GxB_LAND_EQ_UINT16 , GxB_LXOR_EQ_UINT16 , GxB_EQ_EQ_UINT16 , GxB_ANY_EQ_UINT16 , GxB_LOR_EQ_UINT32 , GxB_LAND_EQ_UINT32 , GxB_LXOR_EQ_UINT32 , GxB_EQ_EQ_UINT32 , GxB_ANY_EQ_UINT32 , GxB_LOR_EQ_UINT64 , GxB_LAND_EQ_UINT64 , GxB_LXOR_EQ_UINT64 , GxB_EQ_EQ_UINT64 , GxB_ANY_EQ_UINT64 , GxB_LOR_EQ_FP32 , GxB_LAND_EQ_FP32 , GxB_LXOR_EQ_FP32 , GxB_EQ_EQ_FP32 , GxB_ANY_EQ_FP32 , GxB_LOR_EQ_FP64 , GxB_LAND_EQ_FP64 , GxB_LXOR_EQ_FP64 , GxB_EQ_EQ_FP64 , GxB_ANY_EQ_FP64 , // semirings with multiply op: z = NE (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_NE_INT8 , GxB_LAND_NE_INT8 , GxB_LXOR_NE_INT8 , GxB_EQ_NE_INT8 , GxB_ANY_NE_INT8 , GxB_LOR_NE_INT16 , GxB_LAND_NE_INT16 , GxB_LXOR_NE_INT16 , GxB_EQ_NE_INT16 , GxB_ANY_NE_INT16 , GxB_LOR_NE_INT32 , GxB_LAND_NE_INT32 , GxB_LXOR_NE_INT32 , GxB_EQ_NE_INT32 , GxB_ANY_NE_INT32 , GxB_LOR_NE_INT64 , GxB_LAND_NE_INT64 , GxB_LXOR_NE_INT64 , GxB_EQ_NE_INT64 , GxB_ANY_NE_INT64 , GxB_LOR_NE_UINT8 , GxB_LAND_NE_UINT8 , GxB_LXOR_NE_UINT8 , GxB_EQ_NE_UINT8 , GxB_ANY_NE_UINT8 , GxB_LOR_NE_UINT16 , GxB_LAND_NE_UINT16 , GxB_LXOR_NE_UINT16 , GxB_EQ_NE_UINT16 , GxB_ANY_NE_UINT16 , GxB_LOR_NE_UINT32 , GxB_LAND_NE_UINT32 , GxB_LXOR_NE_UINT32 , GxB_EQ_NE_UINT32 , 
GxB_ANY_NE_UINT32 , GxB_LOR_NE_UINT64 , GxB_LAND_NE_UINT64 , GxB_LXOR_NE_UINT64 , GxB_EQ_NE_UINT64 , GxB_ANY_NE_UINT64 , GxB_LOR_NE_FP32 , GxB_LAND_NE_FP32 , GxB_LXOR_NE_FP32 , GxB_EQ_NE_FP32 , GxB_ANY_NE_FP32 , GxB_LOR_NE_FP64 , GxB_LAND_NE_FP64 , GxB_LXOR_NE_FP64 , GxB_EQ_NE_FP64 , GxB_ANY_NE_FP64 , // semirings with multiply op: z = GT (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_GT_INT8 , GxB_LAND_GT_INT8 , GxB_LXOR_GT_INT8 , GxB_EQ_GT_INT8 , GxB_ANY_GT_INT8 , GxB_LOR_GT_INT16 , GxB_LAND_GT_INT16 , GxB_LXOR_GT_INT16 , GxB_EQ_GT_INT16 , GxB_ANY_GT_INT16 , GxB_LOR_GT_INT32 , GxB_LAND_GT_INT32 , GxB_LXOR_GT_INT32 , GxB_EQ_GT_INT32 , GxB_ANY_GT_INT32 , GxB_LOR_GT_INT64 , GxB_LAND_GT_INT64 , GxB_LXOR_GT_INT64 , GxB_EQ_GT_INT64 , GxB_ANY_GT_INT64 , GxB_LOR_GT_UINT8 , GxB_LAND_GT_UINT8 , GxB_LXOR_GT_UINT8 , GxB_EQ_GT_UINT8 , GxB_ANY_GT_UINT8 , GxB_LOR_GT_UINT16 , GxB_LAND_GT_UINT16 , GxB_LXOR_GT_UINT16 , GxB_EQ_GT_UINT16 , GxB_ANY_GT_UINT16 , GxB_LOR_GT_UINT32 , GxB_LAND_GT_UINT32 , GxB_LXOR_GT_UINT32 , GxB_EQ_GT_UINT32 , GxB_ANY_GT_UINT32 , GxB_LOR_GT_UINT64 , GxB_LAND_GT_UINT64 , GxB_LXOR_GT_UINT64 , GxB_EQ_GT_UINT64 , GxB_ANY_GT_UINT64 , GxB_LOR_GT_FP32 , GxB_LAND_GT_FP32 , GxB_LXOR_GT_FP32 , GxB_EQ_GT_FP32 , GxB_ANY_GT_FP32 , GxB_LOR_GT_FP64 , GxB_LAND_GT_FP64 , GxB_LXOR_GT_FP64 , GxB_EQ_GT_FP64 , GxB_ANY_GT_FP64 , // semirings with multiply op: z = LT (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_LT_INT8 , GxB_LAND_LT_INT8 , GxB_LXOR_LT_INT8 , GxB_EQ_LT_INT8 , GxB_ANY_LT_INT8 , GxB_LOR_LT_INT16 , GxB_LAND_LT_INT16 , GxB_LXOR_LT_INT16 , GxB_EQ_LT_INT16 , GxB_ANY_LT_INT16 , GxB_LOR_LT_INT32 , GxB_LAND_LT_INT32 , GxB_LXOR_LT_INT32 , GxB_EQ_LT_INT32 , GxB_ANY_LT_INT32 , GxB_LOR_LT_INT64 , GxB_LAND_LT_INT64 , GxB_LXOR_LT_INT64 , GxB_EQ_LT_INT64 , GxB_ANY_LT_INT64 , GxB_LOR_LT_UINT8 , GxB_LAND_LT_UINT8 , GxB_LXOR_LT_UINT8 , GxB_EQ_LT_UINT8 , GxB_ANY_LT_UINT8 , GxB_LOR_LT_UINT16 , GxB_LAND_LT_UINT16 , GxB_LXOR_LT_UINT16 , 
GxB_EQ_LT_UINT16 , GxB_ANY_LT_UINT16 , GxB_LOR_LT_UINT32 , GxB_LAND_LT_UINT32 , GxB_LXOR_LT_UINT32 , GxB_EQ_LT_UINT32 , GxB_ANY_LT_UINT32 , GxB_LOR_LT_UINT64 , GxB_LAND_LT_UINT64 , GxB_LXOR_LT_UINT64 , GxB_EQ_LT_UINT64 , GxB_ANY_LT_UINT64 , GxB_LOR_LT_FP32 , GxB_LAND_LT_FP32 , GxB_LXOR_LT_FP32 , GxB_EQ_LT_FP32 , GxB_ANY_LT_FP32 , GxB_LOR_LT_FP64 , GxB_LAND_LT_FP64 , GxB_LXOR_LT_FP64 , GxB_EQ_LT_FP64 , GxB_ANY_LT_FP64 , // semirings with multiply op: z = GE (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_GE_INT8 , GxB_LAND_GE_INT8 , GxB_LXOR_GE_INT8 , GxB_EQ_GE_INT8 , GxB_ANY_GE_INT8 , GxB_LOR_GE_INT16 , GxB_LAND_GE_INT16 , GxB_LXOR_GE_INT16 , GxB_EQ_GE_INT16 , GxB_ANY_GE_INT16 , GxB_LOR_GE_INT32 , GxB_LAND_GE_INT32 , GxB_LXOR_GE_INT32 , GxB_EQ_GE_INT32 , GxB_ANY_GE_INT32 , GxB_LOR_GE_INT64 , GxB_LAND_GE_INT64 , GxB_LXOR_GE_INT64 , GxB_EQ_GE_INT64 , GxB_ANY_GE_INT64 , GxB_LOR_GE_UINT8 , GxB_LAND_GE_UINT8 , GxB_LXOR_GE_UINT8 , GxB_EQ_GE_UINT8 , GxB_ANY_GE_UINT8 , GxB_LOR_GE_UINT16 , GxB_LAND_GE_UINT16 , GxB_LXOR_GE_UINT16 , GxB_EQ_GE_UINT16 , GxB_ANY_GE_UINT16 , GxB_LOR_GE_UINT32 , GxB_LAND_GE_UINT32 , GxB_LXOR_GE_UINT32 , GxB_EQ_GE_UINT32 , GxB_ANY_GE_UINT32 , GxB_LOR_GE_UINT64 , GxB_LAND_GE_UINT64 , GxB_LXOR_GE_UINT64 , GxB_EQ_GE_UINT64 , GxB_ANY_GE_UINT64 , GxB_LOR_GE_FP32 , GxB_LAND_GE_FP32 , GxB_LXOR_GE_FP32 , GxB_EQ_GE_FP32 , GxB_ANY_GE_FP32 , GxB_LOR_GE_FP64 , GxB_LAND_GE_FP64 , GxB_LXOR_GE_FP64 , GxB_EQ_GE_FP64 , GxB_ANY_GE_FP64 , // semirings with multiply op: z = LE (x,y), where z is boolean and x,y are given by the suffix: GxB_LOR_LE_INT8 , GxB_LAND_LE_INT8 , GxB_LXOR_LE_INT8 , GxB_EQ_LE_INT8 , GxB_ANY_LE_INT8 , GxB_LOR_LE_INT16 , GxB_LAND_LE_INT16 , GxB_LXOR_LE_INT16 , GxB_EQ_LE_INT16 , GxB_ANY_LE_INT16 , GxB_LOR_LE_INT32 , GxB_LAND_LE_INT32 , GxB_LXOR_LE_INT32 , GxB_EQ_LE_INT32 , GxB_ANY_LE_INT32 , GxB_LOR_LE_INT64 , GxB_LAND_LE_INT64 , GxB_LXOR_LE_INT64 , GxB_EQ_LE_INT64 , GxB_ANY_LE_INT64 , GxB_LOR_LE_UINT8 , GxB_LAND_LE_UINT8 , 
GxB_LXOR_LE_UINT8 , GxB_EQ_LE_UINT8 , GxB_ANY_LE_UINT8 , GxB_LOR_LE_UINT16 , GxB_LAND_LE_UINT16 , GxB_LXOR_LE_UINT16 , GxB_EQ_LE_UINT16 , GxB_ANY_LE_UINT16 , GxB_LOR_LE_UINT32 , GxB_LAND_LE_UINT32 , GxB_LXOR_LE_UINT32 , GxB_EQ_LE_UINT32 , GxB_ANY_LE_UINT32 , GxB_LOR_LE_UINT64 , GxB_LAND_LE_UINT64 , GxB_LXOR_LE_UINT64 , GxB_EQ_LE_UINT64 , GxB_ANY_LE_UINT64 , GxB_LOR_LE_FP32 , GxB_LAND_LE_FP32 , GxB_LXOR_LE_FP32 , GxB_EQ_LE_FP32 , GxB_ANY_LE_FP32 , GxB_LOR_LE_FP64 , GxB_LAND_LE_FP64 , GxB_LXOR_LE_FP64 , GxB_EQ_LE_FP64 , GxB_ANY_LE_FP64 , //------------------------------------------------------------------------------ // 55 semirings with purely Boolean types, bool x bool -> bool //------------------------------------------------------------------------------ // Note that lor_pair, land_pair, and eq_pair are all identical to any_pair. // These 3 are marked below. GxB_EQ_*_BOOL could be called // GxB_LXNOR_*_BOOL, and GxB_*_EQ_BOOL could be called GxB_*_LXNOR_BOOL, // but those names are not included. 
// purely boolean semirings in the form GxB_(add monoid)_(multiply operator)_BOOL: GxB_LOR_FIRST_BOOL , GxB_LAND_FIRST_BOOL , GxB_LXOR_FIRST_BOOL , GxB_EQ_FIRST_BOOL , GxB_ANY_FIRST_BOOL , GxB_LOR_SECOND_BOOL , GxB_LAND_SECOND_BOOL , GxB_LXOR_SECOND_BOOL , GxB_EQ_SECOND_BOOL , GxB_ANY_SECOND_BOOL , GxB_LOR_PAIR_BOOL/**/ , GxB_LAND_PAIR_BOOL/**/ , GxB_LXOR_PAIR_BOOL , GxB_EQ_PAIR_BOOL/**/ , GxB_ANY_PAIR_BOOL , GxB_LOR_LOR_BOOL , GxB_LAND_LOR_BOOL , GxB_LXOR_LOR_BOOL , GxB_EQ_LOR_BOOL , GxB_ANY_LOR_BOOL , GxB_LOR_LAND_BOOL , GxB_LAND_LAND_BOOL , GxB_LXOR_LAND_BOOL , GxB_EQ_LAND_BOOL , GxB_ANY_LAND_BOOL , GxB_LOR_LXOR_BOOL , GxB_LAND_LXOR_BOOL , GxB_LXOR_LXOR_BOOL , GxB_EQ_LXOR_BOOL , GxB_ANY_LXOR_BOOL , GxB_LOR_EQ_BOOL , GxB_LAND_EQ_BOOL , GxB_LXOR_EQ_BOOL , GxB_EQ_EQ_BOOL , GxB_ANY_EQ_BOOL , GxB_LOR_GT_BOOL , GxB_LAND_GT_BOOL , GxB_LXOR_GT_BOOL , GxB_EQ_GT_BOOL , GxB_ANY_GT_BOOL , GxB_LOR_LT_BOOL , GxB_LAND_LT_BOOL , GxB_LXOR_LT_BOOL , GxB_EQ_LT_BOOL , GxB_ANY_LT_BOOL , GxB_LOR_GE_BOOL , GxB_LAND_GE_BOOL , GxB_LXOR_GE_BOOL , GxB_EQ_GE_BOOL , GxB_ANY_GE_BOOL , GxB_LOR_LE_BOOL , GxB_LAND_LE_BOOL , GxB_LXOR_LE_BOOL , GxB_EQ_LE_BOOL , GxB_ANY_LE_BOOL , //------------------------------------------------------------------------------ // 54 complex semirings //------------------------------------------------------------------------------ // 3 monoids (plus, times, any), 2 types (FC32 and FC64), and 9 // multiplicative operators. // Note that times_pair is identical to any_pair. // These 2 are marked below. 
GxB_PLUS_FIRST_FC32 , GxB_TIMES_FIRST_FC32 , GxB_ANY_FIRST_FC32 , GxB_PLUS_FIRST_FC64 , GxB_TIMES_FIRST_FC64 , GxB_ANY_FIRST_FC64 , GxB_PLUS_SECOND_FC32 , GxB_TIMES_SECOND_FC32 , GxB_ANY_SECOND_FC32 , GxB_PLUS_SECOND_FC64 , GxB_TIMES_SECOND_FC64 , GxB_ANY_SECOND_FC64 , GxB_PLUS_PAIR_FC32 , GxB_TIMES_PAIR_FC32/**/, GxB_ANY_PAIR_FC32 , GxB_PLUS_PAIR_FC64 , GxB_TIMES_PAIR_FC64/**/, GxB_ANY_PAIR_FC64 , GxB_PLUS_PLUS_FC32 , GxB_TIMES_PLUS_FC32 , GxB_ANY_PLUS_FC32 , GxB_PLUS_PLUS_FC64 , GxB_TIMES_PLUS_FC64 , GxB_ANY_PLUS_FC64 , GxB_PLUS_MINUS_FC32 , GxB_TIMES_MINUS_FC32 , GxB_ANY_MINUS_FC32 , GxB_PLUS_MINUS_FC64 , GxB_TIMES_MINUS_FC64 , GxB_ANY_MINUS_FC64 , GxB_PLUS_TIMES_FC32 , GxB_TIMES_TIMES_FC32 , GxB_ANY_TIMES_FC32 , GxB_PLUS_TIMES_FC64 , GxB_TIMES_TIMES_FC64 , GxB_ANY_TIMES_FC64 , GxB_PLUS_DIV_FC32 , GxB_TIMES_DIV_FC32 , GxB_ANY_DIV_FC32 , GxB_PLUS_DIV_FC64 , GxB_TIMES_DIV_FC64 , GxB_ANY_DIV_FC64 , GxB_PLUS_RDIV_FC32 , GxB_TIMES_RDIV_FC32 , GxB_ANY_RDIV_FC32 , GxB_PLUS_RDIV_FC64 , GxB_TIMES_RDIV_FC64 , GxB_ANY_RDIV_FC64 , GxB_PLUS_RMINUS_FC32 , GxB_TIMES_RMINUS_FC32 , GxB_ANY_RMINUS_FC32 , GxB_PLUS_RMINUS_FC64 , GxB_TIMES_RMINUS_FC64 , GxB_ANY_RMINUS_FC64 , //------------------------------------------------------------------------------ // 64 bitwise semirings //------------------------------------------------------------------------------ // monoids: (BOR, BAND, BXOR, BXNOR) x // mult: (BOR, BAND, BXOR, BXNOR) x // types: (UINT8, UINT16, UINT32, UINT64) GxB_BOR_BOR_UINT8 , GxB_BOR_BOR_UINT16 , GxB_BOR_BOR_UINT32 , GxB_BOR_BOR_UINT64 , GxB_BOR_BAND_UINT8 , GxB_BOR_BAND_UINT16 , GxB_BOR_BAND_UINT32 , GxB_BOR_BAND_UINT64 , GxB_BOR_BXOR_UINT8 , GxB_BOR_BXOR_UINT16 , GxB_BOR_BXOR_UINT32 , GxB_BOR_BXOR_UINT64 , GxB_BOR_BXNOR_UINT8 , GxB_BOR_BXNOR_UINT16 , GxB_BOR_BXNOR_UINT32 , GxB_BOR_BXNOR_UINT64 , GxB_BAND_BOR_UINT8 , GxB_BAND_BOR_UINT16 , GxB_BAND_BOR_UINT32 , GxB_BAND_BOR_UINT64 , GxB_BAND_BAND_UINT8 , GxB_BAND_BAND_UINT16 , GxB_BAND_BAND_UINT32 , 
GxB_BAND_BAND_UINT64 , GxB_BAND_BXOR_UINT8 , GxB_BAND_BXOR_UINT16 , GxB_BAND_BXOR_UINT32 , GxB_BAND_BXOR_UINT64 , GxB_BAND_BXNOR_UINT8 , GxB_BAND_BXNOR_UINT16 , GxB_BAND_BXNOR_UINT32 , GxB_BAND_BXNOR_UINT64 , GxB_BXOR_BOR_UINT8 , GxB_BXOR_BOR_UINT16 , GxB_BXOR_BOR_UINT32 , GxB_BXOR_BOR_UINT64 , GxB_BXOR_BAND_UINT8 , GxB_BXOR_BAND_UINT16 , GxB_BXOR_BAND_UINT32 , GxB_BXOR_BAND_UINT64 , GxB_BXOR_BXOR_UINT8 , GxB_BXOR_BXOR_UINT16 , GxB_BXOR_BXOR_UINT32 , GxB_BXOR_BXOR_UINT64 , GxB_BXOR_BXNOR_UINT8 , GxB_BXOR_BXNOR_UINT16 , GxB_BXOR_BXNOR_UINT32 , GxB_BXOR_BXNOR_UINT64 , GxB_BXNOR_BOR_UINT8 , GxB_BXNOR_BOR_UINT16 , GxB_BXNOR_BOR_UINT32 , GxB_BXNOR_BOR_UINT64 , GxB_BXNOR_BAND_UINT8 , GxB_BXNOR_BAND_UINT16 , GxB_BXNOR_BAND_UINT32 , GxB_BXNOR_BAND_UINT64 , GxB_BXNOR_BXOR_UINT8 , GxB_BXNOR_BXOR_UINT16 , GxB_BXNOR_BXOR_UINT32 , GxB_BXNOR_BXOR_UINT64 , GxB_BXNOR_BXNOR_UINT8 , GxB_BXNOR_BXNOR_UINT16 , GxB_BXNOR_BXNOR_UINT32 , GxB_BXNOR_BXNOR_UINT64 , //------------------------------------------------------------------------------ // 80 positional semirings //------------------------------------------------------------------------------ // monoids: (MIN, MAX, ANY, PLUS, TIMES) x // mult: (FIRSTI, FIRSTI1, FIRSTJ, FIRSTJ1, SECONDI, SECONDI1, SECONDJ, SECONDJ1) // types: (INT32, INT64) GxB_MIN_FIRSTI_INT32, GxB_MIN_FIRSTI_INT64, GxB_MAX_FIRSTI_INT32, GxB_MAX_FIRSTI_INT64, GxB_ANY_FIRSTI_INT32, GxB_ANY_FIRSTI_INT64, GxB_PLUS_FIRSTI_INT32, GxB_PLUS_FIRSTI_INT64, GxB_TIMES_FIRSTI_INT32, GxB_TIMES_FIRSTI_INT64, GxB_MIN_FIRSTI1_INT32, GxB_MIN_FIRSTI1_INT64, GxB_MAX_FIRSTI1_INT32, GxB_MAX_FIRSTI1_INT64, GxB_ANY_FIRSTI1_INT32, GxB_ANY_FIRSTI1_INT64, GxB_PLUS_FIRSTI1_INT32, GxB_PLUS_FIRSTI1_INT64, GxB_TIMES_FIRSTI1_INT32, GxB_TIMES_FIRSTI1_INT64, GxB_MIN_FIRSTJ_INT32, GxB_MIN_FIRSTJ_INT64, GxB_MAX_FIRSTJ_INT32, GxB_MAX_FIRSTJ_INT64, GxB_ANY_FIRSTJ_INT32, GxB_ANY_FIRSTJ_INT64, GxB_PLUS_FIRSTJ_INT32, GxB_PLUS_FIRSTJ_INT64, GxB_TIMES_FIRSTJ_INT32, GxB_TIMES_FIRSTJ_INT64, 
GxB_MIN_FIRSTJ1_INT32, GxB_MIN_FIRSTJ1_INT64, GxB_MAX_FIRSTJ1_INT32, GxB_MAX_FIRSTJ1_INT64, GxB_ANY_FIRSTJ1_INT32, GxB_ANY_FIRSTJ1_INT64, GxB_PLUS_FIRSTJ1_INT32, GxB_PLUS_FIRSTJ1_INT64, GxB_TIMES_FIRSTJ1_INT32, GxB_TIMES_FIRSTJ1_INT64, GxB_MIN_SECONDI_INT32, GxB_MIN_SECONDI_INT64, GxB_MAX_SECONDI_INT32, GxB_MAX_SECONDI_INT64, GxB_ANY_SECONDI_INT32, GxB_ANY_SECONDI_INT64, GxB_PLUS_SECONDI_INT32, GxB_PLUS_SECONDI_INT64, GxB_TIMES_SECONDI_INT32, GxB_TIMES_SECONDI_INT64, GxB_MIN_SECONDI1_INT32, GxB_MIN_SECONDI1_INT64, GxB_MAX_SECONDI1_INT32, GxB_MAX_SECONDI1_INT64, GxB_ANY_SECONDI1_INT32, GxB_ANY_SECONDI1_INT64, GxB_PLUS_SECONDI1_INT32, GxB_PLUS_SECONDI1_INT64, GxB_TIMES_SECONDI1_INT32, GxB_TIMES_SECONDI1_INT64, GxB_MIN_SECONDJ_INT32, GxB_MIN_SECONDJ_INT64, GxB_MAX_SECONDJ_INT32, GxB_MAX_SECONDJ_INT64, GxB_ANY_SECONDJ_INT32, GxB_ANY_SECONDJ_INT64, GxB_PLUS_SECONDJ_INT32, GxB_PLUS_SECONDJ_INT64, GxB_TIMES_SECONDJ_INT32, GxB_TIMES_SECONDJ_INT64, GxB_MIN_SECONDJ1_INT32, GxB_MIN_SECONDJ1_INT64, GxB_MAX_SECONDJ1_INT32, GxB_MAX_SECONDJ1_INT64, GxB_ANY_SECONDJ1_INT32, GxB_ANY_SECONDJ1_INT64, GxB_PLUS_SECONDJ1_INT32, GxB_PLUS_SECONDJ1_INT64, GxB_TIMES_SECONDJ1_INT32, GxB_TIMES_SECONDJ1_INT64 ; //------------------------------------------------------------------------------ // GrB_* semirings //------------------------------------------------------------------------------ // The v1.3 C API for GraphBLAS adds the following 124 predefined semirings, // with GrB_* names. They are identical to 124 GxB_* semirings defined above, // with the same name, except that GrB_LXNOR_LOR_SEMIRING_BOOL is identical to // GxB_EQ_LOR_BOOL (since GrB_EQ_BOOL == GrB_LXNOR). The old names are listed // below alongside each new name; the new GrB_* names are preferred. 
// 12 kinds of GrB_* semirings are available for all 10 real non-boolean types: // PLUS_TIMES, PLUS_MIN, // MIN_PLUS, MIN_TIMES, MIN_FIRST, MIN_SECOND, MIN_MAX, // MAX_PLUS, MAX_TIMES, MAX_FIRST, MAX_SECOND, MAX_MIN // and 4 semirings for boolean only: // LOR_LAND, LAND_LOR, LXOR_LAND, LXNOR_LOR. // GxB_* semirings corresponding to the equivalent GrB_* semiring are // historical. GB_PUBLIC GrB_Semiring //-------------------------------------------------------------------------- // 20 semirings with PLUS monoids //-------------------------------------------------------------------------- // PLUS_TIMES semirings for all 10 real, non-boolean types: GrB_PLUS_TIMES_SEMIRING_INT8, // GxB_PLUS_TIMES_INT8 GrB_PLUS_TIMES_SEMIRING_INT16, // GxB_PLUS_TIMES_INT16 GrB_PLUS_TIMES_SEMIRING_INT32, // GxB_PLUS_TIMES_INT32 GrB_PLUS_TIMES_SEMIRING_INT64, // GxB_PLUS_TIMES_INT64 GrB_PLUS_TIMES_SEMIRING_UINT8, // GxB_PLUS_TIMES_UINT8 GrB_PLUS_TIMES_SEMIRING_UINT16, // GxB_PLUS_TIMES_UINT16 GrB_PLUS_TIMES_SEMIRING_UINT32, // GxB_PLUS_TIMES_UINT32 GrB_PLUS_TIMES_SEMIRING_UINT64, // GxB_PLUS_TIMES_UINT64 GrB_PLUS_TIMES_SEMIRING_FP32, // GxB_PLUS_TIMES_FP32 GrB_PLUS_TIMES_SEMIRING_FP64, // GxB_PLUS_TIMES_FP64 // PLUS_MIN semirings for all 10 real, non-boolean types: GrB_PLUS_MIN_SEMIRING_INT8, // GxB_PLUS_MIN_INT8 GrB_PLUS_MIN_SEMIRING_INT16, // GxB_PLUS_MIN_INT16 GrB_PLUS_MIN_SEMIRING_INT32, // GxB_PLUS_MIN_INT32 GrB_PLUS_MIN_SEMIRING_INT64, // GxB_PLUS_MIN_INT64 GrB_PLUS_MIN_SEMIRING_UINT8, // GxB_PLUS_MIN_UINT8 GrB_PLUS_MIN_SEMIRING_UINT16, // GxB_PLUS_MIN_UINT16 GrB_PLUS_MIN_SEMIRING_UINT32, // GxB_PLUS_MIN_UINT32 GrB_PLUS_MIN_SEMIRING_UINT64, // GxB_PLUS_MIN_UINT64 GrB_PLUS_MIN_SEMIRING_FP32, // GxB_PLUS_MIN_FP32 GrB_PLUS_MIN_SEMIRING_FP64, // GxB_PLUS_MIN_FP64 //-------------------------------------------------------------------------- // 50 semirings with MIN monoids //-------------------------------------------------------------------------- // MIN_PLUS semirings for all 10 real, 
non-boolean types: GrB_MIN_PLUS_SEMIRING_INT8, // GxB_MIN_PLUS_INT8 GrB_MIN_PLUS_SEMIRING_INT16, // GxB_MIN_PLUS_INT16 GrB_MIN_PLUS_SEMIRING_INT32, // GxB_MIN_PLUS_INT32 GrB_MIN_PLUS_SEMIRING_INT64, // GxB_MIN_PLUS_INT64 GrB_MIN_PLUS_SEMIRING_UINT8, // GxB_MIN_PLUS_UINT8 GrB_MIN_PLUS_SEMIRING_UINT16, // GxB_MIN_PLUS_UINT16 GrB_MIN_PLUS_SEMIRING_UINT32, // GxB_MIN_PLUS_UINT32 GrB_MIN_PLUS_SEMIRING_UINT64, // GxB_MIN_PLUS_UINT64 GrB_MIN_PLUS_SEMIRING_FP32, // GxB_MIN_PLUS_FP32 GrB_MIN_PLUS_SEMIRING_FP64, // GxB_MIN_PLUS_FP64 // MIN_TIMES semirings for all 10 real, non-boolean types: GrB_MIN_TIMES_SEMIRING_INT8, // GxB_MIN_TIMES_INT8 GrB_MIN_TIMES_SEMIRING_INT16, // GxB_MIN_TIMES_INT16 GrB_MIN_TIMES_SEMIRING_INT32, // GxB_MIN_TIMES_INT32 GrB_MIN_TIMES_SEMIRING_INT64, // GxB_MIN_TIMES_INT64 GrB_MIN_TIMES_SEMIRING_UINT8, // GxB_MIN_TIMES_UINT8 GrB_MIN_TIMES_SEMIRING_UINT16, // GxB_MIN_TIMES_UINT16 GrB_MIN_TIMES_SEMIRING_UINT32, // GxB_MIN_TIMES_UINT32 GrB_MIN_TIMES_SEMIRING_UINT64, // GxB_MIN_TIMES_UINT64 GrB_MIN_TIMES_SEMIRING_FP32, // GxB_MIN_TIMES_FP32 GrB_MIN_TIMES_SEMIRING_FP64, // GxB_MIN_TIMES_FP64 // MIN_FIRST semirings for all 10 real, non-boolean types: GrB_MIN_FIRST_SEMIRING_INT8, // GxB_MIN_FIRST_INT8 GrB_MIN_FIRST_SEMIRING_INT16, // GxB_MIN_FIRST_INT16 GrB_MIN_FIRST_SEMIRING_INT32, // GxB_MIN_FIRST_INT32 GrB_MIN_FIRST_SEMIRING_INT64, // GxB_MIN_FIRST_INT64 GrB_MIN_FIRST_SEMIRING_UINT8, // GxB_MIN_FIRST_UINT8 GrB_MIN_FIRST_SEMIRING_UINT16, // GxB_MIN_FIRST_UINT16 GrB_MIN_FIRST_SEMIRING_UINT32, // GxB_MIN_FIRST_UINT32 GrB_MIN_FIRST_SEMIRING_UINT64, // GxB_MIN_FIRST_UINT64 GrB_MIN_FIRST_SEMIRING_FP32, // GxB_MIN_FIRST_FP32 GrB_MIN_FIRST_SEMIRING_FP64, // GxB_MIN_FIRST_FP64 // MIN_SECOND semirings for all 10 real, non-boolean types: GrB_MIN_SECOND_SEMIRING_INT8, // GxB_MIN_SECOND_INT8 GrB_MIN_SECOND_SEMIRING_INT16, // GxB_MIN_SECOND_INT16 GrB_MIN_SECOND_SEMIRING_INT32, // GxB_MIN_SECOND_INT32 GrB_MIN_SECOND_SEMIRING_INT64, // GxB_MIN_SECOND_INT64 
GrB_MIN_SECOND_SEMIRING_UINT8, // GxB_MIN_SECOND_UINT8 GrB_MIN_SECOND_SEMIRING_UINT16, // GxB_MIN_SECOND_UINT16 GrB_MIN_SECOND_SEMIRING_UINT32, // GxB_MIN_SECOND_UINT32 GrB_MIN_SECOND_SEMIRING_UINT64, // GxB_MIN_SECOND_UINT64 GrB_MIN_SECOND_SEMIRING_FP32, // GxB_MIN_SECOND_FP32 GrB_MIN_SECOND_SEMIRING_FP64, // GxB_MIN_SECOND_FP64 // MIN_MAX semirings for all 10 real, non-boolean types: GrB_MIN_MAX_SEMIRING_INT8, // GxB_MIN_MAX_INT8 GrB_MIN_MAX_SEMIRING_INT16, // GxB_MIN_MAX_INT16 GrB_MIN_MAX_SEMIRING_INT32, // GxB_MIN_MAX_INT32 GrB_MIN_MAX_SEMIRING_INT64, // GxB_MIN_MAX_INT64 GrB_MIN_MAX_SEMIRING_UINT8, // GxB_MIN_MAX_UINT8 GrB_MIN_MAX_SEMIRING_UINT16, // GxB_MIN_MAX_UINT16 GrB_MIN_MAX_SEMIRING_UINT32, // GxB_MIN_MAX_UINT32 GrB_MIN_MAX_SEMIRING_UINT64, // GxB_MIN_MAX_UINT64 GrB_MIN_MAX_SEMIRING_FP32, // GxB_MIN_MAX_FP32 GrB_MIN_MAX_SEMIRING_FP64, // GxB_MIN_MAX_FP64 //-------------------------------------------------------------------------- // 50 semirings with MAX monoids //-------------------------------------------------------------------------- // MAX_PLUS semirings for all 10 real, non-boolean types GrB_MAX_PLUS_SEMIRING_INT8, // GxB_MAX_PLUS_INT8 GrB_MAX_PLUS_SEMIRING_INT16, // GxB_MAX_PLUS_INT16 GrB_MAX_PLUS_SEMIRING_INT32, // GxB_MAX_PLUS_INT32 GrB_MAX_PLUS_SEMIRING_INT64, // GxB_MAX_PLUS_INT64 GrB_MAX_PLUS_SEMIRING_UINT8, // GxB_MAX_PLUS_UINT8 GrB_MAX_PLUS_SEMIRING_UINT16, // GxB_MAX_PLUS_UINT16 GrB_MAX_PLUS_SEMIRING_UINT32, // GxB_MAX_PLUS_UINT32 GrB_MAX_PLUS_SEMIRING_UINT64, // GxB_MAX_PLUS_UINT64 GrB_MAX_PLUS_SEMIRING_FP32, // GxB_MAX_PLUS_FP32 GrB_MAX_PLUS_SEMIRING_FP64, // GxB_MAX_PLUS_FP64 // MAX_TIMES semirings for all 10 real, non-boolean types: GrB_MAX_TIMES_SEMIRING_INT8, // GxB_MAX_TIMES_INT8 GrB_MAX_TIMES_SEMIRING_INT16, // GxB_MAX_TIMES_INT16 GrB_MAX_TIMES_SEMIRING_INT32, // GxB_MAX_TIMES_INT32 GrB_MAX_TIMES_SEMIRING_INT64, // GxB_MAX_TIMES_INT64 GrB_MAX_TIMES_SEMIRING_UINT8, // GxB_MAX_TIMES_UINT8 GrB_MAX_TIMES_SEMIRING_UINT16, // 
GxB_MAX_TIMES_UINT16 GrB_MAX_TIMES_SEMIRING_UINT32, // GxB_MAX_TIMES_UINT32 GrB_MAX_TIMES_SEMIRING_UINT64, // GxB_MAX_TIMES_UINT64 GrB_MAX_TIMES_SEMIRING_FP32, // GxB_MAX_TIMES_FP32 GrB_MAX_TIMES_SEMIRING_FP64, // GxB_MAX_TIMES_FP64 // MAX_FIRST semirings for all 10 real, non-boolean types: GrB_MAX_FIRST_SEMIRING_INT8, // GxB_MAX_FIRST_INT8 GrB_MAX_FIRST_SEMIRING_INT16, // GxB_MAX_FIRST_INT16 GrB_MAX_FIRST_SEMIRING_INT32, // GxB_MAX_FIRST_INT32 GrB_MAX_FIRST_SEMIRING_INT64, // GxB_MAX_FIRST_INT64 GrB_MAX_FIRST_SEMIRING_UINT8, // GxB_MAX_FIRST_UINT8 GrB_MAX_FIRST_SEMIRING_UINT16, // GxB_MAX_FIRST_UINT16 GrB_MAX_FIRST_SEMIRING_UINT32, // GxB_MAX_FIRST_UINT32 GrB_MAX_FIRST_SEMIRING_UINT64, // GxB_MAX_FIRST_UINT64 GrB_MAX_FIRST_SEMIRING_FP32, // GxB_MAX_FIRST_FP32 GrB_MAX_FIRST_SEMIRING_FP64, // GxB_MAX_FIRST_FP64 // MAX_SECOND semirings for all 10 real, non-boolean types: GrB_MAX_SECOND_SEMIRING_INT8, // GxB_MAX_SECOND_INT8 GrB_MAX_SECOND_SEMIRING_INT16, // GxB_MAX_SECOND_INT16 GrB_MAX_SECOND_SEMIRING_INT32, // GxB_MAX_SECOND_INT32 GrB_MAX_SECOND_SEMIRING_INT64, // GxB_MAX_SECOND_INT64 GrB_MAX_SECOND_SEMIRING_UINT8, // GxB_MAX_SECOND_UINT8 GrB_MAX_SECOND_SEMIRING_UINT16, // GxB_MAX_SECOND_UINT16 GrB_MAX_SECOND_SEMIRING_UINT32, // GxB_MAX_SECOND_UINT32 GrB_MAX_SECOND_SEMIRING_UINT64, // GxB_MAX_SECOND_UINT64 GrB_MAX_SECOND_SEMIRING_FP32, // GxB_MAX_SECOND_FP32 GrB_MAX_SECOND_SEMIRING_FP64, // GxB_MAX_SECOND_FP64 // MAX_MIN semirings for all 10 real, non-boolean types: GrB_MAX_MIN_SEMIRING_INT8, // GxB_MAX_MIN_INT8 GrB_MAX_MIN_SEMIRING_INT16, // GxB_MAX_MIN_INT16 GrB_MAX_MIN_SEMIRING_INT32, // GxB_MAX_MIN_INT32 GrB_MAX_MIN_SEMIRING_INT64, // GxB_MAX_MIN_INT64 GrB_MAX_MIN_SEMIRING_UINT8, // GxB_MAX_MIN_UINT8 GrB_MAX_MIN_SEMIRING_UINT16, // GxB_MAX_MIN_UINT16 GrB_MAX_MIN_SEMIRING_UINT32, // GxB_MAX_MIN_UINT32 GrB_MAX_MIN_SEMIRING_UINT64, // GxB_MAX_MIN_UINT64 GrB_MAX_MIN_SEMIRING_FP32, // GxB_MAX_MIN_FP32 GrB_MAX_MIN_SEMIRING_FP64, // GxB_MAX_MIN_FP64 
//-------------------------------------------------------------------------- // 4 boolean semirings: //-------------------------------------------------------------------------- GrB_LOR_LAND_SEMIRING_BOOL, // GxB_LOR_LAND_BOOL GrB_LAND_LOR_SEMIRING_BOOL, // GxB_LAND_LOR_BOOL GrB_LXOR_LAND_SEMIRING_BOOL, // GxB_LXOR_LAND_BOOL GrB_LXNOR_LOR_SEMIRING_BOOL ; // GxB_EQ_LOR_BOOL (note EQ == LXNOR) //============================================================================== // GrB_*_resize: change the size of a matrix or vector //============================================================================== // If the dimensions decrease, entries that fall outside the resized matrix or // vector are deleted. GB_PUBLIC GrB_Info GrB_Matrix_resize // change the size of a matrix ( GrB_Matrix C, // matrix to modify GrB_Index nrows_new, // new number of rows in matrix GrB_Index ncols_new // new number of columns in matrix ) ; GB_PUBLIC GrB_Info GrB_Vector_resize // change the size of a vector ( GrB_Vector w, // vector to modify GrB_Index nrows_new // new number of rows in vector ) ; // GxB_*_resize are identical to the GrB_*resize methods above GB_PUBLIC GrB_Info GxB_Matrix_resize // change the size of a matrix (historical) ( GrB_Matrix C, // matrix to modify GrB_Index nrows_new, // new number of rows in matrix GrB_Index ncols_new // new number of columns in matrix ) ; GB_PUBLIC GrB_Info GxB_Vector_resize // change the size of a vector (historical) ( GrB_Vector w, // vector to modify GrB_Index nrows_new // new number of rows in vector ) ; // GxB_resize is a generic function for resizing a matrix or vector: // GrB_Vector_resize (u,nrows_new) // GrB_Matrix_resize (A,nrows_new,ncols_new) #if GxB_STDC_VERSION >= 201112L #define GxB_resize(arg1,...) 
\ _Generic \ ( \ (arg1), \ GrB_Vector : GrB_Vector_resize , \ GrB_Matrix : GrB_Matrix_resize \ ) \ (arg1, __VA_ARGS__) #endif //============================================================================== // GxB_fprint and GxB_print: print the contents of a GraphBLAS object //============================================================================== // GxB_fprint (object, GxB_Print_Level pr, FILE *f) prints the contents of any // of the 9 GraphBLAS objects to the file f, and also does an extensive test on // the object to determine if it is valid. It returns one of the following // error conditions: // // GrB_SUCCESS object is valid // GrB_UNINITIALIZED_OBJECT object is not initialized // GrB_INVALID_OBJECT object is not valid // GrB_NULL_POINTER object is a NULL pointer // GrB_INVALID_VALUE fprintf returned an I/O error; see the ANSI C // errno or GrB_error( )for details. // // GxB_fprint does not modify the status of any object. If a matrix or vector // has not been completed, the pending computations are guaranteed to *not* be // performed by GxB_fprint. The reason is simple. It is possible for a bug in // the user application (such as accessing memory outside the bounds of an // array) to mangle the internal content of a GraphBLAS object, and GxB_fprint // can be a helpful tool to track down this bug. If GxB_fprint attempted to // complete any computations prior to printing or checking the contents of the // matrix or vector, then further errors could occur, including a segfault. // // The type-specific functions include an additional argument, the name string. // The name is printed at the beginning of the display (assuming pr is not // GxB_SILENT) so that the object can be more easily identified in the output. // For the type-generic methods GxB_fprint and GxB_print, the name string is // the variable name of the object itself. // // If f is NULL, stdout is used; this is not an error condition. 
If pr is // outside the bounds 0 to 3, negative values are treated as GxB_SILENT, and // values > 3 are treated as GxB_COMPLETE. If name is NULL, it is treated as // the empty string. // // GxB_print (object, GxB_Print_Level pr) is the same as GxB_fprint, except // that it prints the contents with printf instead of fprintf to a file f. // // The exact content and format of what is printed is implementation-dependent, // and will change from version to version of SuiteSparse:GraphBLAS. Do not // attempt to rely on the exact content or format by trying to parse the // resulting output via another program. The intent of these functions is to // produce a report of the object for visual inspection. typedef enum { GxB_SILENT = 0, // nothing is printed, just check the object GxB_SUMMARY = 1, // print a terse summary GxB_SHORT = 2, // short description, about 30 entries of a matrix GxB_COMPLETE = 3, // print the entire contents of the object GxB_SHORT_VERBOSE = 4, // GxB_SHORT but with "%.15g" for doubles GxB_COMPLETE_VERBOSE = 5 // GxB_COMPLETE but with "%.15g" for doubles } GxB_Print_Level ; GB_PUBLIC GrB_Info GxB_Type_fprint // print and check a GrB_Type ( GrB_Type type, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_UnaryOp_fprint // print and check a GrB_UnaryOp ( GrB_UnaryOp unaryop, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_BinaryOp_fprint // print and check a GrB_BinaryOp ( GrB_BinaryOp binaryop, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_IndexUnaryOp_fprint // print and check a GrB_IndexUnaryOp ( GrB_IndexUnaryOp op, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f 
// file for output ) ; GB_PUBLIC GrB_Info GxB_SelectOp_fprint // print and check a GxB_SelectOp ( GxB_SelectOp selectop, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_Monoid_fprint // print and check a GrB_Monoid ( GrB_Monoid monoid, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_Semiring_fprint // print and check a GrB_Semiring ( GrB_Semiring semiring, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_Descriptor_fprint // print and check a GrB_Descriptor ( GrB_Descriptor descriptor, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_Matrix_fprint // print and check a GrB_Matrix ( GrB_Matrix A, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_Vector_fprint // print and check a GrB_Vector ( GrB_Vector v, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; GB_PUBLIC GrB_Info GxB_Scalar_fprint // print and check a GrB_Scalar ( GrB_Scalar s, // object to print and check const char *name, // name of the object GxB_Print_Level pr, // print level FILE *f // file for output ) ; #if GxB_STDC_VERSION >= 201112L #define GxB_fprint(object,pr,f) \ _Generic \ ( \ (object), \ const GrB_Type : GxB_Type_fprint , \ GrB_Type : GxB_Type_fprint , \ const GrB_UnaryOp : GxB_UnaryOp_fprint , \ GrB_UnaryOp : GxB_UnaryOp_fprint , \ const GrB_BinaryOp : GxB_BinaryOp_fprint , \ GrB_BinaryOp : GxB_BinaryOp_fprint , \ const GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \ 
GrB_IndexUnaryOp : GxB_IndexUnaryOp_fprint , \ const GxB_SelectOp : GxB_SelectOp_fprint , \ GxB_SelectOp : GxB_SelectOp_fprint , \ const GrB_Monoid : GxB_Monoid_fprint , \ GrB_Monoid : GxB_Monoid_fprint , \ const GrB_Semiring : GxB_Semiring_fprint , \ GrB_Semiring : GxB_Semiring_fprint , \ const GrB_Scalar : GxB_Scalar_fprint , \ GrB_Scalar : GxB_Scalar_fprint , \ const GrB_Vector : GxB_Vector_fprint , \ GrB_Vector : GxB_Vector_fprint , \ const GrB_Matrix : GxB_Matrix_fprint , \ GrB_Matrix : GxB_Matrix_fprint , \ const GrB_Descriptor : GxB_Descriptor_fprint , \ GrB_Descriptor : GxB_Descriptor_fprint \ ) \ (object, GB_STR(object), pr, f) #define GxB_print(object,pr) GxB_fprint(object,pr,NULL) #endif //============================================================================== // Matrix and vector import/export/pack/unpack //============================================================================== // The import/export/pack/unpack functions allow the user application to create // a GrB_Matrix or GrB_Vector object, and to extract its contents, faster and // with less memory overhead than the GrB_*_build and GrB_*_extractTuples // functions. // The semantics of import/export/pack/unpack are the same as the "move // constructor" in C++. On import, the user provides a set of arrays that have // been previously allocated via the ANSI C malloc function. The arrays define // the content of the matrix or vector. Unlike GrB_*_build, the GraphBLAS // library then takes ownership of the user's input arrays and may either (a) // incorporate them into its internal data structure for the new GrB_Matrix or // GrB_Vector, potentially creating the GrB_Matrix or GrB_Vector in constant // time with no memory copying performed, or (b) if the library does not // support the import format directly, then it may convert the input to its // internal format, and then free the user's input arrays. GraphBLAS may also // choose to use a mix of the two strategies. 
// In either case, the input arrays are no longer "owned" by the user
// application.  If A is a GrB_Matrix created by an import/pack, the user input
// arrays are freed no later than GrB_free (&A), and may be freed earlier, at
// the discretion of the GraphBLAS library.  The data structure of the
// GrB_Matrix and GrB_Vector remain opaque.

// The export/unpack of a GrB_Matrix or GrB_Vector is symmetric with the import
// operation.  The export is destructive, where the GrB_Matrix or GrB_Vector no
// longer exists when the export completes.  The GrB_Matrix or GrB_Vector
// exists after an unpack operation, just with no entries.  In both export and
// unpack, the user is returned several arrays that contain the matrix or
// vector in the requested format.  Ownership of these arrays is given to the
// user application, which is then responsible for freeing them via the ANSI C
// free function.  If the output format is supported by the GraphBLAS library,
// then these arrays may be returned to the user application in O(1) time and
// with no memory copying performed.  Otherwise, the GraphBLAS library will
// create the output arrays for the user (via the ANSI C malloc function), fill
// them with the GrB_Matrix or GrB_Vector data, and then return the newly
// allocated arrays to the user.

// Eight different formats are provided for import/export.  For each format,
// the Ax array has a C-type <type> corresponding to one of the 13 built-in
// types in GraphBLAS (bool, int*_t, uint*_t, float, double, float complex, or
// double complex), or a user-defined type.

// On import/pack, the required user arrays Ah, Ap, Ab, Ai, Aj, and/or Ax must
// be non-NULL pointers to memory space allocated by the ANSI C malloc (or
// calloc, or realloc), unless nzmax is zero (in which case the Ab, Ai, Aj, Ax,
// vb, vi, and vx arrays may all be NULL).  For the import, A (or GrB_Vector v)
// is undefined on input, just like a GrB_Matrix or GrB_Vector newly created
// with GrB_*_new.
// If the import is successful, the GrB_Matrix A or GrB_Vector v is created,
// and the pointers to the user input arrays have been set to NULL.  These user
// arrays have either been incorporated directly into the GrB_Matrix A or
// GrB_Vector v, in which case the user input arrays will eventually be freed
// by GrB_free (&A), or their contents have been copied and the arrays freed.
// This decision is made by the GraphBLAS library itself, and the user
// application has no control over this decision.

// If any of the arrays Ab, Aj, Ai, Ax, vb, vi, or vx have zero size (with
// nzmax of zero), they are allowed to be NULL pointers on input.

// A matrix or vector may be "iso", where all entries present in the pattern
// have the same value.  In this case, the boolean iso flag is true, and the
// corresponding numerical array (Ax for matrices, vx for vectors, below) need
// be only large enough to hold a single value.

// No error checking is performed on the content of the user input arrays.  If
// the user input arrays do not conform to the precise specifications above,
// results are undefined.  No typecasting of the values of the matrix or vector
// entries is performed on import or export.

// SuiteSparse:GraphBLAS supports all eight formats natively (CSR, CSC,
// HyperCSR, HyperCSC, BitmapR, BitmapC, FullR, and FullC).  For vectors, only
// CSC, BitmapC, and FullC formats are used.  On import, all eight formats
// take O(1) time and memory to import.  On export, if the GrB_Matrix or
// GrB_Vector is already in this particular format, then the export takes O(1)
// time and no memory copying is performed.

// If the import is not successful, the GxB_Matrix_import_* functions return A
// as NULL, GxB_Vector_import returns v as NULL, and the user input arrays are
// neither modified nor freed.  They are still owned by the user application.

// If the input data is untrusted, use the following descriptor setting for
// GxB_Matrix_import* and GxB_Matrix_pack*.
The import/pack will be slower, // but secure. GrB_Matrix_import uses the slow, secure method, since it has // no descriptor input. // // GxB_set (desc, GxB_IMPORT, GxB_SECURE_IMPORT) ; // As of v5.2.0, GxB_*import* and GxB_*export* are declared historical. Use // GxB_*pack* and GxB_*unpack* instead. The GxB import/export will be kept // but only documented here, not in the User Guide. //------------------------------------------------------------------------------ // GxB_Matrix_pack_CSR: pack a CSR matrix //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_CSR // historical: use GxB_Matrix_pack_CSR ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t) GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A) * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Aj_size, // size of Aj in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso bool jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_CSR // pack a CSR matrix ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) GrB_Index **Ap, // row "pointers", Ap_size >= (nrows+1)* sizeof(int64_t) GrB_Index **Aj, // column indices, Aj_size >= nvals(A) * sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A) * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Aj_size, // size of Aj in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso bool jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; // CSR: an 
nrows-by-ncols matrix with nvals entries in CSR format consists // of 3 arrays, where nvals = Ap [nrows]: // // GrB_Index Ap [nrows+1], Aj [nvals] ; <type> Ax [nvals] ; // // The column indices of entries in the ith row of the matrix are held // in Aj [Ap [i] ... Ap[i+1]], and the corresponding values are held // in the same positions in Ax. Column indices must be in the range 0 // to ncols-1. If jumbled is false, the column indices must appear in // sorted order within each row. No duplicate column indices may // appear in any row. Ap [0] must equal zero, and Ap [nrows] must // equal nvals. The Ap array must be of size nrows+1 (or larger), and // the Aj and Ax arrays must have size at least nvals. If nvals is // zero, then the Aj and Ax arrays need not be present and can be // NULL. //------------------------------------------------------------------------------ // GxB_Matrix_pack_CSC: pack a CSC matrix //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_CSC // historical: use GxB_Matrix_pack_CSC ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t) GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A) * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Ai_size, // size of Ai in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso bool jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_CSC // pack a CSC matrix ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) GrB_Index **Ap, // col "pointers", Ap_size >= (ncols+1)*sizeof(int64_t) GrB_Index **Ai, // row indices, 
Ai_size >= nvals(A)*sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A) * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Ai_size, // size of Ai in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso bool jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; // CSC: an nrows-by-ncols matrix with nvals entries in CSC format consists // of 3 arrays, where nvals = Ap [ncols]: // // GrB_Index Ap [ncols+1], Ai [nvals] ; <type> Ax [nvals] ; // // The row indices of entries in the jth column of the matrix are held // in Ai [Ap [j] ... Ap[j+1]], and the corresponding values are held // in the same positions in Ax. Row indices must be in the range 0 to // nrows-1. If jumbled is false, the row indices must appear in // sorted order within each column. No duplicate row indices may // appear in any column. Ap [0] must equal zero, and Ap [ncols] must // equal nvals. The Ap array must be of size ncols+1 (or larger), and // the Ai and Ax arrays must have size at least nvals. If nvals is // zero, then the Ai and Ax arrays need not be present and can be // NULL. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_HyperCSR: pack a hypersparse CSR matrix //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_HyperCSR // historical: use GxB_Matrix_pack_HyperCSR ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t) GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t) GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A) * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Ah_size, // size of Ah in bytes GrB_Index Aj_size, // size of Aj in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvec, // number of rows that appear in Ah bool jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_HyperCSR // pack a hypersparse CSR matrix ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) GrB_Index **Ap, // row "pointers", Ap_size >= (nvec+1)*sizeof(int64_t) GrB_Index **Ah, // row indices, Ah_size >= nvec*sizeof(int64_t) GrB_Index **Aj, // column indices, Aj_size >= nvals(A)*sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A) * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Ah_size, // size of Ah in bytes GrB_Index Aj_size, // size of Aj in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvec, // number of rows that appear in Ah bool jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; // HyperCSR: an nrows-by-ncols matrix with nvals 
entries and nvec // rows that may have entries in HyperCSR format consists of 4 arrays, // where nvals = Ap [nvec]: // // GrB_Index Ah [nvec], Ap [nvec+1], Aj [nvals] ; // <type> Ax [nvals] ; // // The Aj and Ax arrays are the same for a matrix in CSR or HyperCSR // format. Only Ap and Ah differ. // // The Ah array is a list of the row indices of rows that appear in // the matrix. It // must appear in sorted order, and no duplicates may appear. If i = // Ah [k] is the kth row, then the column indices of the ith // row appear in Aj [Ap [k] ... Ap [k+1]], and the corresponding // values appear in the same locations in Ax. Column indices must be // in the range 0 to ncols-1, and must appear in sorted order within // each row. No duplicate column indices may appear in any row. nvec // may be zero, to denote an array with no entries. The Ah array must // be of size at least nvec, Ap must be of size at least nvec+1, and // Aj and Ax must be at least of size nvals. If nvals is zero, then // the Aj and Ax arrays need not be present and can be NULL. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_HyperCSC: pack a hypersparse CSC matrix //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_HyperCSC // historical: use GxB_Matrix_pack_HyperCSC ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t) GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t) GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A)*(type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Ah_size, // size of Ah in bytes GrB_Index Ai_size, // size of Ai in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvec, // number of columns that appear in Ah bool jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_HyperCSC // pack a hypersparse CSC matrix ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) GrB_Index **Ap, // col "pointers", Ap_size >= (nvec+1)*sizeof(int64_t) GrB_Index **Ah, // column indices, Ah_size >= nvec*sizeof(int64_t) GrB_Index **Ai, // row indices, Ai_size >= nvals(A)*sizeof(int64_t) void **Ax, // values, Ax_size >= nvals(A)*(type size) // or Ax_size >= (type size), if iso is true GrB_Index Ap_size, // size of Ap in bytes GrB_Index Ah_size, // size of Ah in bytes GrB_Index Ai_size, // size of Ai in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvec, // number of columns that appear in Ah bool jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; // HyperCSC: an nrows-by-ncols matrix with 
nvals entries and nvec // columns that may have entries in HyperCSC format consists of 4 arrays, // where nvals = Ap [nvec]: // // // GrB_Index Ah [nvec], Ap [nvec+1], Ai [nvals] ; // <type> Ax [nvals] ; // // The Ai and Ax arrays are the same for a matrix in CSC or HyperCSC // format. Only Ap and Ah differ. // // The Ah array is a list of the column indices of non-empty columns. // It must appear in sorted order, and no duplicates may appear. If j // = Ah [k] is the kth non-empty column, then the row indices of the // jth column appear in Ai [Ap [k] ... Ap [k+1]], and the // corresponding values appear in the same locations in Ax. Row // indices must be in the range 0 to nrows-1, and must appear in // sorted order within each column. No duplicate row indices may // appear in any column. nvec may be zero, to denote an array with no // entries. The Ah array must be of size at least nvec, Ap must be of // size at least nvec+1, and Ai and Ax must be at least of size nvals. // If nvals is zero, then the Ai and Ax arrays need not be present and // can be NULL. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_BitmapR: pack a bitmap matrix, held by row //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_BitmapR // historical: use GxB_Matrix_pack_BitmapR ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_BitmapR // pack a bitmap matrix, held by row ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; // BitmapR: a dense format, but able to represent sparsity structure of A. // // int8_t Ab [nrows*ncols] ; // <type> Ax [nrows*ncols] ; // // Ab and Ax are both of size nrows*ncols. Ab [i*ncols+j] = 1 if the // A(i,j) entry is present with value Ax [i*ncols+j], or 0 if A(i,j) // is not present. nvals must equal the number of 1's in the Ab // array. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_BitmapC: pack a bitmap matrix, held by column //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_BitmapC // historical: use GxB_Matrix_pack_BitmapC ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_BitmapC // pack a bitmap matrix, held by column ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) int8_t **Ab, // bitmap, Ab_size >= nrows*ncols void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ab_size, // size of Ab in bytes GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; // BitmapC: a dense format, but able to represent sparsity structure of A. // // int8_t Ab [nrows*ncols] ; // <type> Ax [nrows*ncols] ; // // Ab and Ax are both of size nrows*ncols. Ab [i+j*nrows] = 1 if the // A(i,j) entry is present with value Ax [i+j*nrows], or 0 if A(i,j) // is not present. nvals must equal the number of 1's in the Ab // array. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_FullR: pack a full matrix, held by row //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_FullR // historical: use GxB_Matrix_pack_FullR ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_FullR // pack a full matrix, held by row ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; // FullR: an nrows-by-ncols full matrix held in row-major order: // // <type> Ax [nrows*ncols] ; // // Ax is an array of size nrows*ncols, where A(i,j) is held in // Ax [i*ncols+j]. All entries in A are present. 
//------------------------------------------------------------------------------ // GxB_Matrix_pack_FullC: pack a full matrix, held by column //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Matrix_import_FullC // historical: use GxB_Matrix_pack_FullC ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_pack_FullC // pack a full matrix, held by column ( GrB_Matrix A, // matrix to create (type, nrows, ncols unchanged) void **Ax, // values, Ax_size >= nrows*ncols * (type size) // or Ax_size >= (type size), if iso is true GrB_Index Ax_size, // size of Ax in bytes bool iso, // if true, A is iso const GrB_Descriptor desc ) ; // FullC: an nrows-by-ncols full matrix held in column-major order: // // <type> Ax [nrows*ncols] ; // // Ax is an array of size nrows*ncols, where A(i,j) is held in // Ax [i+j*nrows]. All entries in A are present. 
//------------------------------------------------------------------------------ // GxB_Vector_pack_CSC: import/pack a vector in CSC format //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Vector_import_CSC // historical: use GxB_Vector_pack_CSC ( GrB_Vector *v, // handle of vector to create GrB_Type type, // type of vector to create GrB_Index n, // vector length GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t) void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vi_size, // size of vi in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in vector bool jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_pack_CSC // pack a vector in CSC format ( GrB_Vector v, // vector to create (type and length unchanged) GrB_Index **vi, // indices, vi_size >= nvals(v) * sizeof(int64_t) void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vi_size, // size of vi in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in vector bool jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; // The GrB_Vector is treated as if it was a single column of an n-by-1 // matrix in CSC format, except that no vp array is required. If nvals is // zero, then the vi and vx arrays need not be present and can be NULL. 
//------------------------------------------------------------------------------ // GxB_Vector_pack_Bitmap: pack a vector in bitmap format //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Vector_import_Bitmap // historical: GxB_Vector_pack_Bitmap ( GrB_Vector *v, // handle of vector to create GrB_Type type, // type of vector to create GrB_Index n, // vector length int8_t **vb, // bitmap, vb_size >= n void **vx, // values, vx_size >= n * (type size) // or vx_size >= (type size), if iso is true GrB_Index vb_size, // size of vb in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_pack_Bitmap // pack a bitmap vector ( GrB_Vector v, // vector to create (type and length unchanged) int8_t **vb, // bitmap, vb_size >= n void **vx, // values, vx_size >= n * (type size) // or vx_size >= (type size), if iso is true GrB_Index vb_size, // size of vb in bytes GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso GrB_Index nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; // The GrB_Vector is treated as if it was a single column of an n-by-1 // matrix in BitmapC format. 
//------------------------------------------------------------------------------ // GxB_Vector_pack_Full: pack a vector in full format //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info GxB_Vector_import_Full // historical: use GxB_Vector_pack_Full ( GrB_Vector *v, // handle of vector to create GrB_Type type, // type of vector to create GrB_Index n, // vector length void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_pack_Full // pack a full vector ( GrB_Vector v, // vector to create (type and length unchanged) void **vx, // values, vx_size >= nvals(v) * (type size) // or vx_size >= (type size), if iso is true GrB_Index vx_size, // size of vx in bytes bool iso, // if true, v is iso const GrB_Descriptor desc ) ; // The GrB_Vector is treated as if it was a single column of an n-by-1 // matrix in FullC format. //------------------------------------------------------------------------------ // GxB* export/unpack //------------------------------------------------------------------------------ // The GxB_*_export/unpack functions are symmetric with the GxB_*_import/pack // functions. The export/unpack functions force completion of any pending // operations, prior to the export, except if the only pending operation is to // unjumble the matrix. // // If there are no entries in the matrix or vector, then the index arrays (Ai, // Aj, or vi) and value arrays (Ax or vx) are returned as NULL. This is not an // error condition. // // A GrB_Matrix may be exported/unpacked in any one of four different formats. // On successful export, the input GrB_Matrix A is freed, and the output arrays // Ah, Ap, Ai, Aj, and/or Ax are returned to the user application as arrays // allocated by the ANSI C malloc function. 
The four formats are the same as // the import formats for GxB_Matrix_import/pack. // // If jumbled is NULL on input, this indicates to GxB_*export/unpack* that the // exported/unpacked matrix cannot be returned in a jumbled format. In this // case, if the matrix is jumbled, it is sorted before exporting it to the // caller. // // If iso is NULL on input, this indicates to the export/unpack methods that // the exported/unpacked matrix cannot be returned in a iso format, with an Ax // array with just one entry. In this case, if the matrix is iso, it is // expanded before exporting/unpacking it to the caller. // // For the export/unpack*Full* methods, all entries in the matrix or must be // present. That is, GrB_*_nvals must report nvals equal to nrows*ncols or a // matrix. If this condition does not hold, the matrix/vector is not exported, // and GrB_INVALID_VALUE is returned. // // If the export/unpack is not successful, the export/unpack functions do not // modify matrix or vector and the user arrays are returned as NULL. 
GB_PUBLIC GrB_Info GxB_Matrix_export_CSR // historical: use GxB_Matrix_unpack_CSR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // row "pointers" GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_CSR // unpack a CSR matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // row "pointers" GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_CSC // historical: use GxB_Matrix_unpack_CSC ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // column "pointers" GrB_Index **Ai, // row indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_CSC // unpack a CSC matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // column "pointers" GrB_Index **Ai, // row indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes 
GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_HyperCSR // historical: use GxB_Matrix_unpack_HyperCSR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // row "pointers" GrB_Index **Ah, // row indices GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of rows that appear in Ah bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_HyperCSR // unpack a hypersparse CSR matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // row "pointers" GrB_Index **Ah, // row indices GrB_Index **Aj, // column indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Aj_size, // size of Aj in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of rows that appear in Ah bool *jumbled, // if true, indices in each row may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_HyperCSC // historical: use GxB_Matrix_unpack_HyperCSC ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix GrB_Index **Ap, // column "pointers" GrB_Index **Ah, // column indices GrB_Index **Ai, // row indices void 
**Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of columns that appear in Ah bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_HyperCSC // unpack a hypersparse CSC matrix ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) GrB_Index **Ap, // column "pointers" GrB_Index **Ah, // column indices GrB_Index **Ai, // row indices void **Ax, // values GrB_Index *Ap_size, // size of Ap in bytes GrB_Index *Ah_size, // size of Ah in bytes GrB_Index *Ai_size, // size of Ai in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvec, // number of columns that appear in Ah bool *jumbled, // if true, indices in each column may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_BitmapR // historical: use GxB_Matrix_unpack_BitmapR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_BitmapR // unpack a bitmap matrix, by row ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_BitmapC // historical: use GxB_Matrix_unpack_BitmapC 
( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_BitmapC // unpack a bitmap matrix, by col ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) int8_t **Ab, // bitmap void **Ax, // values GrB_Index *Ab_size, // size of Ab in bytes GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FullR // historical: use GxB_Matrix_unpack_FullR ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_FullR // unpack a full matrix, by row ( GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FullC // historical: use GxB_Matrix_unpack_FullC ( GrB_Matrix *A, // handle of matrix to export and free GrB_Type *type, // type of matrix exported GrB_Index *nrows, // number of rows of the matrix GrB_Index *ncols, // number of columns of the matrix void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_unpack_FullC // unpack a full matrix, by column ( 
GrB_Matrix A, // matrix to unpack (type, nrows, ncols unchanged) void **Ax, // values GrB_Index *Ax_size, // size of Ax in bytes bool *iso, // if true, A is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_export_CSC // historical: use GxB_Vector_unpack_CSC ( GrB_Vector *v, // handle of vector to export and free GrB_Type *type, // type of vector exported GrB_Index *n, // length of the vector GrB_Index **vi, // indices void **vx, // values GrB_Index *vi_size, // size of vi in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in vector bool *jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_unpack_CSC // unpack a CSC vector ( GrB_Vector v, // vector to unpack (type and length unchanged) GrB_Index **vi, // indices void **vx, // values GrB_Index *vi_size, // size of vi in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in vector bool *jumbled, // if true, indices may be unsorted const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_export_Bitmap // historical: use GxB_Vector_unpack_Bitmap ( GrB_Vector *v, // handle of vector to export and free GrB_Type *type, // type of vector exported GrB_Index *n, // length of the vector int8_t **vb, // bitmap void **vx, // values GrB_Index *vb_size, // size of vb in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_unpack_Bitmap // unpack a bitmap vector ( GrB_Vector v, // vector to unpack (type and length unchanged) int8_t **vb, // bitmap void **vx, // values GrB_Index *vb_size, // size of vb in bytes GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso GrB_Index *nvals, // # of entries in bitmap const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_export_Full // 
historical: use GxB_Vector_unpack_Full ( GrB_Vector *v, // handle of vector to export and free GrB_Type *type, // type of vector exported GrB_Index *n, // length of the vector void **vx, // values GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Vector_unpack_Full // unpack a full vector ( GrB_Vector v, // vector to unpack (type and length unchanged) void **vx, // values GrB_Index *vx_size, // size of vx in bytes bool *iso, // if true, v is iso const GrB_Descriptor desc ) ; //============================================================================== // GrB import/export //============================================================================== // The GrB_Matrix_import method copies from user-provided arrays into an // opaque GrB_Matrix and GrB_Matrix_export copies data out, from an opaque // GrB_Matrix into user-provided arrays. Unlike the GxB pack/unpack methods, // memory is not handed off between the user application and GraphBLAS. // These methods are much slower than the GxB pack/unpack methods, since they // require a copy of the data to be made. GrB_Matrix_import also must assume // its input data cannot be trusted, and so it does extensive checks. The GxB // pack takes O(1) time in all cases (unless it is told the input data is // untrusted, via the descriptor). GxB unpack takes O(1) time unless the // matrix is exported in a different format than it currently has. // No typecasting of the values is done on import or export. 
// The GrB C API specification supports 3 formats: typedef enum { GrB_CSR_FORMAT = 0, // CSR format (equiv to GxB_SPARSE with GxB_BY_ROW) GrB_CSC_FORMAT = 1, // CSC format (equiv to GxB_SPARSE with GxB_BY_COL) GrB_COO_FORMAT = 2 // triplet format (like input to GrB*build) } GrB_Format ; GB_PUBLIC GrB_Info GrB_Matrix_import_BOOL // import a GrB_BOOL matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_BOOL) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const bool *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT8 // import a GrB_INT8 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_iNT8) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const int8_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT16 // import a GrB_INT16 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_INT16) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const 
int16_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT32 // import a GrB_INT32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_INT32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const int32_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_INT64 // import a GrB_INT64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_INT64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const int64_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT8 // import a GrB_UINT8 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT8) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint8_t *Ax, // values 
GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT16 // import a GrB_UINT16 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT16) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint16_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT32 // import a GrB_UINT32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint32_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UINT64 // import a GrB_UINT64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_UINT64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const uint64_t *Ax, // values GrB_Index Ap_len, 
// number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_FP32 // import a GrB_FP32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_FP32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const float *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_FP64 // import a GrB_FP64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GrB_FP64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const double *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GxB_Matrix_import_FC32 // import a GxB_FC32 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GxB_FC32) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const GxB_FC32_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of 
bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GxB_Matrix_import_FC64 // import a GxB_FC64 matrix ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create (must be GxB_FC64) GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const GxB_FC64_t *Ax, // values GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; GB_PUBLIC GrB_Info GrB_Matrix_import_UDT // import a matrix with a user-defined type ( GrB_Matrix *A, // handle of matrix to create GrB_Type type, // type of matrix to create GrB_Index nrows, // number of rows of the matrix GrB_Index ncols, // number of columns of the matrix const GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO const GrB_Index *Ai, // row indices for CSR, CSC const void *Ax, // values (must match the type parameter) GrB_Index Ap_len, // number of entries in Ap (not # of bytes) GrB_Index Ai_len, // number of entries in Ai (not # of bytes) GrB_Index Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format // import format ) ; #if GxB_STDC_VERSION >= 201112L #define GrB_Matrix_import(A,type,nrows,ncols,Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt)\ _Generic \ ( \ (Ax), \ GB_CASES (*, GrB, Matrix_import) \ ) \ (A, type, nrows, ncols, Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt) #endif // For GrB_Matrix_export_T: on input, Ap_len, Ai_len, and Ax_len are // the size of the 3 arrays Ap, Ai, and Ax, in terms of the # of entries. // On output, these 3 values are modified to be the # of entries copied // into those 3 arrays. 
GB_PUBLIC GrB_Info GrB_Matrix_export_BOOL // export a GrB_BOOL matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC bool *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_BOOL) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT8 // export a GrB_INT8 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int8_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT8) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT16 // export a GrB_INT16 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int16_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT16) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT32 // export a GrB_INT32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int32_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index 
*Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT32) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_INT64 // export a GrB_INT64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC int64_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_INT64) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT8 // export a GrB_UINT8 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint8_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT8) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT16 // export a GrB_UINT16 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint16_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT16) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT32 // export a GrB_UINT32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint32_t *Ax, 
// values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT32) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UINT64 // export a GrB_UINT64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC uint64_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_UINT64) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_FP32 // export a GrB_FP32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC float *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_FP32) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_FP64 // export a GrB_FP64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC double *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_FP64) ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FC32 // export a 
GrB_FC32 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC GxB_FC32_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_FC32) ) ; GB_PUBLIC GrB_Info GxB_Matrix_export_FC64 // export a GrB_FC64 matrix ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC GxB_FC64_t *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export (must be of type GrB_FC64) ) ; GB_PUBLIC GrB_Info GrB_Matrix_export_UDT // export a matrix with a user-defined type ( GrB_Index *Ap, // pointers for CSR, CSC, column indices for COO GrB_Index *Ai, // col indices for CSR/COO, row indices for CSC void *Ax, // values (must match the type of A) GrB_Index *Ap_len, // number of entries in Ap (not # of bytes) GrB_Index *Ai_len, // number of entries in Ai (not # of bytes) GrB_Index *Ax_len, // number of entries in Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export ) ; #if GxB_STDC_VERSION >= 201112L #define GrB_Matrix_export(Ap,Ai,Ax,Ap_len,Ai_len,Ax_len,fmt,A) \ _Generic \ ( \ (Ax), \ GB_CASES (*, GrB, Matrix_export) \ ) \ (Ap, Ai, Ax, Ap_len, Ai_len, Ax_len, fmt, A) #endif GB_PUBLIC GrB_Info GrB_Matrix_exportSize // determine sizes of user arrays for export ( GrB_Index *Ap_len, // # of entries required for Ap (not # of bytes) GrB_Index *Ai_len, // # of entries required for Ai (not # of bytes) GrB_Index 
*Ax_len, // # of entries required for Ax (not # of bytes) GrB_Format format, // export format GrB_Matrix A // matrix to export ) ; GB_PUBLIC GrB_Info GrB_Matrix_exportHint // suggest the best export format ( GrB_Format *format, // export format GrB_Matrix A // matrix to export ) ; //============================================================================== // serialize/deserialize //============================================================================== // GxB_Matrix_serialize copies the contents of a GrB_Matrix into a single array // of bytes (the "blob"). The contents of the blob are implementation // dependent. The blob can be saved to a file, or sent across a communication // channel, and then a GrB_Matrix can be reconstructed from the blob, even on // another process or another machine, using the same version of // SuiteSparse:GraphBLAS (v5.2.0 or later). The goal is that future versions // of SuiteSparse:GraphBLAS should be able to read in the blob as well, and // reconstruct a matrix. The matrix can be reconstructed from the blob using // GxB_Matrix_deserialize. The blob is compressed, by default, and // uncompressed by GxB_Matrix_deserialize. // GrB_Matrix_serialize/deserialize are slightly different from their GxB* // counterparts. The blob is allocated by GxB_Matrix_serialize, and must be // freed by GxB_serialize_free (which calls the ANSI C11 free if GrB_init was // used). By contrast, the GrB* methods require the user application to pass // in a preallocated blob to GrB_Matrix_serialize, whose size can be given by // GrB_Matrix_serializeSize (as a loose upper bound). // The GrB* and GxB* methods can be mixed. GrB_Matrix_serialize and // GxB_Matrix_serialize construct the same blob (assuming they are given the // same # of threads to do the work). Both GrB_Matrix_deserialize and // GxB_Matrix_deserialize can deserialize a blob coming from either // GrB_Matrix_serialize or GxB_Matrix_serialize. 
// Deserialization of untrusted data is a common security problem; see
// https://cwe.mitre.org/data/definitions/502.html.  The deserialization methods
// below do a few basic checks so that no out-of-bounds access occurs during
// deserialization, but the output matrix itself may still be corrupted.  If
// the data is untrusted, use this to check the matrix:
//      GxB_Matrix_fprint (A, "A deserialized", GrB_SILENT, NULL)

// Example usage:

/*
    //--------------------------------------------------------------------------
    // using GxB serialize/deserialize
    //--------------------------------------------------------------------------

    // Given a GrB_Matrix A: assuming a user-defined type:

    void *blob ;
    GrB_Index blob_size ;
    GxB_Matrix_serialize (&blob, &blob_size, A, NULL) ;
    FILE *f = fopen ("myblob", "w") ;
    fwrite (&blob_size, sizeof (size_t), 1, f) ;
    fwrite (blob, sizeof (uint8_t), blob_size, f) ;
    fclose (f) ;
    GrB_Matrix_free (&A) ;
    // B is a copy of A
    GxB_Matrix_deserialize (&B, MyQtype, blob, blob_size, NULL) ;
    GrB_Matrix_free (&B) ;
    free (blob) ;
    GrB_finalize ( ) ;

    // --- in another process, to recreate the GrB_Matrix A:

    GrB_init (GrB_NONBLOCKING) ;
    FILE *f = fopen ("myblob", "r") ;
    fread (&blob_size, sizeof (size_t), 1, f) ;
    blob = malloc (blob_size) ;
    fread (blob, sizeof (uint8_t), blob_size, f) ;
    fclose (f) ;
    char type_name [GxB_MAX_NAME_LEN] ;
    GxB_deserialize_type_name (type_name, blob, blob_size) ;
    printf ("blob type is: %s\n", type_name) ;
    GrB_Type user_type = NULL ;
    if (strncmp (type_name, "myquaternion", GxB_MAX_NAME_LEN) == 0)
        user_type = MyQtype ;
    GxB_Matrix_deserialize (&A, user_type, blob, blob_size, NULL) ;
    free (blob) ;               // note, freed by the user, not GraphBLAS

    //--------------------------------------------------------------------------
    // using GrB serialize/deserialize
    //--------------------------------------------------------------------------

    // Given a GrB_Matrix A: assuming a user-defined type, MyQType:

    void *blob = NULL ;
    GrB_Index blob_size = 0 ;
    GrB_Matrix A, B = NULL ;

    // construct a matrix A, then serialize it:
    GrB_Matrix_serializeSize (&blob_size, A) ;      // loose upper bound
    blob = malloc (blob_size) ;
    GrB_Matrix_serialize (blob, &blob_size, A) ;    // returns actual size
    blob = realloc (blob, blob_size) ;              // user can shrink the blob
    FILE *f = fopen ("myblob", "w") ;
    fwrite (&blob_size, sizeof (size_t), 1, f) ;
    fwrite (blob, sizeof (uint8_t), blob_size, f) ;
    fclose (f) ;
    GrB_Matrix_free (&A) ;

    // B is a copy of A:
    GrB_Matrix_deserialize (&B, MyQtype, blob, blob_size) ;
    GrB_Matrix_free (&B) ;
    free (blob) ;
    GrB_finalize ( ) ;

    // --- in another process, to recreate the GrB_Matrix A:

    GrB_init (GrB_NONBLOCKING) ;
    FILE *f = fopen ("myblob", "r") ;
    fread (&blob_size, sizeof (size_t), 1, f) ;
    blob = malloc (blob_size) ;
    fread (blob, sizeof (uint8_t), blob_size, f) ;
    fclose (f) ;
    // the user must know the type of A is MyQType
    GrB_Matrix_deserialize (&A, MyQtype, blob, blob_size) ;
    free (blob) ;
*/

// Three methods are currently implemented: no compression, LZ4, and LZ4HC
#define GxB_COMPRESSION_NONE      -1        // no compression
#define GxB_COMPRESSION_DEFAULT    0        // LZ4
#define GxB_COMPRESSION_LZ4     1000        // LZ4
#define GxB_COMPRESSION_LZ4HC   2000        // LZ4HC, with default level 9

// possible future methods that could be added:
// #define GxB_COMPRESSION_ZLIB    3000     // ZLIB, with default level 6
// #define GxB_COMPRESSION_LZO     4000     // LZO, with default level 2
// #define GxB_COMPRESSION_BZIP2   5000     // BZIP2, with default level 9
// #define GxB_COMPRESSION_LZSS    6000     // LZSS

// using the Intel IPP versions, if available (not yet supported);
#define GxB_COMPRESSION_INTEL   1000000

// Most of the above methods have a level parameter that controls the tradeoff
// between run time and the amount of compression obtained.
// Higher levels
// result in a more compact result, at the cost of higher run time:

//    LZ4     no level setting
//    LZ4HC   1: fast, 9: default, 9: max

// these methods are not yet supported but may be added in the future:
//    ZLIB    1: fast, 6: default, 9: max
//    LZO     1: fast (X1ST), 2: default (XST)
//    BZIP2   1: fast, 9: default, 9: max
//    LZSS    no level setting

// For all methods, a level of zero results in the default level setting.
// These settings can be added, so to use LZ4HC at level 5, use method =
// GxB_COMPRESSION_LZ4HC + 5.

// If the Intel IPPS compression methods are available, they can be selected
// by adding GxB_COMPRESSION_INTEL.  For example, to use the Intel IPPS
// implementation of LZ4HC at level 9, use method = GxB_COMPRESSION_INTEL +
// GxB_COMPRESSION_LZ4HC + 9 = 1,002,009.  If the Intel methods are requested
// but not available, this setting is ignored and the non-Intel methods are
// used instead.

// If the level setting is out of range, the default is used for that method.
// If the method is negative, no compression is performed.  If the method is
// positive but unrecognized, the default is used (GxB_COMPRESSION_LZ4, with no
// level setting, and the non-Intel version).

// If a method is not implemented, LZ4 is used instead, and the level setting
// is ignored.

GB_PUBLIC
GrB_Info GxB_Matrix_serialize       // serialize a GrB_Matrix to a blob
(
    // output:
    void **blob_handle,             // the blob, allocated on output
    GrB_Index *blob_size_handle,    // size of the blob on output
    // input:
    GrB_Matrix A,                   // matrix to serialize
    const GrB_Descriptor desc       // descriptor to select compression method
                                    // and to control # of threads used
) ;

GB_PUBLIC
GrB_Info GrB_Matrix_serialize       // serialize a GrB_Matrix to a blob
(
    // output:
    void *blob,                     // the blob, already allocated on input
    // input/output:
    GrB_Index *blob_size_handle,    // size of the blob on input.  On output,
                                    // the # of bytes used in the blob.
// input: GrB_Matrix A // matrix to serialize ) ; GB_PUBLIC GrB_Info GxB_Vector_serialize // serialize a GrB_Vector to a blob ( // output: void **blob_handle, // the blob, allocated on output GrB_Index *blob_size_handle, // size of the blob on output // input: GrB_Vector u, // vector to serialize const GrB_Descriptor desc // descriptor to select compression method // and to control # of threads used ) ; GB_PUBLIC GrB_Info GrB_Matrix_serializeSize // estimate the size of a blob ( // output: GrB_Index *blob_size_handle, // upper bound on the required size of the // blob on output. // input: GrB_Matrix A // matrix to serialize ) ; // The GrB* and GxB* deserialize methods are nearly identical. The GxB* // deserialize methods simply add the descriptor, which allows for optional // control of the # of threads used to deserialize the blob. GB_PUBLIC GrB_Info GxB_Matrix_deserialize // deserialize blob into a GrB_Matrix ( // output: GrB_Matrix *C, // output matrix created from the blob // input: GrB_Type type, // type of the matrix C. Required if the blob holds a // matrix of user-defined type. May be NULL if blob // holds a built-in type; otherwise must match the // type of C. const void *blob, // the blob GrB_Index blob_size, // size of the blob const GrB_Descriptor desc // to control # of threads used ) ; GB_PUBLIC GrB_Info GrB_Matrix_deserialize // deserialize blob into a GrB_Matrix ( // output: GrB_Matrix *C, // output matrix created from the blob // input: GrB_Type type, // type of the matrix C. Required if the blob holds a // matrix of user-defined type. May be NULL if blob // holds a built-in type; otherwise must match the // type of C. const void *blob, // the blob GrB_Index blob_size // size of the blob ) ; GB_PUBLIC GrB_Info GxB_Vector_deserialize // deserialize blob into a GrB_Vector ( // output: GrB_Vector *w, // output vector created from the blob // input: GrB_Type type, // type of the vector w. Required if the blob holds a // vector of user-defined type. 
May be NULL if blob // holds a built-in type; otherwise must match the // type of w. const void *blob, // the blob GrB_Index blob_size, // size of the blob const GrB_Descriptor desc // to control # of threads used ) ; // GxB_deserialize_type_name extracts the type_name of the GrB_Type of the // GrB_Matrix or GrB_Vector held in a serialized blob. On input, type_name // must point to a user-owned char array of size at least GxB_MAX_NAME_LEN (it // must not point into the blob itself). On output, type_name will contain a // null-terminated string with the corresponding C type name. If the blob // holds a matrix of a built-in type, the name is returned as "bool" for // GrB_BOOL, "uint8_t" for GrB_UINT8, "float complex" for GxB_FC32, etc. // See GxB_Type_name to convert this name into a GrB_Type. GB_PUBLIC GrB_Info GxB_deserialize_type_name // return the type name of a blob ( // output: char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). // input, not modified: const void *blob, // the blob GrB_Index blob_size // size of the blob ) ; //============================================================================== // GxB_Vector_sort and GxB_Matrix_sort: sort a matrix or vector //============================================================================== GB_PUBLIC GrB_Info GxB_Vector_sort ( // output: GrB_Vector w, // vector of sorted values GrB_Vector p, // vector containing the permutation // input GrB_BinaryOp op, // comparator op GrB_Vector u, // vector to sort const GrB_Descriptor desc ) ; GB_PUBLIC GrB_Info GxB_Matrix_sort ( // output: GrB_Matrix C, // matrix of sorted values GrB_Matrix P, // matrix containing the permutations // input GrB_BinaryOp op, // comparator op GrB_Matrix A, // matrix to sort const GrB_Descriptor desc ) ; #define GxB_sort(arg1,...) 
\ _Generic \ ( \ (arg1), \ GrB_Vector : GxB_Vector_sort , \ GrB_Matrix : GxB_Matrix_sort \ ) \ (arg1, __VA_ARGS__) //============================================================================== // GxB_Iterator: an object that iterates over the entries of a matrix or vector //============================================================================== /* Example usage: single thread iteration of a whole matrix, one row at a time (in the outer loop), and one entry at a time within the row (in the inner loop): // create an iterator GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; // attach it to the matrix A, known to be type GrB_FP64 GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ; if (info < 0) { handle the failure ... } // seek to A(0,:) info = GxB_rowIterator_seekRow (iterator, 0) ; while (info != GxB_EXHAUSTED) { // iterate over entries in A(i,:) GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ; while (info == GrB_SUCCESS) { // get the entry A(i,j) GrB_Index j = GxB_rowIterator_getColIndex (iterator) ; double aij = GxB_Iterator_get_FP64 (iterator) ; // move to the next entry in A(i,:) info = GxB_rowIterator_nextCol (iterator) ; } // move to the next row, A(i+1,:) info = GxB_rowIterator_nextRow (iterator) ; } GrB_free (&iterator) ; parallel iteration using 4 threads (work may be imbalanced however): GrB_Index nrows ; GrB_wait (A, GrB_MATERIALIZE) ; // this is essential GrB_Matrix_nrows (&nrows, A) ; #pragma omp parallel for num_threads(4) for (int tid = 0 ; tid < 4 ; tid++) { // thread tid operates on A(row1:row2-1,:) GrB_Index row1 = tid * (nrows / 4) ; GrB_Index row2 = (tid == 3) ? nrows : ((tid+1) * (nrows / 4)) ; GxB_Iterator iterator ; GxB_Iterator_new (&iterator) ; GrB_Info info = GxB_rowIterator_attach (iterator, A, NULL) ; if (info < 0) { handle the failure ... 
} // seek to A(row1,:) info = GxB_rowIterator_seekRow (iterator, row1) ; while (info != GxB_EXHAUSTED) { // iterate over entries in A(i,:) GrB_Index i = GxB_rowIterator_getRowIndex (iterator) ; if (i >= row2) break ; while (info == GrB_SUCCESS) { // get the entry A(i,j) GrB_Index j = GxB_rowIterator_getColIndex (iterator) ; double aij = GxB_Iterator_get_FP64 (iterator) ; // move to the next entry in A(i,:) info = GxB_rowIterator_nextCol (iterator) ; } // move to the next row, A(i+1,:) info = GxB_rowIterator_nextRow (iterator) ; } GrB_free (&iterator) ; } In the parallel example above, a more balanced work distribution can be obtained by first computing the row degree via GrB_mxv (see LAGraph), and then compute the cumulative sum (ideally in parallel). Next, partition the cumulative sum into one part per thread via binary search, and divide the rows into parts accordingly. */ //------------------------------------------------------------------------------ // GxB_Iterator: definition and new/free methods //------------------------------------------------------------------------------ // The contents of an iterator must not be directly accessed by the user // application. Only the functions and macros provided here may access // "iterator->..." contents. The iterator is defined here only so that macros // can be used to speed up the use of the iterator methods. User applications // must not use "iterator->..." directly. 
// Opaque iterator state.  The layout is exposed in this header ONLY so the
// GxB_*Iterator_* macros below can be expanded inline; user applications must
// never read or write these fields directly.  Changing this layout is an
// ABI-level change for any code compiled against this header.
struct GB_Iterator_opaque
{
    // these components change as the iterator moves (via seek or next):
    int64_t pstart ;            // the start of the current vector
    int64_t pend ;              // the end of the current vector
    int64_t p ;                 // position of the current entry
    int64_t k ;                 // the current vector

    // only changes when the iterator is created:
    size_t header_size ;        // size of this iterator object

    // these components only change when the iterator is attached:
    int64_t pmax ;              // avlen*avdim for bitmap; nvals(A) otherwise
    int64_t avlen ;             // length of each vector in the matrix
    int64_t avdim ;             // number of vectors in the matrix dimension
    int64_t anvec ;             // # of vectors present in the matrix
    const int64_t *GB_restrict Ap ;     // pointers for sparse and hypersparse
    const int64_t *GB_restrict Ah ;     // vector names for hypersparse
    const int8_t *GB_restrict Ab ;      // bitmap
    const int64_t *GB_restrict Ai ;     // indices for sparse and hypersparse
    const void *GB_restrict Ax ;        // values for all 4 data structures
    size_t type_size ;          // size of the type of A
    int A_sparsity ;            // sparse, hyper, bitmap, or full
    bool iso ;                  // true if A is iso-valued, false otherwise
    bool by_col ;               // true if A is held by column, false if by row
} ;

// Public handle type: a pointer to the opaque state above.
typedef struct GB_Iterator_opaque *GxB_Iterator ;

// GxB_Iterator_new: create a new iterator, not attached to any matrix/vector
GB_PUBLIC
GrB_Info GxB_Iterator_new (GxB_Iterator *iterator) ;

// GxB_Iterator_free: free an iterator
GB_PUBLIC
GrB_Info GxB_Iterator_free (GxB_Iterator *iterator) ;

//==============================================================================
// GB_Iterator_*: implements user-callable GxB_*Iterator_* methods
//==============================================================================

// GB_* methods are not user-callable.  These methods appear here so that the
// iterator methods can be done via macros.
//------------------------------------------------------------------------------
// GB_Iterator_attach: attach a row/col/entry iterator to a matrix
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_Iterator_attach
(
    GxB_Iterator iterator,      // iterator to attach to the matrix A
    GrB_Matrix A,               // matrix to attach
    GxB_Format_Value format,    // by row, by col, or by entry (GxB_NO_FORMAT)
    GrB_Descriptor desc
) ;

//------------------------------------------------------------------------------
// GB_Iterator_rc_seek: seek a row/col iterator to a particular vector
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_Iterator_rc_seek
(
    GxB_Iterator iterator,
    GrB_Index j,
    bool jth_vector             // if true, j is the jth vector held in the
                                // data structure; otherwise j is a vector name
) ;

//------------------------------------------------------------------------------
// GB_Iterator_rc_bitmap_next: move a row/col iterator to next entry in bitmap
//------------------------------------------------------------------------------

GB_PUBLIC
GrB_Info GB_Iterator_rc_bitmap_next (GxB_Iterator iterator) ;

//------------------------------------------------------------------------------
// GB_Iterator_rc_knext: move a row/col iterator to the next vector
//------------------------------------------------------------------------------

#define GB_Iterator_rc_knext(iterator)                                        \
(                                                                             \
    /* move to the next vector, and check if iterator is exhausted */         \
    (++(iterator->k) >= iterator->anvec) ?                                    \
    (                                                                         \
        /* iterator is at the end of the matrix */                            \
        iterator->pstart = 0,                                                 \
        iterator->pend = 0,                                                   \
        iterator->p = 0,                                                      \
        iterator->k = iterator->anvec,                                        \
        GxB_EXHAUSTED                                                         \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        /* find first entry in vector, and pstart/pend for this vector */     \
        (iterator->A_sparsity <= GxB_SPARSE) ?                                \
        (                                                                     \
            /* matrix is sparse or hypersparse */                             \
            iterator->pstart = iterator->Ap [iterator->k],                    \
            iterator->pend = iterator->Ap [iterator->k+1],                    \
            iterator->p = iterator->pstart,                                   \
            ((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS)    \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            /* matrix is bitmap or full */                                    \
            iterator->pstart += iterator->avlen,                              \
            iterator->pend += iterator->avlen,                                \
            iterator->p = iterator->pstart,                                   \
            (iterator->A_sparsity <= GxB_BITMAP) ?                            \
            (                                                                 \
                /* matrix is bitmap */                                        \
                GB_Iterator_rc_bitmap_next (iterator)                         \
            )                                                                 \
            :                                                                 \
            (                                                                 \
                /* matrix is full */                                          \
                ((iterator->p >= iterator->pend) ? GrB_NO_VALUE : GrB_SUCCESS) \
            )                                                                 \
        )                                                                     \
    )                                                                         \
)

//------------------------------------------------------------------------------
// GB_Iterator_rc_inext: move a row/col iterator the next entry in the vector
//------------------------------------------------------------------------------

#define GB_Iterator_rc_inext(iterator)                                        \
(                                                                             \
    /* move to the next entry in the vector */                                \
    (++(iterator->p) >= iterator->pend) ?                                     \
    (                                                                         \
        /* no more entries in the current vector */                           \
        GrB_NO_VALUE                                                          \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        (iterator->A_sparsity == GxB_BITMAP) ?                                \
        (                                                                     \
            /* the matrix is in bitmap form */                                \
            GB_Iterator_rc_bitmap_next (iterator)                             \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            GrB_SUCCESS                                                       \
        )                                                                     \
    )                                                                         \
)

//------------------------------------------------------------------------------
// GB_Iterator_rc_getj: get index of current vector for row/col iterator
//------------------------------------------------------------------------------

#define GB_Iterator_rc_getj(iterator)                                         \
(                                                                             \
    (iterator->k >= iterator->anvec) ?                                        \
    (                                                                         \
        /* iterator is past the end of the matrix */                          \
        iterator->avdim                                                       \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        (iterator->A_sparsity == GxB_HYPERSPARSE) ?                           \
        (                                                                     \
            /* return the name of kth vector: j = Ah [k] if it appears */     \
            iterator->Ah [iterator->k]                                        \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            /* return the kth vector: j = k */                                \
            iterator->k                                                       \
        )                                                                     \
    )                                                                         \
)

//------------------------------------------------------------------------------
// GB_Iterator_rc_geti: return index of current entry for row/col iterator
//------------------------------------------------------------------------------

#define GB_Iterator_rc_geti(iterator)                                         \
(                                                                             \
    (iterator->Ai != NULL) ?                                                  \
    (                                                                         \
        /* sparse/hypersparse: index is read from Ai */                       \
        iterator->Ai [iterator->p]                                            \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        /* bitmap/full: index is implicit in the position p */                \
        (iterator->p - iterator->pstart)                                      \
    )                                                                         \
)

//==============================================================================
// GxB_rowIterator_*: iterate over the rows of a matrix
//==============================================================================

#undef GxB_rowIterator_attach
#undef GxB_rowIterator_kount
#undef GxB_rowIterator_seekRow
#undef GxB_rowIterator_kseek
#undef GxB_rowIterator_nextRow
#undef GxB_rowIterator_nextCol
#undef GxB_rowIterator_getRowIndex
#undef GxB_rowIterator_getColIndex

//------------------------------------------------------------------------------
// GxB_rowIterator_attach: attach a row iterator to a matrix
//------------------------------------------------------------------------------

// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.

// GxB_rowIterator_attach attaches a row iterator to a matrix.  If the iterator
// is already attached to a matrix, it is detached and then attached to the
// given matrix A.

// The following error conditions are returned:
// GrB_NULL_POINTER:    if the iterator or A are NULL.
// GrB_INVALID_OBJECT:  if the matrix A is invalid.
// GrB_NOT_IMPLEMENTED: if the matrix A cannot be iterated by row.
// GrB_OUT_OF_MEMORY:   if the method runs out of memory.

// If successful, the row iterator is attached to the matrix, but not to any
// specific row.  Use GxB_rowIterator_*seek* to move the iterator to a row.
GB_PUBLIC
GrB_Info GxB_rowIterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;

#define GxB_rowIterator_attach(iterator, A, desc)                             \
(                                                                             \
    GB_Iterator_attach (iterator, A, GxB_BY_ROW, desc)                        \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_kount: upper bound on the # of nonempty rows of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.

// GxB_rowIterator_kount returns an upper bound on the # of non-empty rows of a
// matrix.  A GraphBLAS library may always return this as simply nrows(A), but
// in some libraries, it may be a value between the # of rows with at least one
// entry, and nrows(A), inclusive.  Any value in this range is a valid return
// value from this function.

// For SuiteSparse:GraphBLAS: If A is m-by-n, and sparse, bitmap, or full, then
// kount == m.  If A is hypersparse, kount is the # of vectors held in the data
// structure for the matrix, some of which may be empty, and kount <= m.

GB_PUBLIC
GrB_Index GxB_rowIterator_kount (GxB_Iterator iterator) ;

#define GxB_rowIterator_kount(iterator)                                       \
(                                                                             \
    (iterator)->anvec                                                         \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_seekRow: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.

// GxB_rowIterator_seekRow moves a row iterator to the first entry of A(row,:).
// If A(row,:) has no entries, the iterator may move to the first entry of next
// nonempty row i for some i > row.  The row index can be determined by
// GxB_rowIterator_getRowIndex.

// For SuiteSparse:GraphBLAS: If the matrix is hypersparse, and the row
// does not appear in the hyperlist, then the iterator is moved to the first
// row after the given row that does appear in the hyperlist.

// The method is always successful; the following conditions are returned:
// GxB_EXHAUSTED:   if the row index is >= nrows(A); the row iterator is
//                  exhausted, but is still attached to the matrix.
// GrB_NO_VALUE:    if the row index is valid but A(row,:) has no entries; the
//                  row iterator is positioned at A(row,:).
// GrB_SUCCESS:     if the row index is valid and A(row,:) has at least one
//                  entry.  The row iterator is positioned at A(row,:).
//                  GxB_rowIterator_get* can be used to return the indices of
//                  the first entry in A(row,:), and GxB_Iterator_get* can
//                  return its value.

GB_PUBLIC
GrB_Info GxB_rowIterator_seekRow (GxB_Iterator iterator, GrB_Index row) ;

#define GxB_rowIterator_seekRow(iterator, row)                                \
(                                                                             \
    GB_Iterator_rc_seek (iterator, row, false)                                \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_kseek: move a row iterator to a different row of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must be attached to a matrix, but need not be at
// any specific row; results are undefined if this condition is not met.

// GxB_rowIterator_kseek is identical to GxB_rowIterator_seekRow, except for
// how the row index is specified.  The row is the kth non-empty row of A.
// More precisely, k is in the range 0 to kount-1, where kount is the value
// returned by GxB_rowIterator_kount.

GB_PUBLIC
GrB_Info GxB_rowIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;

#define GxB_rowIterator_kseek(iterator, k)                                    \
(                                                                             \
    GB_Iterator_rc_seek (iterator, k, true)                                   \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_nextRow: move a row iterator to the next row of a matrix
//------------------------------------------------------------------------------

// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.

// If the row iterator is currently at A(row,:), it is moved to A(row+1,:),
// or to the first non-empty row after A(row,:), at the discretion of this
// method.  That is, empty rows may be skipped.

// The method is always successful, and the return conditions are identical to
// the return conditions of GxB_rowIterator_seekRow.

GB_PUBLIC
GrB_Info GxB_rowIterator_nextRow (GxB_Iterator iterator) ;

#define GxB_rowIterator_nextRow(iterator)                                     \
(                                                                             \
    GB_Iterator_rc_knext (iterator)                                           \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_nextCol: move a row iterator to the next entry in A(row,:)
//------------------------------------------------------------------------------

// On input, the row iterator must already be attached to a matrix via a prior
// call to GxB_rowIterator_attach, and the iterator must be at a specific row,
// via a prior call to GxB_rowIterator_*seek* or GxB_rowIterator_nextRow;
// results are undefined if this condition is not met.

// The method is always successful, and returns the following conditions:
// GrB_NO_VALUE:    If the iterator is already exhausted, or if there is no
//                  entry in the current A(row,:),
// GrB_SUCCESS:     If the row iterator has been moved to the next entry in
//                  A(row,:).

GB_PUBLIC
GrB_Info GxB_rowIterator_nextCol (GxB_Iterator iterator) ;

#define GxB_rowIterator_nextCol(iterator)                                     \
(                                                                             \
    GB_Iterator_rc_inext ((iterator))                                         \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_getRowIndex: get current row index of a row iterator
//------------------------------------------------------------------------------

// On input, the iterator must be already successfully attached to matrix as a
// row iterator; results are undefined if this condition is not met.

// The method returns nrows(A) if the iterator is exhausted, or the current
// row index otherwise.  There need not be any entry in the current row.
// Zero is returned if the iterator is attached to the matrix but
// GxB_rowIterator_*seek* has not been called, but this does not mean the
// iterator is positioned at row zero.

GB_PUBLIC
GrB_Index GxB_rowIterator_getRowIndex (GxB_Iterator iterator) ;

#define GxB_rowIterator_getRowIndex(iterator)                                 \
(                                                                             \
    GB_Iterator_rc_getj ((iterator))                                          \
)

//------------------------------------------------------------------------------
// GxB_rowIterator_getColIndex: get current column index of a row iterator
//------------------------------------------------------------------------------

// On input, the iterator must be already successfully attached to matrix as a
// row iterator, and in addition, the row iterator must be positioned at a
// valid entry present in the matrix.  That is, the last call to
// GxB_rowIterator_*seek* or GxB_rowIterator_*next*, must have returned
// GrB_SUCCESS.  Results are undefined if this condition is not met.

GB_PUBLIC
GrB_Index GxB_rowIterator_getColIndex (GxB_Iterator iterator) ;

#define GxB_rowIterator_getColIndex(iterator)                                 \
(                                                                             \
    GB_Iterator_rc_geti ((iterator))                                          \
)

//==============================================================================
// GxB_colIterator_*: iterate over columns of a matrix
//==============================================================================

// The column iterator is analogous to the row iterator.

#undef GxB_colIterator_attach
#undef GxB_colIterator_kount
#undef GxB_colIterator_seekCol
#undef GxB_colIterator_kseek
#undef GxB_colIterator_nextCol
#undef GxB_colIterator_nextRow
#undef GxB_colIterator_getColIndex
#undef GxB_colIterator_getRowIndex

// GxB_colIterator_attach: attach a column iterator to a matrix
GB_PUBLIC
GrB_Info GxB_colIterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;

#define GxB_colIterator_attach(iterator, A, desc)                             \
(                                                                             \
    GB_Iterator_attach (iterator, A, GxB_BY_COL, desc)                        \
)

// GxB_colIterator_kount: return # of nonempty columns of the matrix
GB_PUBLIC
GrB_Index GxB_colIterator_kount (GxB_Iterator iterator) ;

#define GxB_colIterator_kount(iterator)                                       \
(                                                                             \
    (iterator)->anvec                                                         \
)

// GxB_colIterator_seekCol: move a column iterator to A(:,col)
GB_PUBLIC
GrB_Info GxB_colIterator_seekCol (GxB_Iterator iterator, GrB_Index col) ;

#define GxB_colIterator_seekCol(iterator, col)                                \
(                                                                             \
    GB_Iterator_rc_seek (iterator, col, false)                                \
)

// GxB_colIterator_kseek: move a column iterator to kth non-empty column of A
GB_PUBLIC
GrB_Info GxB_colIterator_kseek (GxB_Iterator iterator, GrB_Index k) ;

#define GxB_colIterator_kseek(iterator, k)                                    \
(                                                                             \
    GB_Iterator_rc_seek (iterator, k, true)                                   \
)

// GxB_colIterator_nextCol: move a column iterator to first entry of next column
GB_PUBLIC
GrB_Info GxB_colIterator_nextCol (GxB_Iterator iterator) ;

#define GxB_colIterator_nextCol(iterator)                                     \
(                                                                             \
    GB_Iterator_rc_knext ((iterator))                                         \
)

// GxB_colIterator_nextRow: move a column iterator to next entry in column
GB_PUBLIC
GrB_Info GxB_colIterator_nextRow (GxB_Iterator iterator) ;

#define GxB_colIterator_nextRow(iterator)                                     \
(                                                                             \
    GB_Iterator_rc_inext ((iterator))                                         \
)

// GxB_colIterator_getColIndex: return the column index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getColIndex (GxB_Iterator iterator) ;

#define GxB_colIterator_getColIndex(iterator)                                 \
(                                                                             \
    GB_Iterator_rc_getj ((iterator))                                          \
)

// GxB_colIterator_getRowIndex: return the row index of current entry
GB_PUBLIC
GrB_Index GxB_colIterator_getRowIndex (GxB_Iterator iterator) ;

#define GxB_colIterator_getRowIndex(iterator)                                 \
(                                                                             \
    GB_Iterator_rc_geti ((iterator))                                          \
)

//==============================================================================
// GxB_Matrix_Iterator_*: iterate over the entries of a matrix
//==============================================================================

// Example usage:

// single thread iteration of a whole matrix, one entry at a time

/*
    // create an iterator
    GxB_Iterator iterator ;
    GxB_Iterator_new (&iterator) ;
    // attach it to the matrix A, known to be type GrB_FP64
    GrB_Info info = GxB_Matrix_Iterator_attach (iterator, A, NULL) ;
    if (info < 0) { handle the failure ... }
    // seek to the first entry
    info = GxB_Matrix_Iterator_seek (iterator, 0) ;
    while (info != GxB_EXHAUSTED)
    {
        // get the entry A(i,j)
        GrB_Index i, j ;
        GxB_Matrix_Iterator_getIndex (iterator, &i, &j) ;
        double aij = GxB_Iterator_get_FP64 (iterator) ;
        // move to the next entry in A
        info = GxB_Matrix_Iterator_next (iterator) ;
    }
    GrB_free (&iterator) ;
*/

//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_attach: attach an entry iterator to a matrix
//------------------------------------------------------------------------------

// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.

// GxB_Matrix_Iterator_attach attaches an entry iterator to a matrix.
// If the iterator is already attached to a matrix, it is detached and then
// attached to the given matrix A.

// The following error conditions are returned:
// GrB_NULL_POINTER:    if the iterator or A are NULL.
// GrB_INVALID_OBJECT:  if the matrix A is invalid.
// GrB_OUT_OF_MEMORY:   if the method runs out of memory.

// If successful, the entry iterator is attached to the matrix, but not to any
// specific entry.  Use GxB_Matrix_Iterator_*seek* to move the iterator to a
// particular entry.

GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_attach
(
    GxB_Iterator iterator,
    GrB_Matrix A,
    GrB_Descriptor desc
) ;

//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getpmax: return the range of the iterator
//------------------------------------------------------------------------------

// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.

// Entries in a matrix are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(A).  For sparse, hypersparse, and full matrices, pmax is equal
// to nvals(A).  For an m-by-n bitmap matrix, pmax=m*n, or pmax=0 if the
// matrix has no entries.

GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getpmax (GxB_Iterator iterator) ;

//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_seek: seek to a specific entry
//------------------------------------------------------------------------------

// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach; results are undefined if this condition is not
// met.

// The input p is in range 0 to pmax-1, which points to an entry in the matrix,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Matrix_Iterator_getpmax.

// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.

GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;

//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_next: move to the next entry of a matrix
//------------------------------------------------------------------------------

// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next.  Results are undefined if these conditions are not
// met.

// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// matrix, or GxB_EXHAUSTED if the iterator is exhausted.

GB_PUBLIC
GrB_Info GxB_Matrix_Iterator_next (GxB_Iterator iterator) ;

//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getp: get the current position of a matrix iterator
//------------------------------------------------------------------------------

// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next.  Results are undefined if these conditions are not
// met.

GB_PUBLIC
GrB_Index GxB_Matrix_Iterator_getp (GxB_Iterator iterator) ;

//------------------------------------------------------------------------------
// GxB_Matrix_Iterator_getIndex: get the row and column index of a matrix entry
//------------------------------------------------------------------------------

// On input, the entry iterator must be already attached to a matrix via
// GxB_Matrix_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Matrix_Iterator_seek or
// GxB_Matrix_Iterator_next, with a return value of GrB_SUCCESS.  Results are
// undefined if these conditions are not met.

GB_PUBLIC
void GxB_Matrix_Iterator_getIndex
(
    GxB_Iterator iterator,
    GrB_Index *row,
    GrB_Index *col
) ;

//==============================================================================
// GxB_Vector_Iterator_*: iterate over the entries of a vector
//==============================================================================

/* Example usage:

single thread iteration of a whole vector, one entry at a time

    // create an iterator
    GxB_Iterator iterator ;
    GxB_Iterator_new (&iterator) ;
    // attach it to the vector v, known to be type GrB_FP64
    GrB_Info info = GxB_Vector_Iterator_attach (iterator, v, NULL) ;
    if (info < 0) { handle the failure ... }
    // seek to the first entry
    info = GxB_Vector_Iterator_seek (iterator, 0) ;
    while (info != GxB_EXHAUSTED)
    {
        // get the entry v(i)
        GrB_Index i = GxB_Vector_Iterator_getIndex (iterator) ;
        double vi = GxB_Iterator_get_FP64 (iterator) ;
        // move to the next entry in v
        info = GxB_Vector_Iterator_next (iterator) ;
    }
    GrB_free (&iterator) ;
*/

#undef GxB_Vector_Iterator_getpmax
#undef GxB_Vector_Iterator_seek
#undef GxB_Vector_Iterator_next
#undef GxB_Vector_Iterator_getp
#undef GxB_Vector_Iterator_getIndex

//------------------------------------------------------------------------------
// GxB_Vector_Iterator_attach: attach an iterator to a vector
//------------------------------------------------------------------------------

// On input, the iterator must already exist, having been created by
// GxB_Iterator_new.

// GxB_Vector_Iterator_attach attaches an iterator to a vector.  If the
// iterator is already attached to a vector or matrix, it is detached and then
// attached to the given vector v.

// The following error conditions are returned:
// GrB_NULL_POINTER:    if the iterator or v are NULL.
// GrB_INVALID_OBJECT:  if the vector v is invalid.
// GrB_OUT_OF_MEMORY:   if the method runs out of memory.

// If successful, the iterator is attached to the vector, but not to any
// specific entry.  Use GxB_Vector_Iterator_seek to move the iterator to a
// particular entry.

GB_PUBLIC
GrB_Info GxB_Vector_Iterator_attach
(
    GxB_Iterator iterator,
    GrB_Vector v,
    GrB_Descriptor desc
) ;

//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getpmax: return the range of the vector iterator
//------------------------------------------------------------------------------

// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.

// Entries in a vector are given an index p, ranging from 0 to pmax-1, where
// pmax >= nvals(v).
// For sparse and full vectors, pmax is equal to nvals(v).  For a size-m
// bitmap vector, pmax=m, or pmax=0 if the vector has no entries.

GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getpmax (GxB_Iterator iterator) ;

#define GxB_Vector_Iterator_getpmax(iterator)                                 \
(                                                                             \
    (iterator->pmax)                                                          \
)

//------------------------------------------------------------------------------
// GxB_Vector_Iterator_seek: seek to a specific entry in the vector
//------------------------------------------------------------------------------

// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach; results are undefined if this condition is not
// met.

// The input p is in range 0 to pmax-1, which points to an entry in the vector,
// or p >= pmax if the iterator is exhausted, where pmax is the return value
// from GxB_Vector_Iterator_getpmax.

// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.

GB_PUBLIC
GrB_Info GB_Vector_Iterator_bitmap_seek (GxB_Iterator iterator, GrB_Index p) ;

GB_PUBLIC
GrB_Info GxB_Vector_Iterator_seek (GxB_Iterator iterator, GrB_Index p) ;

#define GB_Vector_Iterator_seek(iterator, q)                                  \
(                                                                             \
    (q >= iterator->pmax) ?                                                   \
    (                                                                         \
        /* the iterator is exhausted */                                       \
        iterator->p = iterator->pmax,                                         \
        GxB_EXHAUSTED                                                         \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        /* seek to an arbitrary position in the vector */                     \
        iterator->p = q,                                                      \
        (iterator->A_sparsity == GxB_BITMAP) ?                                \
        (                                                                     \
            /* bitmap: advance to the next position actually present */       \
            GB_Vector_Iterator_bitmap_seek (iterator, q)                      \
        )                                                                     \
        :                                                                     \
        (                                                                     \
            GrB_SUCCESS                                                       \
        )                                                                     \
    )                                                                         \
)

#define GxB_Vector_Iterator_seek(iterator, p)                                 \
(                                                                             \
    GB_Vector_Iterator_seek (iterator, p)                                     \
)

//------------------------------------------------------------------------------
// GxB_Vector_Iterator_next: move to the next entry of a vector
//------------------------------------------------------------------------------

// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next.  Results are undefined if these conditions are not
// met.

// Returns GrB_SUCCESS if the iterator is at an entry that exists in the
// vector, or GxB_EXHAUSTED if the iterator is exhausted.

GB_PUBLIC
GrB_Info GxB_Vector_Iterator_next (GxB_Iterator iterator) ;

#define GB_Vector_Iterator_next(iterator)                                     \
(                                                                             \
    /* move to the next entry */                                              \
    (++(iterator->p) >= iterator->pmax) ?                                     \
    (                                                                         \
        /* the iterator is exhausted */                                       \
        iterator->p = iterator->pmax,                                         \
        GxB_EXHAUSTED                                                         \
    )                                                                         \
    :                                                                         \
    (                                                                         \
        GrB_SUCCESS                                                           \
    )                                                                         \
)

#define GxB_Vector_Iterator_next(iterator)                                    \
(                                                                             \
    GB_Vector_Iterator_next (iterator)                                        \
)

//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getp: get the current position of a vector iterator
//------------------------------------------------------------------------------

// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next.  Results are undefined if these conditions are not
// met.

GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getp (GxB_Iterator iterator) ;

#define GxB_Vector_Iterator_getp(iterator)                                    \
(                                                                             \
    (iterator->p)                                                             \
)

//------------------------------------------------------------------------------
// GxB_Vector_Iterator_getIndex: get the index of a vector entry
//------------------------------------------------------------------------------

// On input, the iterator must be already attached to a vector via
// GxB_Vector_Iterator_attach, and the position of the iterator must also have
// been defined by a prior call to GxB_Vector_Iterator_seek or
// GxB_Vector_Iterator_next, with a return value of GrB_SUCCESS.  Results are
// undefined if these conditions are not met.

GB_PUBLIC
GrB_Index GxB_Vector_Iterator_getIndex (GxB_Iterator iterator) ;

#define GxB_Vector_Iterator_getIndex(iterator)                                \
(                                                                             \
    ((iterator->Ai != NULL) ? iterator->Ai [iterator->p] : iterator->p)       \
)

//==============================================================================
// GxB_Iterator_get_TYPE: get value of the current entry for any iterator
//==============================================================================

// On input, the prior call to GxB_*Iterator_*seek*, or GxB_*Iterator_*next*
// must have returned GrB_SUCCESS, indicating that the iterator is at a valid
// current entry for either a matrix or vector.

// Returns the value of the current entry at the position determined by the
// iterator.  No typecasting is permitted; the method name must match the
// type of the matrix or vector.
#undef GxB_Iterator_get_BOOL
#undef GxB_Iterator_get_INT8
#undef GxB_Iterator_get_INT16
#undef GxB_Iterator_get_INT32
#undef GxB_Iterator_get_INT64
#undef GxB_Iterator_get_UINT8
#undef GxB_Iterator_get_UINT16
#undef GxB_Iterator_get_UINT32
#undef GxB_Iterator_get_UINT64
#undef GxB_Iterator_get_FP32
#undef GxB_Iterator_get_FP64
#undef GxB_Iterator_get_FC32
#undef GxB_Iterator_get_FC64
#undef GxB_Iterator_get_UDT

GB_PUBLIC bool       GxB_Iterator_get_BOOL   (GxB_Iterator iterator) ;
GB_PUBLIC int8_t     GxB_Iterator_get_INT8   (GxB_Iterator iterator) ;
GB_PUBLIC int16_t    GxB_Iterator_get_INT16  (GxB_Iterator iterator) ;
GB_PUBLIC int32_t    GxB_Iterator_get_INT32  (GxB_Iterator iterator) ;
GB_PUBLIC int64_t    GxB_Iterator_get_INT64  (GxB_Iterator iterator) ;
GB_PUBLIC uint8_t    GxB_Iterator_get_UINT8  (GxB_Iterator iterator) ;
GB_PUBLIC uint16_t   GxB_Iterator_get_UINT16 (GxB_Iterator iterator) ;
GB_PUBLIC uint32_t   GxB_Iterator_get_UINT32 (GxB_Iterator iterator) ;
GB_PUBLIC uint64_t   GxB_Iterator_get_UINT64 (GxB_Iterator iterator) ;
GB_PUBLIC float      GxB_Iterator_get_FP32   (GxB_Iterator iterator) ;
GB_PUBLIC double     GxB_Iterator_get_FP64   (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC32_t GxB_Iterator_get_FC32   (GxB_Iterator iterator) ;
GB_PUBLIC GxB_FC64_t GxB_Iterator_get_FC64   (GxB_Iterator iterator) ;
GB_PUBLIC void       GxB_Iterator_get_UDT    (GxB_Iterator iterator,
                                              void *value) ;

// An iso-valued matrix stores a single value for all entries, so the value is
// read from position 0 of Ax; otherwise the value is at position p.
#define GB_Iterator_get(iterator, type)                                       \
(                                                                             \
    (((type *) (iterator)->Ax) [(iterator)->iso ? 0 : (iterator)->p])         \
)

#define GxB_Iterator_get_BOOL(iterator)   GB_Iterator_get (iterator, bool)
#define GxB_Iterator_get_INT8(iterator)   GB_Iterator_get (iterator, int8_t)
#define GxB_Iterator_get_INT16(iterator)  GB_Iterator_get (iterator, int16_t)
#define GxB_Iterator_get_INT32(iterator)  GB_Iterator_get (iterator, int32_t)
#define GxB_Iterator_get_INT64(iterator)  GB_Iterator_get (iterator, int64_t)
#define GxB_Iterator_get_UINT8(iterator)  GB_Iterator_get (iterator, uint8_t)
#define GxB_Iterator_get_UINT16(iterator) GB_Iterator_get (iterator, uint16_t)
#define GxB_Iterator_get_UINT32(iterator) GB_Iterator_get (iterator, uint32_t)
#define GxB_Iterator_get_UINT64(iterator) GB_Iterator_get (iterator, uint64_t)
#define GxB_Iterator_get_FP32(iterator)   GB_Iterator_get (iterator, float)
#define GxB_Iterator_get_FP64(iterator)   GB_Iterator_get (iterator, double)
#define GxB_Iterator_get_FC32(iterator)   GB_Iterator_get (iterator, GxB_FC32_t)
#define GxB_Iterator_get_FC64(iterator)   GB_Iterator_get (iterator, GxB_FC64_t)

// user-defined types: copy type_size bytes into the caller's value buffer
#define GxB_Iterator_get_UDT(iterator, value)                                 \
(                                                                             \
    (void) memcpy ((void *) value, ((const uint8_t *) ((iterator)->Ax)) +     \
        ((iterator)->iso ? 0 : ((iterator)->type_size * (iterator)->p)),      \
        (iterator)->type_size)                                                \
)

#endif
GB_unaryop__identity_fp32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE: only comments have been added below; all code is unchanged.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_fp32_uint64
// op(A') function: GB_tran__identity_fp32_uint64

// C type: float
// A type: uint64_t
// cast: float cij = (float) aij
// unaryop: cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// address of the C entry at position p
#define GB_CX(p) Cx [p]

// unary operator (identity: z gets x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (uint64_t -> float)
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the identity operator (with uint64_t -> float cast) to all anz
// entries of Ax, writing the result to Cx, using up to nthreads threads.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__identity_fp32_uint64
(
    float *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose A into C while applying the cast+identity operator.  The loop
// body lives in GB_unaryop_transpose.c, instantiated via the macros above.
GrB_Info GB_tran__identity_fp32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
blackscholes.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. 
"P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF ( fptype InputX ) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// 
////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// fptype BlkSchlsEqEuroNoDiv( fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet ) { fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log( sptprice / strike ); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF( d1 ); NofXd2 = CNDF( d2 ); FutureValueX = strike * ( exp( -(rate)*(time) ) ); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); for (int i=begin; i!=end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. 
*/ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-5 ){ fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j=0; j<NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 0; } #else // !ENABLE_TBB #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr){ #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) for (i=0; i<numOptions; i++) { #else //ENABLE_OPENMP for (i=start; i<end; i++) { #endif //ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ){ printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } return 0; } #endif //ENABLE_TBB int main (int argc, char **argv) { FILE *file; int i; int loopnum; fptype * buffer; int * buffer2; int rv; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif argc = 4; if (argc != 4) { printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); exit(1); } nThreads = 1; // atoi(argv[1]); char *inputFile = "/BKI.TXT"; // argv[2]; char *outputFile = "/OUT.TXT"; // argv[3]; //Read input data from file file = fopen(inputFile, "r"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", inputFile); exit(1); } rv = fscanf(file, "%i", &numOptions); if(rv != 1) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } if(nThreads > numOptions) { printf("WARNING: Not enough work, reducing number of threads to match number of options.\n"); nThreads = numOptions; } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) if(nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); exit(1); } #endif // alloc spaces for the option data data = (OptionData*)malloc(numOptions*sizeof(OptionData)); prices = (fptype*)malloc(numOptions*sizeof(fptype)); for ( loopnum = 0; loopnum < numOptions; ++ loopnum ) { rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval); if(rv != 9) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv 
!= 0) { printf("ERROR: Unable to close file `%s'.\n", inputFile); exit(1); } #ifdef ENABLE_THREADS MAIN_INITENV(,8000000,nThreads); #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD); otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i=0; i<numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 HANDLE *threads; int *nums; threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE)); nums = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { tids[i]=i; CREATE_WITH_ARG(bs_thread, &tids[i]); } WAIT_FOR_END(nThreads); free(tids); #endif //WIN32 #else //ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid=0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else //ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid=0; bs_thread(&tid); #else //ENABLE_TBB //serial version int tid=0; bs_thread(&tid); #endif //ENABLE_TBB #endif //ENABLE_OPENMP #endif //ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif //Write prices to output file file = 
fopen(outputFile, "w"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", outputFile); exit(1); } rv = fprintf(file, "%i\n", numOptions); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } for(i=0; i<numOptions; i++) { rv = fprintf(file, "%.18f\n", prices[i]); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", outputFile); exit(1); } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif return 0; }
deconvolution_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Deconvolution (transposed convolution) kernel for the RISC-V vector (RVV)
// extension, fp16 storage.  Input is packn-packed __fp16; each output value is
// a scalar (pack1) -- presumably "packnto1" refers to this layout change; the
// packing scheme itself is defined by the caller, TODO confirm.  This variant
// widens to an fp32 accumulator (vfwmacc) for accuracy.
static void deconvolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of __fp16 lanes per vector register (vlenb bytes / 2);
    // vl is fixed to packn for the whole kernel.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Effective kernel footprint once dilation is applied.
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Scalar accumulator, seeded with the bias for this channel.
                float sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                // Vector accumulator over the packn input lanes.
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Map output row i back to the input row that
                        // contributes via kernel tap y; skip positions that
                        // fall between strides or outside the input.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            // Same inverse mapping for the column.
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // Widening fp16*fp16 -> fp32 multiply-accumulate.
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            _sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

#if C906
                // TODO
                // C906 workaround: spill the vector accumulator and reduce
                // it with scalar adds instead of vfredusum.
                std::vector<float> ss(packn);
                vse32_v_f32m2((float*)ss.data(), _sum, vl);
                for (int i = 0; i < packn; i++)
                {
                    sum += ss[i];
                }
#else
                // Horizontal reduction of _sum into the scalar sum.
                sum = vfmv_f_s_f32m1_f32(vfredusum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));
#endif

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}

// Same kernel as above, but with fp16 arithmetic throughout (fp16 bias,
// vfmacc/vfredusum at e16) -- the "fp16sa" (storage + arithmetic) variant.
// Faster but lower precision; layout and loop structure are identical.
static void deconvolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of __fp16 lanes per vector register.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp16 scalar accumulator seeded with the per-channel bias.
                __fp16 sum = 0.f;

                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // Inverse stride/dilation mapping; skip non-contributing taps.
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // Non-widening fp16 multiply-accumulate.
                            vfloat16m1_t _val = vle16_v_f16m1(sptr, vl);
                            vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl);
                            _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);
                        }
                    }

                    kptr += maxk * packn;
                }

                // Horizontal fp16 reduction into the scalar sum.
                sum = vfmv_f_s_f16m1_f16(vfredusum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}
fourier.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF OOO U U RRRR IIIII EEEEE RRRR % % F O O U U R R I E R R % % FFF O O U U RRRR I EEE RRRR % % F O O U U R R I E R R % % F OOO UUU R R IIIII EEEEE R R % % % % % % MagickCore Discrete Fourier Transform Methods % % % % Software Design % % Sean Burke % % Fred Weinhaus % % John Cristy % % July 2009 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/fourier.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/pixel-accessor.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/thread-private.h" #if defined(MAGICKCORE_FFTW_DELEGATE) #if defined(MAGICKCORE_HAVE_COMPLEX_H) #include <complex.h> #endif #include <fftw3.h> #if !defined(MAGICKCORE_HAVE_CABS) #define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1])) #endif #if !defined(MAGICKCORE_HAVE_CARG) #define carg(z) (atan2(cimag(z),creal(z))) #endif #if !defined(MAGICKCORE_HAVE_CIMAG) #define cimag(z) (z[1]) #endif #if !defined(MAGICKCORE_HAVE_CREAL) #define creal(z) (z[0]) #endif #endif /* Typedef declarations. */ typedef struct _FourierInfo { ChannelType channel; MagickBooleanType modulus; size_t width, height; ssize_t center; } FourierInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F o r w a r d F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ForwardFourierTransformImage() implements the discrete Fourier transform % (DFT) of the image either as a magnitude / phase or real / imaginary image % pair. % % The format of the ForwadFourierTransformImage method is: % % Image *ForwardFourierTransformImage(const Image *image, % const MagickBooleanType modulus,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o modulus: if true, return as transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType RollFourier(const size_t width,const size_t height, const ssize_t x_offset,const ssize_t y_offset,double *fourier) { double *roll; register ssize_t i, x; ssize_t u, v, y; /* Move zero frequency (DC, average color) from (0,0) to (width/2,height/2). */ roll=(double *) AcquireQuantumMemory((size_t) height,width*sizeof(*roll)); if (roll == (double *) NULL) return(MagickFalse); i=0L; for (y=0L; y < (ssize_t) height; y++) { if (y_offset < 0L) v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset; else v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height : y+y_offset; for (x=0L; x < (ssize_t) width; x++) { if (x_offset < 0L) u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset; else u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width : x+x_offset; roll[v*width+u]=fourier[i++]; } } (void) CopyMagickMemory(fourier,roll,height*width*sizeof(*roll)); roll=(double *) RelinquishMagickMemory(roll); return(MagickTrue); } static MagickBooleanType ForwardQuadrantSwap(const size_t width, const size_t height,double *source,double *destination) { MagickBooleanType status; register ssize_t x; ssize_t center, y; /* Swap quadrants. 
*/ center=(ssize_t) floor((double) width/2L)+1L; status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,source); if (status == MagickFalse) return(MagickFalse); for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) destination[width*y+x+width/2L]=source[center*y+x]; for (y=1; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L-1L); x++) destination[width*(height-y)+width/2L-x-1L]=source[center*y+x+1L]; for (x=0L; x < (ssize_t) (width/2L); x++) destination[-x+width/2L-1L]=destination[x+width/2L+1L]; return(MagickTrue); } static void CorrectPhaseLHS(const size_t width,const size_t height, double *fourier) { register ssize_t x; ssize_t y; for (y=0L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L); x++) fourier[y*width+x]*=(-1.0); } static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info, Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude_source, *phase_source; Image *magnitude_image, *phase_image; MagickBooleanType status; register IndexPacket *indexes; register PixelPacket *q; register ssize_t x; ssize_t i, y; magnitude_image=GetFirstImageInList(image); phase_image=GetNextImageInList(image); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",image->filename); return(MagickFalse); } /* Create "Fourier Transform" image from constituent arrays. 
*/ magnitude_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,fourier_info->width*sizeof(*magnitude_source)); if (magnitude_source == (double *) NULL) return(MagickFalse); (void) ResetMagickMemory(magnitude_source,0,fourier_info->height* fourier_info->width*sizeof(*magnitude_source)); phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_source)); if (phase_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(MagickFalse); } (void) ResetMagickMemory(phase_source,0,fourier_info->height* fourier_info->width*sizeof(*phase_source)); status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height, magnitude,magnitude_source); if (status != MagickFalse) status=ForwardQuadrantSwap(fourier_info->height,fourier_info->height,phase, phase_source); CorrectPhaseLHS(fourier_info->height,fourier_info->height,phase_source); if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_source[i]/=(2.0*MagickPI); phase_source[i]+=0.5; i++; } } magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception); i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case 
OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange* magnitude_source[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(magnitude_view,exception); if (status == MagickFalse) break; } magnitude_view=DestroyCacheView(magnitude_view); i=0L; phase_view=AcquireAuthenticCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL, exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_source[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*phase_source[i])); break; } } i++; q++; } status=SyncCacheViewAuthenticPixels(phase_view,exception); if (status == MagickFalse) break; } phase_view=DestroyCacheView(phase_view); phase_source=(double *) RelinquishMagickMemory(phase_source); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(status); } static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info, const Image *image,double *magnitude,double *phase,ExceptionInfo *exception) { CacheView *image_view; double n, *source; fftw_complex *fourier; fftw_plan fftw_r2c_plan; register const IndexPacket *indexes; 
register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Generate the forward Fourier transform. */ source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source)); if (source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } ResetMagickMemory(source,0,fourier_info->height*fourier_info->width* sizeof(*source)); i=0L; image_view=AcquireVirtualCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { source[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { source[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { source[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { source[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { source[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { source[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } image_view=DestroyCacheView(image_view); fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); source=(double *) RelinquishMagickMemory(source); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ForwardFourierTransform) #endif fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height, source,fourier,FFTW_ESTIMATE); fftw_execute(fftw_r2c_plan); fftw_destroy_plan(fftw_r2c_plan); 
source=(double *) RelinquishMagickMemory(source); /* Normalize Fourier transform. */ n=(double) fourier_info->width*(double) fourier_info->width; i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]/=n; #else fourier[i][0]/=n; fourier[i][1]/=n; #endif i++; } /* Generate magnitude and phase (or real and imaginary). */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude[i]=cabs(fourier[i]); phase[i]=carg(fourier[i]); i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { magnitude[i]=creal(fourier[i]); phase[i]=cimag(fourier[i]); i++; } fourier=(fftw_complex *) RelinquishMagickMemory(fourier); return(MagickTrue); } static MagickBooleanType ForwardFourierTransformChannel(const Image *image, const ChannelType channel,const MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude, *phase; fftw_complex *fourier; FourierInfo fourier_info; MagickBooleanType status; size_t extent; fourier_info.width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; fourier_info.width=(extent & 0x01) == 1 ? 
extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } status=ForwardFourierTransform(&fourier_info,image,magnitude,phase,exception); if (status != MagickFalse) status=ForwardFourier(&fourier_info,fourier_image,magnitude,phase, exception); fourier=(fftw_complex *) RelinquishMagickMemory(fourier); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } #endif MagickExport Image *ForwardFourierTransformImage(const Image *image, const MagickBooleanType modulus,ExceptionInfo *exception) { Image *fourier_image; fourier_image=NewImageList(); #if !defined(MAGICKCORE_FFTW_DELEGATE) (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", image->filename); 
#else { Image *magnitude_image; size_t extent, width; width=image->columns; if ((image->columns != image->rows) || ((image->columns % 2) != 0) || ((image->rows % 2) != 0)) { extent=image->columns < image->rows ? image->rows : image->columns; width=(extent & 0x01) == 1 ? extent+1UL : extent; } magnitude_image=CloneImage(image,width,width,MagickFalse,exception); if (magnitude_image != (Image *) NULL) { Image *phase_image; magnitude_image->storage_class=DirectClass; magnitude_image->depth=32UL; phase_image=CloneImage(image,width,width,MagickFalse,exception); if (phase_image == (Image *) NULL) magnitude_image=DestroyImage(magnitude_image); else { MagickBooleanType is_gray, status; phase_image->storage_class=DirectClass; phase_image->depth=32UL; AppendImageToList(&fourier_image,magnitude_image); AppendImageToList(&fourier_image,phase_image); status=MagickTrue; is_gray=IsGrayImage(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=ForwardFourierTransformChannel(image, GrayChannels,modulus,fourier_image,exception); else thread_status=ForwardFourierTransformChannel(image, RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=ForwardFourierTransformChannel(image, BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->matte != MagickFalse) thread_status=ForwardFourierTransformChannel(image, OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (image->colorspace == CMYKColorspace) thread_status=ForwardFourierTransformChannel(image, IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImageList(fourier_image); fftw_cleanup(); } } } #endif return(fourier_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n v e r s e F o u r i e r T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InverseFourierTransformImage() implements the inverse discrete Fourier % transform (DFT) of the image either as a magnitude / phase or real / % imaginary image pair. % % The format of the InverseFourierTransformImage method is: % % Image *InverseFourierTransformImage(const Image *magnitude_image, % const Image *phase_image,const MagickBooleanType modulus, % ExceptionInfo *exception) % % A description of each parameter follows: % % o magnitude_image: the magnitude or real image. % % o phase_image: the phase or imaginary image. % % o modulus: if true, return transform as a magnitude / phase pair % otherwise a real / imaginary image pair. % % o exception: return any errors or warnings in this structure. % */ #if defined(MAGICKCORE_FFTW_DELEGATE) static MagickBooleanType InverseQuadrantSwap(const size_t width, const size_t height,const double *source,double *destination) { register ssize_t x; ssize_t center, y; /* Swap quadrants. 
*/ center=(ssize_t) floor((double) width/2L)+1L; for (y=1L; y < (ssize_t) height; y++) for (x=0L; x < (ssize_t) (width/2L+1L); x++) destination[center*(height-y)-x+width/2L]=source[y*width+x]; for (y=0L; y < (ssize_t) height; y++) destination[center*y]=source[y*width+width/2L]; for (x=0L; x < center; x++) destination[x]=source[center-x-1L]; return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination)); } static MagickBooleanType InverseFourier(FourierInfo *fourier_info, const Image *magnitude_image,const Image *phase_image,fftw_complex *fourier, ExceptionInfo *exception) { CacheView *magnitude_view, *phase_view; double *magnitude, *phase, *magnitude_source, *phase_source; MagickBooleanType status; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Inverse fourier - read image and break down into a double array. */ magnitude_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height,fourier_info->width*sizeof(*magnitude_source)); if (magnitude_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } phase_source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase_source)); if (phase_source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); return(MagickFalse); } i=0L; magnitude_view=AcquireVirtualCacheView(magnitude_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(magnitude_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch 
(fourier_info->channel) { case RedChannel: default: { magnitude_source[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { magnitude_source[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { magnitude_source[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { magnitude_source[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { magnitude_source[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { magnitude_source[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } i=0L; phase_view=AcquireVirtualCacheView(phase_image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1, exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(phase_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { switch (fourier_info->channel) { case RedChannel: default: { phase_source[i]=QuantumScale*GetPixelRed(p); break; } case GreenChannel: { phase_source[i]=QuantumScale*GetPixelGreen(p); break; } case BlueChannel: { phase_source[i]=QuantumScale*GetPixelBlue(p); break; } case OpacityChannel: { phase_source[i]=QuantumScale*GetPixelOpacity(p); break; } case IndexChannel: { phase_source[i]=QuantumScale*GetPixelIndex(indexes+x); break; } case GrayChannels: { phase_source[i]=QuantumScale*GetPixelGray(p); break; } } i++; p++; } } if (fourier_info->modulus != MagickFalse) { i=0L; for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->width; x++) { phase_source[i]-=0.5; phase_source[i]*=(2.0*MagickPI); i++; } } magnitude_view=DestroyCacheView(magnitude_view); phase_view=DestroyCacheView(phase_view); magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", 
magnitude_image->filename); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, magnitude_source,magnitude); magnitude_source=(double *) RelinquishMagickMemory(magnitude_source); phase=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase_source=(double *) RelinquishMagickMemory(phase_source); return(MagickFalse); } CorrectPhaseLHS(fourier_info->width,fourier_info->width,phase_source); if (status != MagickFalse) status=InverseQuadrantSwap(fourier_info->width,fourier_info->height, phase_source,phase); phase_source=(double *) RelinquishMagickMemory(phase_source); /* Merge two sets. */ i=0L; if (fourier_info->modulus != MagickFalse) for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]*cos(phase[i])+I*magnitude[i]*sin(phase[i]); #else fourier[i][0]=magnitude[i]*cos(phase[i]); fourier[i][1]=magnitude[i]*sin(phase[i]); #endif i++; } else for (y=0L; y < (ssize_t) fourier_info->height; y++) for (x=0L; x < (ssize_t) fourier_info->center; x++) { #if defined(MAGICKCORE_HAVE_COMPLEX_H) fourier[i]=magnitude[i]+I*phase[i]; #else fourier[i][0]=magnitude[i]; fourier[i][1]=phase[i]; #endif i++; } phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info, fftw_complex *fourier,Image *image,ExceptionInfo *exception) { CacheView *image_view; double *source; fftw_plan fftw_c2r_plan; register IndexPacket *indexes; register PixelPacket *q; register 
ssize_t i, x; ssize_t y; source=(double *) AcquireQuantumMemory((size_t) fourier_info->height, fourier_info->width*sizeof(*source)); if (source == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_InverseFourierTransform) #endif { fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height, fourier,source,FFTW_ESTIMATE); fftw_execute(fftw_c2r_plan); fftw_destroy_plan(fftw_c2r_plan); } i=0L; image_view=AcquireAuthenticCacheView(image,exception); for (y=0L; y < (ssize_t) fourier_info->height; y++) { if (y >= (ssize_t) image->rows) break; q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width > image->columns ? image->columns : fourier_info->width,1UL,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0L; x < (ssize_t) fourier_info->width; x++) { if (x < (ssize_t) image->columns) switch (fourier_info->channel) { case RedChannel: default: { SetPixelRed(q,ClampToQuantum(QuantumRange*source[i])); break; } case GreenChannel: { SetPixelGreen(q,ClampToQuantum(QuantumRange*source[i])); break; } case BlueChannel: { SetPixelBlue(q,ClampToQuantum(QuantumRange*source[i])); break; } case OpacityChannel: { SetPixelOpacity(q,ClampToQuantum(QuantumRange*source[i])); break; } case IndexChannel: { SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*source[i])); break; } case GrayChannels: { SetPixelGray(q,ClampToQuantum(QuantumRange*source[i])); break; } } i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; } image_view=DestroyCacheView(image_view); source=(double *) RelinquishMagickMemory(source); return(MagickTrue); } static MagickBooleanType InverseFourierTransformChannel( const Image *magnitude_image,const Image *phase_image, const ChannelType channel,const 
MagickBooleanType modulus, Image *fourier_image,ExceptionInfo *exception) { double *magnitude, *phase; fftw_complex *fourier; FourierInfo fourier_info; MagickBooleanType status; size_t extent; fourier_info.width=magnitude_image->columns; if ((magnitude_image->columns != magnitude_image->rows) || ((magnitude_image->columns % 2) != 0) || ((magnitude_image->rows % 2) != 0)) { extent=magnitude_image->columns < magnitude_image->rows ? magnitude_image->rows : magnitude_image->columns; fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent; } fourier_info.height=fourier_info.width; fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L; fourier_info.channel=channel; fourier_info.modulus=modulus; magnitude=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*magnitude)); if (magnitude == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); return(MagickFalse); } phase=(double *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*phase)); if (phase == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } fourier=(fftw_complex *) AcquireQuantumMemory((size_t) fourier_info.height, fourier_info.center*sizeof(*fourier)); if (fourier == (fftw_complex *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", magnitude_image->filename); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(MagickFalse); } status=InverseFourier(&fourier_info,magnitude_image,phase_image,fourier, exception); if (status != MagickFalse) status=InverseFourierTransform(&fourier_info,fourier,fourier_image, exception); 
fourier=(fftw_complex *) RelinquishMagickMemory(fourier); phase=(double *) RelinquishMagickMemory(phase); magnitude=(double *) RelinquishMagickMemory(magnitude); return(status); } #endif MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image, const Image *phase_image,const MagickBooleanType modulus, ExceptionInfo *exception) { Image *fourier_image; assert(magnitude_image != (Image *) NULL); assert(magnitude_image->signature == MagickSignature); if (magnitude_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", magnitude_image->filename); if (phase_image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageSequenceRequired","`%s'",magnitude_image->filename); return((Image *) NULL); } #if !defined(MAGICKCORE_FFTW_DELEGATE) fourier_image=(Image *) NULL; (void) modulus; (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)", magnitude_image->filename); #else { fourier_image=CloneImage(magnitude_image,magnitude_image->columns, magnitude_image->rows,MagickFalse,exception); if (fourier_image != (Image *) NULL) { MagickBooleanType is_gray, status; status=MagickTrue; is_gray=IsGrayImage(magnitude_image,exception); if (is_gray != MagickFalse) is_gray=IsGrayImage(phase_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel sections #endif { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; if (is_gray != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GrayChannels,modulus,fourier_image,exception); else thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,RedChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; 
thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,GreenChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (is_gray == MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,BlueChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->matte != MagickFalse) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,OpacityChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp section #endif { MagickBooleanType thread_status; thread_status=MagickTrue; if (magnitude_image->colorspace == CMYKColorspace) thread_status=InverseFourierTransformChannel(magnitude_image, phase_image,IndexChannel,modulus,fourier_image,exception); if (thread_status == MagickFalse) status=thread_status; } } if (status == MagickFalse) fourier_image=DestroyImage(fourier_image); } fftw_cleanup(); } #endif return(fourier_image); }
declare_reduction_codegen.c
// RUN: %clang_cc1 -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-optzns | FileCheck %s // RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-optzns // RUN: %clang_cc1 -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-optzns | FileCheck --check-prefix=CHECK-LOAD %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK: [[SSS_INT:.+]] = type { i32 } // CHECK-LOAD: [[SSS_INT:.+]] = type { i32 } #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: store i32 [[MUL]], i32* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: store i32 [[MUL]], i32* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK: sext i8 // CHECK: sext i8 // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-NEXT: store i8 [[TRUNC]], i8* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK-LOAD: sext i8 // CHECK-LOAD: sext i8 // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } #pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig) // CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK: [[ADD:%.+]] = fadd float // CHECK-NEXT: store float [[ADD]], float* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void 
@{{[^(]+}}(float* noalias, float* noalias) // CHECK: [[ADD:%.+]] = fadd float 1.5 // CHECK-NEXT: store float [[ADD]], float* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK-LOAD: [[ADD:%.+]] = fadd float // CHECK-LOAD-NEXT: store float [[ADD]], float* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias, float* noalias) // CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5 // CHECK-LOAD-NEXT: store float [[ADD]], float* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } struct SSS { int field; #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: store i32 [[MUL]], i32* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // CHECK: sext i8 // CHECK: sext i8 // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-NEXT: store i8 [[TRUNC]], i8* // CHECK-NEXT: ret void // CHECK-NEXT: } }; void init(struct SSS *priv, struct SSS orig); #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret 
void // CHECK-LOAD-NEXT: } // CHECK-LABEL: @main // CHECK-LOAD-LABEL: @main int main() { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias, [[SSS_INT]]* noalias) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } } return 0; } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias) // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: store i32 [[MUL]], i32* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias, i8* noalias) // 
CHECK-LOAD: sext i8 // CHECK-LOAD: sext i8 // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } #endif
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 1; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently everytime //double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); char status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = (n-1)/8; padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cyrptographic system 
ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); unsigned int found = 0; #pragma omp parallel for shared(found) for (unsigned int i=0;i<p-1;i++) { if (found == 0 && modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); found = 1; } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
LAGraph_BF_full2.c
//------------------------------------------------------------------------------ // LAGraph_BF_full2.c: Bellman-Ford single-source shortest paths, returns tree, // while diagonal of input matrix A needs not to be explicit 0, using the // frontier idea from Roi Lipman //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause // // See additional acknowledgments in the LICENSE file, // or contact permission@sei.cmu.edu for the full terms. //------------------------------------------------------------------------------ // LAGraph_BF_full2: Bellman-Ford single source shortest paths, returning both // the path lengths and the shortest-path tree. contributed by Jinhao Chen and // Tim Davis, Texas A&M. // LAGraph_BF_full2 performs a Bellman-Ford to find out shortest path, parent // nodes along the path and the hops (number of edges) in the path from given // source vertex s in the range of [0, n) on graph given as matrix A with size // n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i // to vertex j with weight w, then A(i, j) = w. // LAGraph_BF_full2 returns GrB_SUCCESS if it succeeds. In this case, there // are no negative-weight cycles in the graph, and d, pi, and h are returned. // The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1, // where p is the parent node of k-th node in the shortest path. In particular, // pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest // path. // If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the // GrB_Vectors d(k), pi(k) and h(k) (i.e., *pd_output, *ppi_output and // *ph_output respectively) will be NULL when negative-weight cycle detected. // Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and // so on, can be returned, if these errors are found by the underlying // GrB_* functions. 
//------------------------------------------------------------------------------
// Cleanup macros: free every workspace object, then (FREE_ALL) the outputs
// too.  These expand inside LAGraph_BF_full2 and capture its locals by name.
//------------------------------------------------------------------------------
#define LAGraph_FREE_WORK        \
{                                \
    GrB_free(&d);                \
    GrB_free(&dtmp);             \
    GrB_free(&dfrontier);        \
    GrB_free(&Atmp);             \
    GrB_free(&BF_Tuple3);        \
    GrB_free(&BF_lMIN_Tuple3);   \
    GrB_free(&BF_PLUSrhs_Tuple3);\
    GrB_free(&BF_EQ_Tuple3);     \
    GrB_free(&BF_lMIN_Tuple3_Monoid);  \
    GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
    LAGraph_Free ((void**)&I);   \
    LAGraph_Free ((void**)&J);   \
    LAGraph_Free ((void**)&w);   \
    LAGraph_Free ((void**)&W);   \
    LAGraph_Free ((void**)&h);   \
    LAGraph_Free ((void**)&pi);  \
}

#define LAGraph_FREE_ALL         \
{                                \
    LAGraph_FREE_WORK            \
    GrB_free (pd_output);        \
    GrB_free (ppi_output);       \
    GrB_free (ph_output);        \
}

#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h>  // from src/utility

typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;

//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
    double w;    // w corresponds to a path weight.
    GrB_Index h; // h corresponds to a path size or number of hops.
    GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
                 // vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
                 // for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF2_Tuple3_struct;

//------------------------------------------------------------------------------
// binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------

// z = min(x, y): lexicographic minimum on (w, h, pi) — prefer the lighter
// path, then the shorter one, then the smaller parent id as a tie-breaker.
void BF2_lMIN2
(
    BF2_Tuple3_struct *z,
    const BF2_Tuple3_struct *x,
    const BF2_Tuple3_struct *y
)
{
    if (x->w < y->w
        || (x->w == y->w && x->h < y->h)
        || (x->w == y->w && x->h == y->h && x->pi < y->pi))
    {
        // x wins; skip the self-assignment when z aliases x
        if (z != x) { *z = *x; }
    }
    else
    {
        *z = *y;
    }
}

// z = x (+) y: extend a path (x) by an edge (y): add weights and hop counts;
// take y's parent unless y has none (pi == 0) or x is "infinite"
// (pi == UINT64_MAX), in which case x's parent is kept.
void BF2_PLUSrhs2
(
    BF2_Tuple3_struct *z,
    const BF2_Tuple3_struct *x,
    const BF2_Tuple3_struct *y
)
{
    z->w = x->w + y->w ;
    z->h = x->h + y->h ;
    z->pi = (x->pi != UINT64_MAX && y->pi != 0) ? y->pi : x->pi ;
}

// z = (x == y): component-wise equality of two tuples.
void BF2_EQ
(
    bool *z,
    const BF2_Tuple3_struct *x,
    const BF2_Tuple3_struct *y
)
{
    (*z) = (x->w == y->w && x->h == y->h && x->pi == y->pi) ;
}

// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negative-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the // sum of edges length in the shortest path // ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the // parent of i-th vertex in the shortest path // ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the // number of edges from s to i in the shortest path // A has weights on corresponding entries of edges // s is given index for source vertex GrB_Info LAGraph_BF_full2 ( GrB_Vector *pd_output, //the pointer to the vector of distance GrB_Vector *ppi_output, //the pointer to the vector of parent GrB_Vector *ph_output, //the pointer to the vector of hops const GrB_Matrix A, //matrix for the graph const GrB_Index s //given index of the source ) { GrB_Info info; char *msg = NULL ; // tmp vector to store distance vector after n (i.e., V) loops GrB_Vector d = NULL, dtmp = NULL, dfrontier = NULL; GrB_Matrix Atmp = NULL; GrB_Type BF_Tuple3; GrB_BinaryOp BF_lMIN_Tuple3; GrB_BinaryOp BF_PLUSrhs_Tuple3; GrB_BinaryOp BF_EQ_Tuple3; GrB_Monoid BF_lMIN_Tuple3_Monoid; GrB_Semiring BF_lMIN_PLUSrhs_Tuple3; GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A GrB_Index *h = NULL, *pi = NULL; double *w = NULL; BF2_Tuple3_struct *W = NULL; LG_CHECK (A == NULL || pd_output == NULL || ppi_output == NULL || ph_output == NULL, -1001, "inputs are NULL") ; *pd_output = NULL; *ppi_output = NULL; *ph_output = NULL; GrB_TRY (GrB_Matrix_nrows (&nrows, A)) ; GrB_TRY (GrB_Matrix_ncols (&ncols, A)) ; GrB_TRY (GrB_Matrix_nvals (&nz, A)); LG_CHECK (nrows != ncols, -1002, "A must be square") ; n = nrows; LG_CHECK (s >= n || s < 0, -1003, "invalid source node") ; //-------------------------------------------------------------------------- // create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring //-------------------------------------------------------------------------- // GrB_Type GrB_TRY 
(GrB_Type_new(&BF_Tuple3, sizeof(BF2_Tuple3_struct))); // GrB_BinaryOp GrB_TRY (GrB_BinaryOp_new(&BF_EQ_Tuple3, (LAGraph_binary_function) (&BF2_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3)); GrB_TRY (GrB_BinaryOp_new(&BF_lMIN_Tuple3, (LAGraph_binary_function) (&BF2_lMIN2), BF_Tuple3, BF_Tuple3, BF_Tuple3)); GrB_TRY (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3, (LAGraph_binary_function)(&BF2_PLUSrhs2), BF_Tuple3, BF_Tuple3, BF_Tuple3)); // GrB_Monoid BF2_Tuple3_struct BF_identity = (BF2_Tuple3_struct) { .w = INFINITY, .h = UINT64_MAX, .pi = UINT64_MAX }; LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3, &BF_identity)); //GrB_Semiring GrB_TRY (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3, BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3)); //-------------------------------------------------------------------------- // allocate arrays used for tuplets //-------------------------------------------------------------------------- I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ; J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ; w = LAGraph_Malloc (nz, sizeof(double)) ; W = LAGraph_Malloc (nz, sizeof(BF2_Tuple3_struct)) ; LG_CHECK (I == NULL || J == NULL || w == NULL || W == NULL, -1004, "out of memory") ; //-------------------------------------------------------------------------- // create matrix Atmp based on A, while its entries become BF_Tuple3 type //-------------------------------------------------------------------------- LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A)); int nthreads; LAGRAPH_OK( LAGraph_GetNumThreads (&nthreads, NULL)) ; printf ("nthreads %d\n", nthreads) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index k = 0; k < nz; k++) { if (w[k] == 0) //diagonal entries { W[k] = (BF2_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 }; } else { W[k] = (BF2_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 }; } } GrB_TRY (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n)); LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3)); LAGraph_Free 
((void**)&I); LAGraph_Free ((void**)&J); LAGraph_Free ((void**)&W); LAGraph_Free ((void**)&w); //-------------------------------------------------------------------------- // create and initialize "distance" vector d //-------------------------------------------------------------------------- GrB_TRY (GrB_Vector_new(&d, BF_Tuple3, n)); // initial distance from s to itself BF2_Tuple3_struct d0 = (BF2_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 }; LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s)); //-------------------------------------------------------------------------- // start the Bellman Ford process //-------------------------------------------------------------------------- // copy d to dtmp in order to create a same size of vector GrB_TRY (GrB_Vector_dup(&dtmp, d)); GrB_TRY (GrB_Vector_dup(&dfrontier, d)); bool same= false; // variable indicating if d == dtmp int64_t iter = 0; // number of iterations // terminate when no new path is found or more than V-1 loops while (!same && iter < n - 1) { // execute semiring on d and A, and save the result to dtmp GrB_TRY (GrB_vxm(dfrontier, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3, dfrontier, Atmp, GrB_NULL)); // dtmp[i] = min(d[i], dfrontier[i]). GrB_Vector_eWiseAdd_BinaryOp(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_Tuple3, d, dfrontier, GrB_NULL); LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL)); if (!same) { GrB_Vector ttmp = dtmp; dtmp = d; d = ttmp; } iter ++; } // check for negative-weight cycle only when there was a new path in the // last loop, otherwise, there can't be a negative-weight cycle. if (!same) { // execute semiring again to check for negative-weight cycle GrB_TRY (GrB_vxm(dfrontier, GrB_NULL, GrB_NULL, BF_lMIN_PLUSrhs_Tuple3, dfrontier, Atmp, GrB_NULL)); // dtmp[i] = min(d[i], dfrontier[i]). 
GrB_Vector_eWiseAdd_BinaryOp(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_Tuple3, d, dfrontier, GrB_NULL); // if d != dtmp, then there is a negative-weight cycle in the graph LAGRAPH_OK (LAGraph_Vector_IsEqual_op(&same, dtmp, d, BF_EQ_Tuple3, NULL)); if (!same) { // printf("A negative-weight cycle found. \n"); LAGraph_FREE_ALL; return (GrB_NO_VALUE) ; } } //-------------------------------------------------------------------------- // extract tuple from "distance" vector d and create GrB_Vectors for output //-------------------------------------------------------------------------- I = LAGraph_Malloc (n, sizeof(GrB_Index)) ; W = LAGraph_Malloc (n, sizeof(BF2_Tuple3_struct)) ; w = LAGraph_Malloc (n, sizeof(double)) ; h = LAGraph_Malloc (n, sizeof(GrB_Index)) ; pi = LAGraph_Malloc (n, sizeof(GrB_Index)) ; LG_CHECK (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL, -1004, "out of memory") ; nz = n ; LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d)); for (GrB_Index k = 0; k < nz; k++) { w [k] = W[k].w ; h [k] = W[k].h ; pi[k] = W[k].pi; } GrB_TRY (GrB_Vector_new(pd_output, GrB_FP64, n)); GrB_TRY (GrB_Vector_new(ppi_output, GrB_UINT64, n)); GrB_TRY (GrB_Vector_new(ph_output, GrB_UINT64, n)); GrB_TRY (GrB_Vector_build (*pd_output , I, w , nz, GrB_MIN_FP64 )); GrB_TRY (GrB_Vector_build (*ppi_output, I, pi, nz, GrB_MIN_UINT64)); GrB_TRY (GrB_Vector_build (*ph_output , I, h , nz, GrB_MIN_UINT64)); LAGraph_FREE_WORK; return (GrB_SUCCESS) ; }
composite.c
#pragma omp target teams distribute parallel for simd [clauses] for-loops
convolution_pack8_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Naive int8 convolution over blobs stored with 8 elements packed per pixel:
// inputs are read as signed char with a stride of 8 per spatial position, and
// each output pixel receives 8 int32 accumulators (written as two int32x4_t).
// Weights are consumed 64 bytes (8x8) per kernel tap.
// NOTE(review): assumes top_blob channels hold int32 data and weight_data_int8
// is laid out as [outch][channels*maxk*64] -- confirm against the caller.
static void convolution_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat offsets (in pixels) of each kernel tap relative to
    // the window's top-left pixel, accounting for dilation and row stride
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output: one output channel per iteration, parallelized over channels
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // 8 int32 accumulators for the 8 packed output lanes
                int32x4_t _sum0 = vdupq_n_s32(0);
                int32x4_t _sum1 = vdupq_n_s32(0);

                const signed char* kptr = weight_data_int8.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // start of the receptive field for output pixel (i, j);
                    // each spatial position holds 8 packed int8 values
                    const signed char* sptr = m.row<signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        // broadcast each of the 8 input lanes of this tap
                        int8x8_t _val0 = vld1_dup_s8(sptr + space_ofs[k] * 8);
                        int8x8_t _val1 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 1);
                        int8x8_t _val2 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 2);
                        int8x8_t _val3 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 3);
                        int8x8_t _val4 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 4);
                        int8x8_t _val5 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 5);
                        int8x8_t _val6 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 6);
                        int8x8_t _val7 = vld1_dup_s8(sptr + space_ofs[k] * 8 + 7);

                        // 8 weight rows of 8 int8 each (64 bytes per tap)
                        int8x8_t _w0 = vld1_s8(kptr);
                        int8x8_t _w1 = vld1_s8(kptr + 8);
                        int8x8_t _w2 = vld1_s8(kptr + 16);
                        int8x8_t _w3 = vld1_s8(kptr + 24);
                        int8x8_t _w4 = vld1_s8(kptr + 32);
                        int8x8_t _w5 = vld1_s8(kptr + 40);
                        int8x8_t _w6 = vld1_s8(kptr + 48);
                        int8x8_t _w7 = vld1_s8(kptr + 56);

                        // pairwise int8 multiply-accumulate into int16,
                        // then widen into the two int32 accumulators
                        int16x8_t _wv0 = vmull_s8(_w0, _val0);
                        _wv0 = vmlal_s8(_wv0, _w1, _val1);
                        int16x8_t _wv2 = vmull_s8(_w2, _val2);
                        _wv2 = vmlal_s8(_wv2, _w3, _val3);
                        int16x8_t _wv4 = vmull_s8(_w4, _val4);
                        _wv4 = vmlal_s8(_wv4, _w5, _val5);
                        int16x8_t _wv6 = vmull_s8(_w6, _val6);
                        _wv6 = vmlal_s8(_wv6, _w7, _val7);

                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv0));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv0));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv2));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv2));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv4));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv4));
                        _sum0 = vaddw_s16(_sum0, vget_low_s16(_wv6));
                        _sum1 = vaddw_s16(_sum1, vget_high_s16(_wv6));

                        kptr += 64;
                    }
                }

                // store 8 int32 results for this output pixel
                vst1q_s32(outptr + j * 8, _sum0);
                vst1q_s32(outptr + j * 8 + 4, _sum1);
            }

            outptr += outw * 8;
        }
    }
}
dense_pairwise.c
/* Copyright (c) 2016, 2021 Drew Schmidt All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ // Functions for computing covariance, (pearson) correlation, and cosine similarity #include <math.h> #include <stdlib.h> #include <string.h> #include "utils/safeomp.h" #include "coop.h" #include "utils/fill.h" #include "utils/inverse.h" #include "utils/special_vals.h" static inline void compute_sums(const int m, const size_t mi, const double *const restrict vec, const double *const restrict x, double *restrict sumx, double *restrict sumy, int *restrict len) { int k; *sumx = 0; *sumy = 0; *len = 0; PLEASE_VECTORIZE for (k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) { *sumx += vec[k]; *sumy += x[k + mi]; (*len)++; } } } // ----------------------------------------------------------------------------- // cosine // ----------------------------------------------------------------------------- int coop_cosine_mat_inplace_pairwise(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cos) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<n; j++) { const size_t mj = (size_t)m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const size_t nj = (size_t)n*j; #pragma omp parallel for shared(j, vec, cos) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = (size_t)m*i; double xx, xy, yy; xx = xy = yy = 0.0; int len = 0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) { const double xval = vec[k]; const double yval = x[k + mi]; xx += xval * xval; yy += yval * yval; xy += xval * yval; len++; } } if (len == 0) { set_na_real(cos + (i + nj)); continue; } cos[i + nj] = xy / sqrt(xx * yy); } } free(vec); if (inv) { check = inv_sym_chol(n, cos); CHECKRET(check); } symmetrize(n, cos); return COOP_OK; } // ----------------------------------------------------------------------------- // correlation // ----------------------------------------------------------------------------- int coop_pcor_mat_inplace_pairwise(const bool inv, const int m, const int n, const double * 
const restrict x, double *restrict cor) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<n; j++) { const size_t mj = (size_t)m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const size_t nj = (size_t)n*j; #pragma omp parallel for shared(j, vec, cor) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = (size_t)m*i; int len; double meanx, meany; compute_sums(m, mi, vec, x, &meanx, &meany, &len); if (len == 0 || len == 1) { set_na_real(cor + (i + nj)); set_na_real(cor + (j + (size_t)n*i)); continue; } const double dlen = (double) len; meanx /= dlen; meany /= dlen; double sdx = 0.; double sdy = 0.; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) { sdx += (vec[k] - meanx)*(vec[k] - meanx); sdy += (x[k + mi] - meany)*(x[k + mi] - meany); } } sdx = sqrt(sdx/(dlen-1.)); sdy = sqrt(sdy/(dlen-1.)); double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) mmcp += (vec[k] - meanx) * (x[k + mi] - meany); } cor[i + nj] = mmcp / sdx / sdy / (dlen - 1.0);; } } free(vec); if (inv) { check = inv_sym_chol(n, cor); CHECKRET(check); } symmetrize(n, cor); return COOP_OK; } // ----------------------------------------------------------------------------- // covariance // ----------------------------------------------------------------------------- int coop_covar_mat_inplace_pairwise(const bool inv, const int m, const int n, const double *const restrict x, double *restrict cov) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<n; j++) { const size_t mj = (size_t)m*j; memcpy(vec, x+mj, m*sizeof(*vec)); const size_t nj = (size_t)n*j; #pragma omp parallel for shared(j, vec, cov) if(m*n > OMP_MIN_SIZE) for (int i=j; i<n; i++) { const size_t mi = (size_t)m*i; int len; double meanx, meany; compute_sums(m, mi, vec, x, &meanx, &meany, &len); if (len == 0) { set_na_real(cov + (i + nj)); set_na_real(cov + (j + (size_t)n*i)); continue; } meanx /= (double) 
len; meany /= (double) len; double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) mmcp += (vec[k] - meanx) * (x[k + mi] - meany); } cov[i + nj] = mmcp * ((double) 1.0/(len-1)); } } free(vec); if (inv) { check = inv_sym_chol(n, cov); CHECKRET(check); } symmetrize(n, cov); return COOP_OK; } int coop_covar_matmat_inplace_pairwise(const bool inv, const int m, const int nx, const double *const restrict x, const int ny, const double *const restrict y, double *restrict cov) { int check; double *vec = malloc(m * sizeof(*vec)); CHECKMALLOC(vec); for (int j=0; j<ny; j++) { const size_t mj = (size_t)m*j; memcpy(vec, y+mj, m*sizeof(*vec)); #pragma omp parallel for shared(j, vec, cov) if(m*nx > OMP_MIN_SIZE) for (int i=0; i<nx; i++) { const size_t mi = (size_t)m*i; int len; double meanx, meany; compute_sums(m, mi, vec, x, &meanx, &meany, &len); if (len == 0) { set_na_real(cov + (i + nx*j)); set_na_real(cov + (j + nx*i)); continue; } meanx /= (double) len; meany /= (double) len; double mmcp = 0.0; SAFE_SIMD for (int k=0; k<m; k++) { if (!isnan(vec[k]) && !isnan(x[k + mi])) mmcp += (vec[k] - meanx) * (x[k + mi] - meany); } cov[i + nx*j] = mmcp * ((double) 1.0/(len-1)); } } free(vec); if (nx == ny && inv) { check = inv_gen_lu(nx, cov); CHECKRET(check); } return COOP_OK; }
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> // HLSL Change Starts #include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant namespace hlsl { struct UnusualAnnotation; } // HLSL Change Ends 
namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class 
ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. 
struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. 
Since it has no previous decls, it will remain // with internal linkage. if (getLangOpts().ModulesHideInternalLinkage) return isVisible(Old) || New->isExternallyVisible(); return true; } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). 
PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. 
Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. 
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. 
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. 
/// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. 
ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. 
unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. 
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. 
unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed; /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. 
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; void ReadMethodPool(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema& S) : S(S), OldFPContractState(S.FPFeatures.fp_contract) {} ~FPContractStateRAII() { S.FPFeatures.fp_contract = OldFPContractState; } private: Sema& S; bool OldFPContractState : 1; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything. 
void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. 
However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. 
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); unsigned deduceWeakPropertyFromType(QualType T) { if ((getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak()) || (getLangOpts().ObjCAutoRefCount && T.getObjCLifetime() == Qualifiers::OCL_Weak)) return ObjCDeclSpec::DQ_PR_weak; return 0; } /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. 
/// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool 
*MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... 
Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); VisibleModuleSet VisibleModules; llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack; Module *CachedFakeTopLevelModule; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity); /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc); bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } bool hasVisibleMergedDefinition(NamedDecl *Def); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. 
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

// Completeness / literal-type requirements. Each family offers three forms:
// an explicit TypeDiagnoser, a bare diagnostic ID, and a variadic template
// that binds the ID plus extra arguments into a BoundTypeDiagnoser.
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID);

/// Convenience overload: bind \p DiagID and \p Args into a
/// BoundTypeDiagnoser and delegate to the TypeDiagnoser form.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

/// Convenience overload: see RequireCompleteType above.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

/// Convenience overload: see RequireCompleteType above.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// List of decls defined in a function prototype. This contains EnumConstants
/// that incorrectly end up in translation unit scope because there is no
ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate
};

/// Tagged result of name classification: depending on Kind, carries an
/// expression, a type, a template name, or a keyword. Built via the public
/// converting constructors and static factory functions below.
class NameClassification {
  NameClassificationKind Kind;
  // Payload slots; only the one matching Kind is meaningful.
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;
  const IdentifierInfo *Keyword;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword)
      : Kind(NC_Keyword), Keyword(Keyword) { }

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  // Payload accessors; each asserts that the stored kind matches.
  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate);
    return Template;
  }

  /// Map the stored template classification onto TemplateNameKind.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// \brief Perform name lookup on the
given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec 
&DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); // HLSL Change Starts // This enumeration is used to determine whether a variable declaration // should shadow a prior declaration rather than merging. enum ShadowMergeState { ShadowMergeState_Disallowed, // shadowing is not allowed ShadowMergeState_Possible, // shadowing is possible (but may not occur) ShadowMergeState_Effective // the declaration should shadow a prior one }; // HLSL Change Ends NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous, ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const 
FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SCm, hlsl::ParameterModifier ParamMod); // HLSL Change void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec 
&DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule. 
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. 
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {} bool ShouldSkip; NamedDecl *Previous; }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, 
SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. 
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. 
/// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. 
AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld, ShadowMergeState& MergeState); // HLSL Change - add merge state void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl); /// \brief Checks availability of the function depending on the current /// function context.Inside an unavailable function,unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. 
bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult 
PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. 
virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, 
TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void 
NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. 
enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. 
NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. 
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void 
ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. 
ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. 
const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. 
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. 
void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// \brief - Returns instance or factory methods in global method pool for /// given selector. If no such method or only one method found, function returns /// false; otherwise, it returns true bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// \brief - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance); /// \brief Record the typo correction failure and return an empty correction. 
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                        bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                  SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  /// Wrapper that carries the result of ActOnFinishFullExpr into the
  /// statement-building callbacks (see MakeFullExpr below).
  class FullExprArg {
  public:
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt();
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// \brief A RAII object to enter scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S): S(S) {
      S.ActOnStartOfCompoundStmt();
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };

  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    // Cancel the pop, e.g. when ownership of the scope is handed off.
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
                           SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                           SourceLocation DotDotDotLoc, Expr *RHSVal,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  StmtResult ActOnIfStmt(SourceLocation IfLoc,
                         FullExprArg CondVal, Decl *CondVar,
                         Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Expr *Cond,
                                    Decl *CondVar);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
                            FullExprArg Cond,
                            Decl *CondVar, Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc,
                         SourceLocation CondLParen, Expr *Cond,
                         SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First, FullExprArg Second,
                          Decl *SecondVar,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool 
IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation 
CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. 
  void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                        SourceLocation OpLoc);

  /// Begin collecting delayed diagnostics for a declaration into \p pool;
  /// balanced by PopParsingDeclaration.
  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  /// Suspend delayed-diagnostic pools for the duration of a class body;
  /// balanced by PopParsingClass.
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial };

  void EmitAvailabilityWarning(AvailabilityDiagnostic AD,
                               NamedDecl *D, StringRef Message,
                               SourceLocation Loc,
                               const ObjCInterfaceDecl *UnknownObjCClass,
                               const ObjCPropertyDecl  *ObjCProperty,
                               bool ObjCPropertyAccess);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     StringRef message);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. 
/// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. 
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, 
bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, 
Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void 
CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); // HLSL Change Starts //===---------------------------- HLSL Features -------------------------===// /// cbuffer/tbuffer llvm::SmallVector<Decl*, 1> HLSLBuffers; Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc, IdentifierInfo *Ident, SourceLocation IdentLoc, std::vector<hlsl::UnusualAnnotation *>& BufferAttributes, SourceLocation LBrace); void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace); Decl* getActiveHLSLBuffer() const; void ActOnStartHLSLBufferView(); bool IsOnHLSLBufferView(); Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc, DeclGroupPtrTy &dcl, bool iscbuf); // HLSL Change Ends //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
/// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. 
ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have. 
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. 
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. 
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. 
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. 
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = 
false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. 
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. 
/// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
                                          SourceRange AnnotationRange,
                                          CXXScopeSpec &SS);

bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);

/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);

/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);

/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);

/// \brief Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
                                       TypeSourceInfo *Info,
                                       bool KnownDependent,
                                       LambdaCaptureDefault CaptureDefault);

/// \brief Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
                                     SourceRange IntroducerRange,
                                     TypeSourceInfo *MethodType,
                                     SourceLocation EndLoc,
                                     ArrayRef<ParmVarDecl *> Params);

/// \brief Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
                      CXXMethodDecl *CallOperator,
                      SourceRange IntroducerRange,
                      LambdaCaptureDefault CaptureDefault,
                      SourceLocation CaptureDefaultLoc,
                      bool ExplicitParams,
                      bool ExplicitResultType,
                      bool Mutable);

/// \brief Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
QualType performLambdaInitCaptureInitialization(SourceLocation Loc,
                                                bool ByRef,
                                                IdentifierInfo *Id,
                                                Expr *&Init);

/// \brief Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        IdentifierInfo *Id,
                                        Expr *Init);

/// \brief Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// \brief Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);

/// \brief Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                  Declarator &ParamInfo,
                                  Scope *CurScope);

/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
                      bool IsInstantiation = false);

/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
                           Scope *CurScope);

/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
                           sema::LambdaScopeInfo *LSI);

/// \brief Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
    SourceLocation CurrentLoc, CXXConversionDecl *Conv);

/// \brief Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

// Build the block object used to implement a lambda conversion.
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv,
                                         Expr *Src);

// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings,
                                  unsigned NumStrings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);

// Objective-C \@YES / \@NO literal.
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc,
                                SourceLocation ValueLoc,
                                bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                        Expr *IndexExpr,
                                        ObjCMethodDecl *getterMethod,
                                        ObjCMethodDecl *setterMethod);

ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
                                      ObjCDictionaryElement *Elements,
                                      unsigned NumElements);

ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                     TypeSourceInfo *EncodedTypeInfo,
                                     SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                  CXXConversionDecl *Method,
                                  bool HadMultipleCandidates);

ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                     SourceLocation EncodeLoc,
                                     SourceLocation LParenLoc,
                                     ParsedType Ty,
                                     SourceLocation RParenLoc);

/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
                                       SourceLocation AtLoc,
                                       SourceLocation SelLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation RParenLoc,
                                       bool WarnMultipleSelectors);

/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
                                       SourceLocation AtLoc,
                                       SourceLocation ProtoLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ProtoIdLoc,
                                       SourceLocation RParenLoc);

//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
                                     SourceLocation ExternLoc,
                                     Expr *LangStr,
                                     SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
                                      Decl *LinkageSpec,
                                      SourceLocation RBraceLoc);

//===--------------------------------------------------------------------===//
// C++ Classes
//
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                        const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

bool ActOnAccessSpecifier(AccessSpecifier Access,
                          SourceLocation ASLoc,
                          SourceLocation ColonLoc,
                          AttributeList *Attrs = nullptr);

NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
                                    Declarator &D,
                                    MultiTemplateParamsArg TemplateParameterLists,
                                    Expr *BitfieldWidth,
                                    const VirtSpecifiers &VS,
                                    InClassInitStyle InitStyle);

// In-class (NSDMI) member initializer parsing bracket.
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                            SourceLocation EqualLoc,
                                            Expr *Init);

// Constructor mem-initializer parsed with a parenthesized argument list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
                                  Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS,
                                  SourceLocation IdLoc,
                                  SourceLocation LParenLoc,
                                  ArrayRef<Expr *> Args,
                                  SourceLocation RParenLoc,
                                  SourceLocation EllipsisLoc);

// Constructor mem-initializer parsed with a braced-init-list.
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
                                  Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS,
                                  SourceLocation IdLoc,
                                  Expr *InitList,
                                  SourceLocation EllipsisLoc);

MemInitResult BuildMemInitializer(Decl *ConstructorD,
                                  Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS,
                                  SourceLocation IdLoc,
                                  Expr *Init,
                                  SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init,
                                     SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
                                   TypeSourceInfo *BaseTInfo,
                                   Expr *Init,
                                   CXXRecordDecl *ClassDecl,
                                   SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
                                         Expr *Init,
                                         CXXRecordDecl *ClassDecl);

bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
                              CXXCtorInitializer *Initializer);

bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                         ArrayRef<CXXCtorInitializer *> Initializers = None);

void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                            CXXRecordDecl *Record);

/// \brief The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// \brief The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// \brief The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// \brief Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// \brief Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                    bool DefinitionRequired = false);

/// \brief Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                           const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
                                  const CXXRecordDecl *RD);

/// \brief Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

void ActOnMemInitializers(Decl *ConstructorDecl,
                          SourceLocation ColonLoc,
                          ArrayRef<CXXCtorInitializer*> MemInits,
                          bool AnyErrors);

// DLL import/export attribute propagation for classes and their bases.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

void CheckCompletedCXXClass(CXXRecordDecl *Record);
void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
                                       Decl *TagDecl,
                                       SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       AttributeList *AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXMemberDefaultArgs(Decl *D);

// Delayed (late) parsing of member declarations, method bodies and
// default arguments: scope re-entry callbacks.
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   Expr
                                   *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc,
                                   bool Failed);

FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

// Declarator checks for the C++ special member functions.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
                               StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);

void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
                                                 const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();

//===--------------------------------------------------------------------===//
// C++ Derived Classes
//

/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                     SourceRange SpecifierRange,
                                     bool Virtual,
                                     AccessSpecifier Access,
                                     TypeSourceInfo *TInfo,
                                     SourceLocation EllipsisLoc);

BaseResult ActOnBaseSpecifier(Decl *classdecl,
                              SourceRange SpecifierRange,
                              ParsedAttributes &Attrs,
                              bool Virtual,
                              AccessSpecifier Access,
                              ParsedType basetype,
                              SourceLocation BaseLoc,
                              SourceLocation EllipsisLoc);

bool AttachBaseSpecifiers(CXXRecordDecl *Class,
                          CXXBaseSpecifier **Bases,
                          unsigned NumBases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
                         CXXBaseSpecifier **Bases,
                         unsigned NumBases);

bool IsDerivedFrom(QualType Derived, QualType Base);
bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);

// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);

bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  SourceLocation Loc, SourceRange Range,
                                  CXXCastPath *BasePath = nullptr,
                                  bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                  unsigned InaccessibleBaseID,
                                  unsigned AmbigiousBaseConvID,
                                  SourceLocation Loc, SourceRange Range,
                                  DeclarationName Name,
                                  CXXCastPath *BasePath);

std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);

bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                          const CXXMethodDecl *Old);

bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);

/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);

/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

// Result of an access check.
enum AccessResult {
  AR_accessible,
  AR_inaccessible,
  AR_dependent,
  AR_delayed
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

void
HandleDependentAccessCheck(const DependentDiagnostic &DD,
                           const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
                                 const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

// Selects which kind of entity an abstract-class diagnostic refers to.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);

// Convenience wrapper: binds DiagID and the extra arguments into a
// BoundTypeDiagnoser and forwards to the Diagnoser-based overload.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            AbstractDiagSelID SelID = AbstractNone);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true);

void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
                        QualType ObjectType, bool EnteringContext,
                        bool &MemberOfUnknownSpecialization);

TemplateNameKind isTemplateName(Scope *S,
                                CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                UnqualifiedId &Name,
                                ParsedType ObjectType,
                                bool EnteringContext,
                                TemplateTy &Template,
                                bool
                                &MemberOfUnknownSpecialization);

bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
                                 SourceLocation IILoc,
                                 Scope *S,
                                 const CXXScopeSpec *SS,
                                 TemplateTy &SuggestedTemplate,
                                 TemplateNameKind &SuggestedKind);

void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);

// Template parameter declarations: type, non-type and template template
// parameters.
Decl *ActOnTypeParameter(Scope *S, bool Typename,
                         SourceLocation EllipsisLoc,
                         SourceLocation KeyLoc,
                         IdentifierInfo *ParamName,
                         SourceLocation ParamNameLoc,
                         unsigned Depth, unsigned Position,
                         SourceLocation EqualLoc,
                         ParsedType DefaultArg);

QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
                                    unsigned Depth,
                                    unsigned Position,
                                    SourceLocation EqualLoc,
                                    Expr *DefaultArg);
Decl *ActOnTemplateTemplateParameter(Scope *S,
                                     SourceLocation TmpLoc,
                                     TemplateParameterList *Params,
                                     SourceLocation EllipsisLoc,
                                     IdentifierInfo *ParamName,
                                     SourceLocation ParamNameLoc,
                                     unsigned Depth,
                                     unsigned Position,
                                     SourceLocation EqualLoc,
                                     ParsedTemplateArgument DefaultArg);

TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
                           SourceLocation ExportLoc,
                           SourceLocation TemplateLoc,
                           SourceLocation LAngleLoc,
                           Decl **Params, unsigned NumParams,
                           SourceLocation RAngleLoc);

/// \brief The context in which we are checking a template parameter list.
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};

bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists,
    bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);

DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
                              SourceLocation KWLoc, CXXScopeSpec &SS,
                              IdentifierInfo *Name, SourceLocation NameLoc,
                              AttributeList *Attr,
                              TemplateParameterList *TemplateParams,
                              AccessSpecifier AS,
                              SourceLocation ModulePrivateLoc,
                              SourceLocation FriendLoc,
                              unsigned NumOuterTemplateParamLists,
                              TemplateParameterList **OuterTemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);

// Convert parser-level template arguments into semantic form.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

void NoteAllFoundTemplates(TemplateName Name);

QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

TypeResult ActOnTemplateIdType(CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               TemplateTy Template,
                               SourceLocation TemplateLoc,
                               SourceLocation LAngleLoc,
                               ASTTemplateArgsPtr TemplateArgs,
                               SourceLocation RAngleLoc,
                               bool IsCtorOrDtorName = false);

/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
                                  TypeSpecifierType TagSpec,
                                  SourceLocation TagLoc,
                                  CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  TemplateTy TemplateD,
                                  SourceLocation TemplateLoc,
                                  SourceLocation LAngleLoc,
                                  ASTTemplateArgsPtr TemplateArgsIn,
                                  SourceLocation RAngleLoc);

// Variable template specializations and template-id references.
DeclResult ActOnVarTemplateSpecialization(
    Scope *S, Declarator &D, TypeSourceInfo *DI,
    SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
    StorageClass SC, bool IsPartialSpecialization);

DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              SourceLocation TemplateNameLoc,
                              const TemplateArgumentListInfo &TemplateArgs);

ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
                              const DeclarationNameInfo &NameInfo,
                              VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               LookupResult &R,
                               bool RequiresADL,
                               const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        const TemplateArgumentListInfo *TemplateArgs);

TemplateNameKind ActOnDependentTemplateName(Scope *S,
                                            CXXScopeSpec &SS,
                                            SourceLocation TemplateKWLoc,
                                            UnqualifiedId &Name,
                                            ParsedType ObjectType,
                                            bool EnteringContext,
                                            TemplateTy &Template);

DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
                                 SourceLocation KWLoc,
                                 SourceLocation ModulePrivateLoc,
                                 TemplateIdAnnotation &TemplateId,
                                 AttributeList *Attr,
                                 MultiTemplateParamsArg TemplateParameterLists,
                                 SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplateDeclarator(Scope *S,
                              MultiTemplateParamsArg TemplateParameterLists,
                              Declarator &D);
Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
                                      MultiTemplateParamsArg TemplateParameterLists,
                                      Declarator &D);
bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                            TemplateSpecializationKind NewTSK,
                                            NamedDecl
                                            *PrevDecl,
                                            TemplateSpecializationKind PrevTSK,
                                            SourceLocation PrevPtOfInstantiation,
                                            bool &SuppressNew);

bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
                    const TemplateArgumentListInfo &ExplicitTemplateArgs,
                    LookupResult &Previous);

bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
                         TemplateArgumentListInfo *ExplicitTemplateArgs,
                         LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);

// Explicit instantiation of a class template specialization.
DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec,
                                      SourceLocation KWLoc,
                                      const CXXScopeSpec &SS,
                                      TemplateTy Template,
                                      SourceLocation TemplateNameLoc,
                                      SourceLocation LAngleLoc,
                                      ASTTemplateArgsPtr TemplateArgs,
                                      SourceLocation RAngleLoc,
                                      AttributeList *Attr);

// Explicit instantiation naming a member class of a class template.
DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      unsigned TagSpec,
                                      SourceLocation KWLoc,
                                      CXXScopeSpec &SS,
                                      IdentifierInfo *Name,
                                      SourceLocation NameLoc,
                                      AttributeList *Attr);

// Explicit instantiation declared with a declarator (functions/variables).
DeclResult ActOnExplicitInstantiation(Scope *S,
                                      SourceLocation ExternLoc,
                                      SourceLocation TemplateLoc,
                                      Declarator &D);

TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
                                        SourceLocation TemplateLoc,
                                        SourceLocation RAngleLoc,
                                        Decl *Param,
                                        SmallVectorImpl<TemplateArgument> &Converted,
                                        bool &HasDefaultArg);

/// \brief Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
  /// \brief The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,

  /// \brief The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,

  /// \brief The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param,
                           TemplateArgumentLoc &Arg,
                           NamedDecl *Template,
                           SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// \brief Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. 
UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression, UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-depnedent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know. 
TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl 
*FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); 
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. 
SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. 
llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. 
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex; // index to restore on destruction

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

friend class ArgumentPackSubstitutionRAII;

/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used to select the exception-specification constructor below.
  struct ExceptionSpecification {};

  /// \brief Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting template arguments for the given
  /// function template; the precise activity (e.g. explicit argument
  /// substitution or deduced-argument substitution) is given by \p Kind,
  /// with diagnostics captured into \p DeductionInfo.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        ActiveTemplateInstantiation::InstantiationKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting into the given function
  /// parameter (presumably instantiating its default argument —
  /// NOTE(review): confirm against the constructor's implementation).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template, NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// \brief Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool SavedInNonInstantiationSFINAEContext;

  // Reports an error (and returns true) when pushing this entry would
  // exceed the configured instantiation-depth limit.
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common delegate used by the public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  // Non-copyable: each object corresponds to exactly one stack entry.
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void PrintInstantiationStack();

/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  // Saved state, restored verbatim in the destructor.
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) {
    // If no instantiation-based SFINAE context is active, mark this as a
    // non-instantiation SFINAE context for the trap's lifetime.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// \brief RAII object that, when \p Enabled, swaps the pending-instantiation
/// queue and the VTableUses list out of the given \c Sema on construction and
/// swaps them back on destruction, asserting that both have been fully
/// drained in the interim.
class SavePendingInstantiationsAndVTableUsesRAII {
public:
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled; // when false, construction and destruction are no-ops
};

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class SavePendingLocalImplicitInstantiationsRAII { public: SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } ~SavePendingLocalImplicitInstantiationsRAII() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// 
expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param NumExprs The number of expressions in \p Exprs. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const 
MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl 
*CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation 
*ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, const IdentifierLocPair *IdentList, unsigned NumElts, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, const IdentifierLocPair *ProtocolId, unsigned NumProtocols, SmallVectorImpl<Decl *> &Protocols); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed /// \param CD The semantic container for the property /// \param redeclaredProperty Declaration for property if redeclared /// in class extension. /// \param lexicalDC Container for redeclaredProperty. void ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty = nullptr, ObjCContainerDecl *lexicalDC = nullptr); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *OverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. 
SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                   CastKind &Kind);

bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType,
                                      QualType SrcType,
                                      ObjCInterfaceDecl *&RelatedClass,
                                      ObjCMethodDecl *&ClassMethod,
                                      ObjCMethodDecl *&InstanceMethod,
                                      TypedefNameDecl *&TDNDecl, bool CfToNs);

bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType,
                                       QualType SrcType, Expr *&SrcExpr);

bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);

bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
                     Expr *Alignment, SourceLocation PragmaLoc,
                     SourceLocation LParenLoc, SourceLocation RParenLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
                          int SectionFlags, StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
                       Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                       SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                          IdentifierInfo* AliasName,
                          SourceLocation PragmaLoc,
                          SourceLocation WeakNameLoc,
                          SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT
void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                 SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

// OpenMP directives and clauses.
private:
  void *VarDataSharingAttributesStack;
  /// \brief Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();
  ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                   OpenMPClauseKind CKind);
public:
  /// \brief Checks if the specified variable is used in one of the private
  /// clauses in OpenMP constructs.
  bool IsOpenMPCapturedVar(VarDecl *VD);

  /// \brief Check if the specified variable is used in a private clause in
  /// OpenMP constructs.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// \brief Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName,
                           Scope *CurScope,
                           SourceLocation Loc);
  /// \brief Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// \brief End analysis of clauses.
  void EndOpenMPClause();
  /// \brief Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// \brief Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// \brief Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
                                     CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id);
  /// \brief Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// \brief End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel' after parsing
  /// of the  associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the  associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the  associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the  associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                  Stmt *AStmt,
                                                  SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// \brief Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
  /// \brief Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                     unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
                                                unsigned Argument, Expr *Expr,
                                                SourceLocation StartLoc,
                                                SourceLocation LParenLoc,
                                                SourceLocation ArgumentLoc,
                                                SourceLocation CommaLoc,
                                                SourceLocation EndLoc);
  /// \brief Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
                                       Expr *ChunkSize,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation KindLoc,
                                       SourceLocation CommaLoc,
                                       SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// \brief Called on well-formed 'ordered' clause.
  OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// \brief Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
      SourceLocation DepLoc);
  /// \brief Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'reduction' clause.
  OMPClause *
  ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList,
                             SourceLocation StartLoc,
                             SourceLocation LParenLoc,
                             SourceLocation ColonLoc,
                             SourceLocation EndLoc,
                             CXXScopeSpec &ReductionIdScopeSpec,
                             const DeclarationNameInfo &ReductionId);
  /// \brief Called on well-formed 'linear' clause.
  OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList,
                                     Expr *Step,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                          SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation LParenLoc,
                          SourceLocation EndLoc);

  /// \brief The kind of conversion being performed.
  enum CheckedConversionKind {
    /// \brief An implicit conversion.
    CCK_ImplicitConversion,
    /// \brief A C-style cast.
    CCK_CStyleCast,
    /// \brief A functional-style cast.
    CCK_FunctionalCast,
    /// \brief A cast other than a C-style cast.
    CCK_OtherCast
  };

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of an unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand. This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// form of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed.  These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc.  The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,

    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,

    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,

    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,

    /// IncompatiblePointer - The assignment is between two pointers types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,

    /// IncompatiblePointerSign - The assignment is between two pointers types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's by
    /// far the most common case of incompatible pointers.
    IncompatiblePointerSign,

    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,

    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,

    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,

    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,

    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,

    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointers types that are not compatible.
    IncompatibleBlockPointer,

    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For example,
    /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
    IncompatibleObjCQualifiedId,

    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,

    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy.  This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                                SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and prepare for a conversion of the
  /// RHS to the LHS type.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS,
                                               CastKind &Kind);

  // CheckSingleAssignmentConstraints - Currently used by
  // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
  // this routine performs the default function/array conversions.
  AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                     ExprResult &RHS,
                                                     bool Diagnose = true,
                                                     bool DiagnoseCFAudited = false);

  // \brief If the lhs type is a transparent union, check whether we
  // can initialize the transparent union with the given expression.
  AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                             ExprResult &RHS);

  bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

  bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit = false);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       AssignmentAction Action,
                                       bool AllowExplicit,
                                       ImplicitConversionSequence& ICS);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const ImplicitConversionSequence& ICS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK
                                          = CCK_ImplicitConversion);
  ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                       const StandardConversionSequence& SCS,
                                       AssignmentAction Action,
                                       CheckedConversionKind CCK);

  /// the following "Check" methods will return a valid/converted QualType
  /// or a null QualType (indicating an error diagnostic was issued).

  /// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
// --- Vector operands, casts, ARC, and Objective-C message checking ---

QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);
bool isLaxVectorConversion(QualType srcType, QualType destType);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).

/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible_With_Added_Qualification - The two types are
  /// reference-compatible with added qualification, meaning that
  /// they are reference-compatible and the qualifiers on T1 (cv1)
  /// are greater than the qualifiers on T2 (cv2).
  Ref_Compatible_With_Added_Qualification,
  /// Ref_Compatible - The two types are reference-compatible and
  /// have equivalent qualifiers (cv1 == cv2).
  Ref_Compatible
};

ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             bool &DerivedToBase, bool &ObjCConversion,
                             bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                           QualType castType, Expr *&op,
                                           CheckedConversionKind CCK,
                                           bool DiagnoseCFAudited = false,
                                           BinaryOperatorKind Opc = BO_PtrMemD
                                           );

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args,
                               Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage, SourceLocation lbrac,
                               SourceLocation rbrac, SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
                                  ObjCMethodDecl *Method, bool isClassMessage,
                                  bool isSuperMessage);

/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);

/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);

ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  // When true, suppress all diagnostics (caller only wants the verdict).
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

// CUDA host/device targeting classification for function declarations.
enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \name Code completion
//@{

/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};

// Entry points invoked by the parser at each completion point.
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
                              ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers,
                          bool AllowNestedNameSpecifiers);

struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
                            const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc, bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
                             ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);

void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                             bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
    Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers);

void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                  bool AfterAmpersand);

// Objective-C specific completion points.
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                 bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression,
                                  bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                     ArrayRef<IdentifierInfo *> SelIdents,
                                     bool AtArgumentExpression,
                                     ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
                              ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
                                        unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName,
                                       SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
                                            IdentifierInfo *ClassName,
                                            SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                            IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod,
                                ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
                                        bool IsInstanceMethod,
                                        bool AtParameterName,
                                        ParsedType ReturnType,
                                        ArrayRef<IdentifierInfo *> SelIdents);

// Preprocessor completion points.
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
                                           IdentifierInfo *Macro,
                                           MacroInfo *MacroInfo,
                                           unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
                                 CodeCompletionTUInfo &CCTUInfo,
                  SmallVectorImpl<CodeCompletionResult> &Results);
//@}

//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system

public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
                                              unsigned ByteNo) const;

private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                      const ArraySubscriptExpr *ASE=nullptr,
                      bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
  unsigned FormatIdx;
  unsigned FirstDataArg;
  bool HasVAListArg;
};

bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                         FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                       const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                         ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                      const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
                          ArrayRef<const Expr *> Args,
                          const FunctionProtoType *Proto,
                          SourceLocation Loc);

void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
               ArrayRef<const Expr *> Args, bool IsMemberFunction,
               SourceLocation Loc, SourceRange Range,
               VariadicCallType CallType);

bool CheckObjCString(Expr *Arg);

// Per-target builtin call validation.
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
                                    unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                 unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);

public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                 SourceLocation BuiltinLoc,
                                 SourceLocation RParenLoc);

private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                   AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                            llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                 int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                              int ArgNum, unsigned ExpectedFieldNum,
                              bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);

public:
// Kinds of format strings recognized by format-attribute checking.
enum FormatStringType {
  FST_Scanf,
  FST_Printf,
  FST_NSString,
  FST_Strftime,
  FST_Strfmon,
  FST_Kprintf,
  FST_FreeBSDKPrintf,
  FST_OSTrace,
  FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
                       ArrayRef<const Expr *> Args, bool HasVAListArg,
                       unsigned format_idx, unsigned firstDataArg,
                       FormatStringType Type, bool inFunctionCall,
                       VariadicCallType CallType,
                       llvm::SmallBitVector &CheckedVarArgs);

bool FormatStringHasSArg(const StringLiteral *FExpr);

bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
bool CheckFormatArguments(const FormatAttr *Format,
                          ArrayRef<const Expr *> Args,
                          bool IsCXXMember,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange Range,
                          llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
                          bool HasVAListArg, unsigned format_idx,
                          unsigned firstDataArg, FormatStringType Type,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange range,
                          llvm::SmallBitVector &CheckedVarArgs);

void CheckAbsoluteValueFunction(const CallExpr *Call,
                                const FunctionDecl *FDecl,
                                IdentifierInfo *FnInfo);

void CheckMemaccessArguments(const CallExpr *Call,
                             unsigned BId,
                             IdentifierInfo *FnName);

void CheckStrlcpycatArguments(const CallExpr *Call,
                              IdentifierInfo *FnName);

void CheckStrncatArguments(const CallExpr *Call,
                           IdentifierInfo *FnName);

void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                        SourceLocation ReturnLoc,
                        bool isObjCMethod = false,
                        const AttrVec *Attrs = nullptr,
                        const FunctionDecl *FD = nullptr);

void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);

/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                        bool IsConstexpr = false);

void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                 Expr *Init);

/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);

/// \brief Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// \brief Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

// Type information associated with a registered type-tag magic value.
struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
      Type(Type), LayoutCompatible(LayoutCompatible),
      MustBeNull(MustBeNull)
  {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value.  This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// \brief A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// \brief Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const Expr * const *ExprArgs);

/// \brief The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

// HLSL Change Starts
bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, TypeSourceInfo* TInfo,
                      bool isParameter);
void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl);
// HLSL Change Ends

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// \brief Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

AvailabilityResult getCurContextAvailability() const;

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// \brief To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
    : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
    : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  // Restores the previous evaluation context on scope exit.
  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
// ===== TSDFVoxelGridImpl.h =====
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ----------------------------------------------------------------------------

#include <atomic>
#include <cmath>

#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Console.h"
#include "open3d/utility/Timer.h"

namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {

// Integrates one depth (and optionally color) frame into the TSDF voxel
// blocks addressed by `indices`.  This translation unit is compiled twice:
// as IntegrateCUDA when __CUDACC__ is defined, otherwise as IntegrateCPU.
// NOTE(review): `depth` is divided by `depth_scale` and read via a float
// indexer — assumes a Float32 depth image; confirm against callers.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr =
            static_cast<const int64_t*>(indices.GetDataPtr());

    // One workload item per voxel across all touched blocks.
    int64_t n = indices.GetLength() * resolution3;

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif

    // voxel_t is bound by DISPATCH_BYTESIZE_TO_VOXEL from the element size
    // of the voxel block buffer.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE(
                                                        int64_t workload_idx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t block_idx = indices_ptr[workload_idx / resolution3];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtrFromCoord<int>(
                                    block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // coordinate in world (in voxel)
                    int64_t x = (xb * resolution + xv);
                    int64_t y = (yb * resolution + yv);
                    int64_t z = (zb * resolution + zv);

                    // coordinate in camera (in voxel -> in meter)
                    float xc, yc, zc, u, v;
                    transform_indexer.RigidTransform(
                            static_cast<float>(x), static_cast<float>(y),
                            static_cast<float>(z), &xc, &yc, &zc);

                    // coordinate in image (in pixel)
                    transform_indexer.Project(xc, yc, zc, &u, &v);
                    if (!depth_indexer.InBoundary(u, v)) {
                        return;
                    }

                    // Associate image workload and compute SDF and TSDF.
                    float depth = *depth_indexer.GetDataPtrFromCoord<float>(
                                          static_cast<int64_t>(u),
                                          static_cast<int64_t>(v)) /
                                  depth_scale;

                    float sdf = (depth - zc);
                    // Skip invalid depth, voxels behind the camera, and
                    // voxels deeper than the truncation band behind the
                    // observed surface.
                    if (depth <= 0 || depth > depth_max || zc <= 0 ||
                        sdf < -sdf_trunc) {
                        return;
                    }
                    // Truncate and normalize to [-1, 1].
                    sdf = sdf < sdf_trunc ? sdf : sdf_trunc;
                    sdf /= sdf_trunc;

                    // Associate voxel workload and update TSDF/Weights
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer
                                    .GetDataPtrFromCoord<voxel_t>(
                                            xv, yv, zv, block_idx);

                    if (integrate_color) {
                        float* color_ptr =
                                color_indexer.GetDataPtrFromCoord<float>(
                                        static_cast<int64_t>(u),
                                        static_cast<int64_t>(v));

                        voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                             color_ptr[2]);
                    } else {
                        voxel_ptr->Integrate(sdf);
                    }
                });
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

// Extracts surface points (zero crossings of the TSDF) from the voxel
// blocks.  Compiled as ExtractSurfacePointsCUDA under __CUDACC__, otherwise
// as ExtractSurfacePointsCPU.  (Definition continues below.)
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that does not require indexers
    const int64_t* indices_ptr =
            static_cast<const int64_t*>(indices.GetDataPtr());

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output
    // Point counter: a device tensor on CUDA, a host atomic on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher launcher;
#else
    core::kernel::CPULauncher launcher;
#endif if (valid_size < 0) { utility::LogWarning( "No estimated max point cloud size provided, using a 2-pass " "estimation. Surface extraction could be slow."); // This pass determines valid number of points. DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel( n, [=] OPEN3D_DEVICE(int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, // voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); voxel_t* voxel_ptr = voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float weight_o = voxel_ptr->GetWeight(); if (weight_o <= weight_threshold) return; // Enumerate x-y-z directions for (int i = 0; i < 3; ++i) { voxel_t* ptr = GetVoxelAt( static_cast<int>(xv) + (i == 0), static_cast<int>(yv) + (i == 1), static_cast<int>(zv) + (i == 2), static_cast<int>( workload_block_idx)); if (ptr == nullptr) continue; float tsdf_i = ptr->GetTSDF(); float weight_i = ptr->GetWeight(); if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) { OPEN3D_ATOMIC_ADD(count_ptr, 1); } } }); }); #if defined(__CUDACC__) valid_size = count[0].Item<int>(); count[0] = 0; #else valid_size = (*count_ptr).load(); (*count_ptr) = 0; #endif } int max_count = valid_size; if (points.GetLength() == 0) { points = core::Tensor({max_count, 3}, core::Dtype::Float32, block_values.GetDevice()); } NDArrayIndexer point_indexer(points, 1); // Normals bool extract_normal = false; NDArrayIndexer 
normal_indexer; if (normals.has_value()) { extract_normal = true; if (normals.value().get().GetLength() == 0) { normals.value().get() = core::Tensor({max_count, 3}, core::Dtype::Float32, block_values.GetDevice()); } normal_indexer = NDArrayIndexer(normals.value().get(), 1); } // This pass extracts exact surface points. DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { // Colors bool extract_color = false; NDArrayIndexer color_indexer; if (voxel_t::HasColor() && colors.has_value()) { extract_color = true; if (colors.value().get().GetLength() == 0) { colors.value().get() = core::Tensor( {max_count, 3}, core::Dtype::Float32, block_values.GetDevice()); } color_indexer = NDArrayIndexer(colors.value().get(), 1); } launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo, int curr_block_idx, float* n) { return DeviceGetNormalAt<voxel_t>( xo, yo, zo, curr_block_idx, n, static_cast<int>(resolution), voxel_size, nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = workload_idx % resolution3; /// Coordinate transform // block_idx -> (x_block, y_block, z_block) int* block_key_ptr = block_keys_indexer.GetDataPtrFromCoord<int>( block_idx); int64_t xb = static_cast<int64_t>(block_key_ptr[0]); int64_t yb = static_cast<int64_t>(block_key_ptr[1]); int64_t zb = static_cast<int64_t>(block_key_ptr[2]); // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, 
&xv, &yv, &zv); voxel_t* voxel_ptr = voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float weight_o = voxel_ptr->GetWeight(); if (weight_o <= weight_threshold) return; int64_t x = xb * resolution + xv; int64_t y = yb * resolution + yv; int64_t z = zb * resolution + zv; float no[3] = {0}, ni[3] = {0}; if (extract_normal) { GetNormalAt(static_cast<int>(xv), static_cast<int>(yv), static_cast<int>(zv), static_cast<int>(workload_block_idx), no); } // Enumerate x-y-z axis for (int i = 0; i < 3; ++i) { voxel_t* ptr = GetVoxelAt( static_cast<int>(xv) + (i == 0), static_cast<int>(yv) + (i == 1), static_cast<int>(zv) + (i == 2), static_cast<int>(workload_block_idx)); if (ptr == nullptr) continue; float tsdf_i = ptr->GetTSDF(); float weight_i = ptr->GetWeight(); if (weight_i > weight_threshold && tsdf_i * tsdf_o < 0) { float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o); int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); if (idx >= valid_size) { printf("Point cloud size larger than " "estimated, please increase the " "estimation!\n"); return; } float* point_ptr = point_indexer.GetDataPtrFromCoord<float>( idx); point_ptr[0] = voxel_size * (x + ratio * int(i == 0)); point_ptr[1] = voxel_size * (y + ratio * int(i == 1)); point_ptr[2] = voxel_size * (z + ratio * int(i == 2)); if (extract_color) { float* color_ptr = color_indexer .GetDataPtrFromCoord<float>( idx); float r_o = voxel_ptr->GetR(); float g_o = voxel_ptr->GetG(); float b_o = voxel_ptr->GetB(); float r_i = ptr->GetR(); float g_i = ptr->GetG(); float b_i = ptr->GetB(); color_ptr[0] = ((1 - ratio) * r_o + ratio * r_i) / 255.0f; color_ptr[1] = ((1 - ratio) * g_o + ratio * g_i) / 255.0f; color_ptr[2] = ((1 - ratio) * b_o + ratio * b_i) / 255.0f; } if (extract_normal) { GetNormalAt( static_cast<int>(xv) + (i == 0), static_cast<int>(yv) + (i == 1), static_cast<int>(zv) + (i == 2), static_cast<int>(workload_block_idx), ni); float* normal_ptr = normal_indexer 
.GetDataPtrFromCoord<float>( idx); float nx = (1 - ratio) * no[0] + ratio * ni[0]; float ny = (1 - ratio) * no[1] + ratio * ni[1]; float nz = (1 - ratio) * no[2] + ratio * ni[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; } } } }); }); #if defined(__CUDACC__) int total_count = count.Item<int>(); #else int total_count = (*count_ptr).load(); #endif utility::LogDebug("{} vertices extracted", total_count); valid_size = total_count; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } #if defined(__CUDACC__) void ExtractSurfaceMeshCUDA #else void ExtractSurfaceMeshCPU #endif (const core::Tensor& indices, const core::Tensor& inv_indices, const core::Tensor& nb_indices, const core::Tensor& nb_masks, const core::Tensor& block_keys, const core::Tensor& block_values, core::Tensor& vertices, core::Tensor& triangles, core::Tensor& normals, core::Tensor& colors, int64_t resolution, float voxel_size, float weight_threshold) { int64_t resolution3 = resolution * resolution * resolution; // Shape / transform indexers, no data involved NDArrayIndexer voxel_indexer({resolution, resolution, resolution}); // Output #if defined(__CUDACC__) core::CUDACachedMemoryManager::ReleaseCache(); #endif int n_blocks = static_cast<int>(indices.GetLength()); // Voxel-wise mesh info. 4 channels correspond to: // 3 edges' corresponding vertex index + 1 table index. core::Tensor mesh_structure; try { mesh_structure = core::Tensor::Zeros( {n_blocks, resolution, resolution, resolution, 4}, core::Dtype::Int32, block_keys.GetDevice()); } catch (const std::runtime_error&) { utility::LogError( "[MeshExtractionKernel] Unable to allocate assistance mesh " "structure for Marching " "Cubes with {} active voxel blocks. 
Please consider using a " "larger voxel size (currently {}) for TSDF " "integration, or using tsdf_volume.cpu() to perform mesh " "extraction on CPU.", n_blocks, voxel_size); } // Real data indexer NDArrayIndexer voxel_block_buffer_indexer(block_values, 4); NDArrayIndexer mesh_structure_indexer(mesh_structure, 4); NDArrayIndexer nb_block_masks_indexer(nb_masks, 2); NDArrayIndexer nb_block_indices_indexer(nb_indices, 2); // Plain arrays that does not require indexers const int64_t* indices_ptr = indices.GetDataPtr<int64_t>(); const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>(); int64_t n = n_blocks * resolution3; #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; #endif // Pass 0: analyze mesh structure, set up one-on-one correspondences from // edges to vertices. DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Check per-vertex sign in the cube to determine cube type int table_idx = 0; for (int i = 0; i < 8; ++i) { voxel_t* voxel_ptr_i = GetVoxelAt( static_cast<int>(xv) + vtx_shifts[i][0], static_cast<int>(yv) + vtx_shifts[i][1], static_cast<int>(zv) + vtx_shifts[i][2], static_cast<int>(workload_block_idx)); if (voxel_ptr_i == nullptr) return; float tsdf_i = voxel_ptr_i->GetTSDF(); float weight_i = voxel_ptr_i->GetWeight(); if (weight_i <= 
weight_threshold) return;

                    // Set bit i if the i-th cube corner is behind the surface
                    // (negative TSDF); the 8-bit result indexes the marching
                    // cubes tables.
                    table_idx |= ((tsdf_i < 0) ? (1 << i) : 0);
                }

                // Store the cube configuration in channel 3 of the per-voxel
                // mesh structure.
                int* mesh_struct_ptr =
                        mesh_structure_indexer.GetDataPtrFromCoord<int>(
                                xv, yv, zv, workload_block_idx);
                mesh_struct_ptr[3] = table_idx;
                // All corners on the same side => no surface crosses this cube.
                if (table_idx == 0 || table_idx == 255) return;

                // Check per-edge sign in the cube to determine cube type
                int edges_with_vertices = edge_table[table_idx];
                for (int i = 0; i < 12; ++i) {
                    if (edges_with_vertices & (1 << i)) {
                        // Edge owner voxel may live in a neighboring block:
                        // compute its local voxel coordinate and block offset.
                        int64_t xv_i = xv + edge_shifts[i][0];
                        int64_t yv_i = yv + edge_shifts[i][1];
                        int64_t zv_i = zv + edge_shifts[i][2];
                        int edge_i = edge_shifts[i][3];

                        // Block offset in each dimension (0 or 1 here, since
                        // edge shifts are at most +1 voxel).
                        int dxb = static_cast<int>(xv_i / resolution);
                        int dyb = static_cast<int>(yv_i / resolution);
                        int dzb = static_cast<int>(zv_i / resolution);

                        // Index into the 3x3x3 neighbor-block table
                        // (center at offset (1,1,1)).
                        int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9;

                        int64_t block_idx_i =
                                *nb_block_indices_indexer
                                         .GetDataPtrFromCoord<int64_t>(
                                                 workload_block_idx, nb_idx);
                        int* mesh_ptr_i =
                                mesh_structure_indexer.GetDataPtrFromCoord<
                                        int>(xv_i - dxb * resolution,
                                             yv_i - dyb * resolution,
                                             zv_i - dzb * resolution,
                                             inv_indices_ptr[block_idx_i]);

                        // Non-atomic write, but we are safe: all racing
                        // writers store the same sentinel (-1 = "vertex
                        // needed on this edge").
                        mesh_ptr_i[edge_i] = -1;
                    }
                }
            });
        });

    // Pass 1: determine valid number of vertices.
// Count the edges flagged -1 in Pass 0 (each becomes one mesh vertex), then
// allocate the vertex/normal buffers accordingly. The counter is a device
// tensor on CUDA and a std::atomic on CPU.
#if defined(__CUDACC__)
    // NOTE(review): shape {} scalar tensor holding the running count.
    core::Tensor vtx_count(std::vector<int>{0}, {}, core::Dtype::Int32,
                           block_values.GetDevice());
    int* vtx_count_ptr = vtx_count.GetDataPtr<int>();
#else
    std::atomic<int> vtx_count_atomic(0);
    std::atomic<int>* vtx_count_ptr = &vtx_count_atomic;
#endif

#if defined(__CUDACC__)
    core::kernel::CUDALauncher::LaunchGeneralKernel(
            n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
#else
    core::kernel::CPULauncher::LaunchGeneralKernel(
            n, [&](int64_t workload_idx) {
#endif
                // Natural index (0, N) -> (block_idx, voxel_idx)
                int64_t workload_block_idx = workload_idx / resolution3;
                int64_t voxel_idx = workload_idx % resolution3;

                // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                int64_t xv, yv, zv;
                voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                // Obtain voxel's mesh struct ptr
                int* mesh_struct_ptr =
                        mesh_structure_indexer.GetDataPtrFromCoord<int>(
                                xv, yv, zv, workload_block_idx);

                // Early quit -- no allocated vertex to compute (none of the
                // three owned edges was flagged in Pass 0).
                if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 &&
                    mesh_struct_ptr[2] != -1) {
                    return;
                }

                // Enumerate 3 edges in the voxel; each flagged edge
                // contributes exactly one vertex.
                for (int e = 0; e < 3; ++e) {
                    int vertex_idx = mesh_struct_ptr[e];
                    if (vertex_idx != -1) continue;

                    OPEN3D_ATOMIC_ADD(vtx_count_ptr, 1);
                }
            });

    // Reset count_ptr so Pass 2 can reuse it to assign vertex indices.
#if defined(__CUDACC__)
    int total_vtx_count = vtx_count.Item<int>();
    vtx_count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
                             block_values.GetDevice());
    vtx_count_ptr = vtx_count.GetDataPtr<int>();
#else
    int total_vtx_count = (*vtx_count_ptr).load();
    (*vtx_count_ptr) = 0;
#endif

    utility::LogDebug("Total vertex count = {}", total_vtx_count);
    vertices = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32,
                            block_values.GetDevice());
    normals = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32,
                           block_values.GetDevice());

    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer vertex_indexer(vertices, 1);
    NDArrayIndexer normal_indexer(normals, 1);

    // Pass 2: extract vertices.
DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { bool extract_color = false; NDArrayIndexer color_indexer; if (voxel_t::HasColor()) { extract_color = true; colors = core::Tensor({total_vtx_count, 3}, core::Dtype::Float32, block_values.GetDevice()); color_indexer = NDArrayIndexer(colors, 1); } launcher.LaunchGeneralKernel(n, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo, int curr_block_idx, float* n) { return DeviceGetNormalAt<voxel_t>( xo, yo, zo, curr_block_idx, n, static_cast<int>(resolution), voxel_size, nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = workload_idx % resolution3; // block_idx -> (x_block, y_block, z_block) int* block_key_ptr = block_keys_indexer.GetDataPtrFromCoord<int>( block_idx); int64_t xb = static_cast<int64_t>(block_key_ptr[0]); int64_t yb = static_cast<int64_t>(block_key_ptr[1]); int64_t zb = static_cast<int64_t>(block_key_ptr[2]); // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // global coordinate (in voxels) int64_t x = xb * resolution + xv; int64_t y = yb * resolution + yv; int64_t z = zb * resolution + zv; // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtrFromCoord<int>( xv, yv, zv, workload_block_idx); // Early quit -- no allocated vertex to compute if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 && mesh_struct_ptr[2] != -1) { return; } // Obtain voxel 
ptr voxel_t* voxel_ptr = voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float no[3] = {0}, ne[3] = {0}; GetNormalAt(static_cast<int>(xv), static_cast<int>(yv), static_cast<int>(zv), static_cast<int>(workload_block_idx), no); // Enumerate 3 edges in the voxel for (int e = 0; e < 3; ++e) { int vertex_idx = mesh_struct_ptr[e]; if (vertex_idx != -1) continue; voxel_t* voxel_ptr_e = GetVoxelAt( static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx)); float tsdf_e = voxel_ptr_e->GetTSDF(); float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o); int idx = OPEN3D_ATOMIC_ADD(vtx_count_ptr, 1); mesh_struct_ptr[e] = idx; float ratio_x = ratio * int(e == 0); float ratio_y = ratio * int(e == 1); float ratio_z = ratio * int(e == 2); float* vertex_ptr = vertex_indexer.GetDataPtrFromCoord<float>(idx); vertex_ptr[0] = voxel_size * (x + ratio_x); vertex_ptr[1] = voxel_size * (y + ratio_y); vertex_ptr[2] = voxel_size * (z + ratio_z); float* normal_ptr = normal_indexer.GetDataPtrFromCoord<float>(idx); GetNormalAt(static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx), ne); float nx = (1 - ratio) * no[0] + ratio * ne[0]; float ny = (1 - ratio) * no[1] + ratio * ne[1]; float nz = (1 - ratio) * no[2] + ratio * ne[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; if (extract_color) { float* color_ptr = color_indexer.GetDataPtrFromCoord<float>( idx); float r_o = voxel_ptr->GetR(); float g_o = voxel_ptr->GetG(); float b_o = voxel_ptr->GetB(); float r_e = voxel_ptr_e->GetR(); float g_e = voxel_ptr_e->GetG(); float b_e = voxel_ptr_e->GetB(); color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 255.0f; color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f; 
color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f; } } }); }); // Pass 3: connect vertices and form triangles. #if defined(__CUDACC__) core::Tensor triangle_count(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); int* tri_count_ptr = triangle_count.GetDataPtr<int>(); #else std::atomic<int> tri_count_atomic(0); std::atomic<int>* tri_count_ptr = &tri_count_atomic; #endif triangles = core::Tensor({total_vtx_count * 3, 3}, core::Dtype::Int64, block_values.GetDevice()); NDArrayIndexer triangle_indexer(triangles, 1); #if defined(__CUDACC__) core::kernel::CUDALauncher::LaunchGeneralKernel( n, [=] OPEN3D_DEVICE(int64_t workload_idx) { #else core::kernel::CPULauncher::LaunchGeneralKernel( n, [&](int64_t workload_idx) { #endif // Natural index (0, N) -> (block_idx, // voxel_idx) int64_t workload_block_idx = workload_idx / resolution3; int64_t voxel_idx = workload_idx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtrFromCoord<int>( xv, yv, zv, workload_block_idx); int table_idx = mesh_struct_ptr[3]; if (tri_count[table_idx] == 0) return; for (size_t tri = 0; tri < 16; tri += 3) { if (tri_table[table_idx][tri] == -1) return; int tri_idx = OPEN3D_ATOMIC_ADD(tri_count_ptr, 1); for (size_t vertex = 0; vertex < 3; ++vertex) { int edge = tri_table[table_idx][tri + vertex]; int64_t xv_i = xv + edge_shifts[edge][0]; int64_t yv_i = yv + edge_shifts[edge][1]; int64_t zv_i = zv + edge_shifts[edge][2]; int64_t edge_i = edge_shifts[edge][3]; int dxb = static_cast<int>(xv_i / resolution); int dyb = static_cast<int>(yv_i / resolution); int dzb = static_cast<int>(zv_i / resolution); int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9; int64_t block_idx_i = *nb_block_indices_indexer .GetDataPtrFromCoord<int64_t>( workload_block_idx, nb_idx); int* mesh_struct_ptr_i = 
mesh_structure_indexer.GetDataPtrFromCoord<int>( xv_i - dxb * resolution, yv_i - dyb * resolution, zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]); int64_t* triangle_ptr = triangle_indexer.GetDataPtrFromCoord<int64_t>( tri_idx); triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i]; } } }); #if defined(__CUDACC__) int total_tri_count = triangle_count.Item<int>(); #else int total_tri_count = (*tri_count_ptr).load(); #endif utility::LogDebug("Total triangle count = {}", total_tri_count); triangles = triangles.Slice(0, 0, total_tri_count); } #if defined(__CUDACC__) void EstimateRangeCUDA #else void EstimateRangeCPU #endif (const core::Tensor& block_keys, core::Tensor& range_minmax_map, const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h, int w, int down_factor, int64_t block_resolution, float voxel_size, float depth_min, float depth_max) { // TODO(wei): reserve it in a reusable buffer // Every 2 channels: (min, max) int h_down = h / down_factor; int w_down = w / down_factor; range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32, block_keys.GetDevice()); NDArrayIndexer range_map_indexer(range_minmax_map, 2); // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max) const int fragment_size = 16; const int frag_buffer_size = 65535; // TODO(wei): explicit buffer core::Tensor fragment_buffer = core::Tensor({frag_buffer_size, 6}, core::Dtype::Float32, block_keys.GetDevice()); NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1); NDArrayIndexer block_keys_indexer(block_keys, 1); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); #if defined(__CUDACC__) core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32, block_keys.GetDevice()); int* count_ptr = count.GetDataPtr<int>(); #else std::atomic<int> count_atomic(0); std::atomic<int>* count_ptr = &count_atomic; #endif #if defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; using std::max; using std::min; #endif 
// Pass 0: iterate over blocks, fill-in an rendering fragment array launcher.LaunchGeneralKernel( block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) { int* key = block_keys_indexer.GetDataPtrFromCoord<int>( workload_idx); int u_min = w_down - 1, v_min = h_down - 1, u_max = 0, v_max = 0; float z_min = depth_max, z_max = depth_min; float xc, yc, zc, u, v; // Project 8 corners to low-res image and form a rectangle for (int i = 0; i < 8; ++i) { float xw = (key[0] + ((i & 1) > 0)) * block_resolution * voxel_size; float yw = (key[1] + ((i & 2) > 0)) * block_resolution * voxel_size; float zw = (key[2] + ((i & 4) > 0)) * block_resolution * voxel_size; w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc, &zc); if (zc <= 0) continue; // Project to the down sampled image buffer w2c_transform_indexer.Project(xc, yc, zc, &u, &v); u /= down_factor; v /= down_factor; v_min = min(static_cast<int>(floorf(v)), v_min); v_max = max(static_cast<int>(ceilf(v)), v_max); u_min = min(static_cast<int>(floorf(u)), u_min); u_max = max(static_cast<int>(ceilf(u)), u_max); z_min = min(z_min, zc); z_max = max(z_max, zc); } v_min = max(0, v_min); v_max = min(h_down - 1, v_max); u_min = max(0, u_min); u_max = min(w_down - 1, u_max); if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return; // Divide the rectangle into small 16x16 fragments int frag_v_count = ceilf(float(v_max - v_min + 1) / float(fragment_size)); int frag_u_count = ceilf(float(u_max - u_min + 1) / float(fragment_size)); int frag_count = frag_v_count * frag_u_count; int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, 1); int frag_count_end = frag_count_start + frag_count; if (frag_count_end >= frag_buffer_size) { printf("Fragment count exceeding buffer size, abort!\n"); } int offset = 0; for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) { for (int frag_u = 0; frag_u < frag_u_count; ++frag_u, ++offset) { float* frag_ptr = frag_buffer_indexer.GetDataPtrFromCoord<float>( frag_count_start + offset); // zmin, 
zmax frag_ptr[0] = z_min; frag_ptr[1] = z_max; // vmin, umin frag_ptr[2] = v_min + frag_v * fragment_size; frag_ptr[3] = u_min + frag_u * fragment_size; // vmax, umax frag_ptr[4] = min(frag_ptr[2] + fragment_size - 1, static_cast<float>(v_max)); frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1, static_cast<float>(u_max)); } } }); #if defined(__CUDACC__) int frag_count = count[0].Item<int>(); #else int frag_count = (*count_ptr).load(); #endif // Pass 0.5: Fill in range map to prepare for atomic min/max launcher.LaunchGeneralKernel( h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) { int v = workload_idx / w_down; int u = workload_idx % w_down; float* range_ptr = range_map_indexer.GetDataPtrFromCoord<float>(u, v); range_ptr[0] = depth_max; range_ptr[1] = depth_min; }); // Pass 1: iterate over rendering fragment array, fill-in range launcher.LaunchGeneralKernel( frag_count * fragment_size * fragment_size, [=] OPEN3D_DEVICE(int64_t workload_idx) { int frag_idx = workload_idx / (fragment_size * fragment_size); int local_idx = workload_idx % (fragment_size * fragment_size); int dv = local_idx / fragment_size; int du = local_idx % fragment_size; float* frag_ptr = frag_buffer_indexer.GetDataPtrFromCoord<float>( frag_idx); int v_min = static_cast<int>(frag_ptr[2]); int u_min = static_cast<int>(frag_ptr[3]); int v_max = static_cast<int>(frag_ptr[4]); int u_max = static_cast<int>(frag_ptr[5]); int v = v_min + dv; int u = u_min + du; if (v > v_max || u > u_max) return; float z_min = frag_ptr[0]; float z_max = frag_ptr[1]; float* range_ptr = range_map_indexer.GetDataPtrFromCoord<float>(u, v); #ifdef __CUDACC__ atomicMinf(&(range_ptr[0]), z_min); atomicMaxf(&(range_ptr[1]), z_max); #else #pragma omp critical { range_ptr[0] = min(z_min, range_ptr[0]); range_ptr[1] = max(z_max, range_ptr[1]); } #endif }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } struct BlockCache { int x; int y; int z; int block_idx; inline int OPEN3D_DEVICE Check(int 
xin, int yin, int zin) {
        // Cache hit: return the cached block index; -1 on miss.
        return (xin == x && yin == y && zin == z) ? block_idx : -1;
    }

    // Record the most recent (block coordinate -> block index) mapping.
    inline void OPEN3D_DEVICE Update(int xin, int yin, int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};

// Ray cast the TSDF volume into per-pixel depth / vertex / color / normal
// maps; each output is produced only if the corresponding tensor is
// non-empty.
#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
        (std::shared_ptr<core::DeviceHashmap>& hashmap,
         const core::Tensor& block_values,
         const core::Tensor& range_map,
         core::Tensor& vertex_map,
         core::Tensor& depth_map,
         core::Tensor& color_map,
         core::Tensor& normal_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int64_t block_resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_min,
         float depth_max,
         float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    // CUDA path requires the stdgpu-backed hashmap implementation.
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);

    // Output indexers: bound below only for the maps that are enabled.
    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;

    // An empty (length-0) tensor disables the corresponding output.
    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);

    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }

    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if
(enable_color) { color_map_indexer = NDArrayIndexer(color_map, 2); } if (enable_normal) { normal_map_indexer = NDArrayIndexer(normal_map, 2); } TransformIndexer c2w_transform_indexer( intrinsics, t::geometry::InverseTransformation(extrinsics)); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); int64_t rows = h; int64_t cols = w; float block_size = voxel_size * block_resolution; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) core::kernel::CUDALauncher launcher; #else core::kernel::CPULauncher launcher; using std::max; #endif DISPATCH_BYTESIZE_TO_VOXEL(voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher.LaunchGeneralKernel( rows * cols, [=] OPEN3D_DEVICE(int64_t workload_idx) { auto GetVoxelAtP = [&] OPEN3D_DEVICE( int x_b, int y_b, int z_b, int x_v, int y_v, int z_v, core::addr_t block_addr, BlockCache& cache) -> voxel_t* { int x_vn = (x_v + block_resolution) % block_resolution; int y_vn = (y_v + block_resolution) % block_resolution; int z_vn = (z_v + block_resolution) % block_resolution; int dx_b = Sign(x_v - x_vn); int dy_b = Sign(y_v - y_vn); int dz_b = Sign(z_v - z_vn); if (dx_b == 0 && dy_b == 0 && dz_b == 0) { return voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>(x_v, y_v, z_v, block_addr); } else { Key key; key.Set(0, x_b + dx_b); key.Set(1, y_b + dy_b); key.Set(2, z_b + dz_b); int block_addr = cache.Check(key.Get(0), key.Get(1), key.Get(2)); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(key.Get(0), key.Get(1), key.Get(2), block_addr); } return voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>( x_vn, y_vn, z_vn, block_addr); } }; auto GetVoxelAtT = [&] OPEN3D_DEVICE( float x_o, float y_o, float z_o, float x_d, float y_d, float z_d, float t, BlockCache& cache) -> voxel_t* { float x_g = x_o + t * x_d; float y_g = y_o + t * y_d; float z_g = z_o + t * z_d; // Block coordinate and look up int x_b = 
static_cast<int>(floorf(x_g / block_size)); int y_b = static_cast<int>(floorf(y_g / block_size)); int z_b = static_cast<int>(floorf(z_g / block_size)); Key key; key.Set(0, x_b); key.Set(1, y_b); key.Set(2, z_b); int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } // Voxel coordinate and look up int x_v = int((x_g - x_b * block_size) / voxel_size); int y_v = int((y_g - y_b * block_size) / voxel_size); int z_v = int((z_g - z_b * block_size) / voxel_size); return voxel_block_buffer_indexer .GetDataPtrFromCoord<voxel_t>(x_v, y_v, z_v, block_addr); }; int64_t y = workload_idx / cols; int64_t x = workload_idx % cols; float *depth_ptr = nullptr, *vertex_ptr = nullptr, *normal_ptr = nullptr, *color_ptr = nullptr; if (enable_depth) { depth_ptr = depth_map_indexer.GetDataPtrFromCoord<float>(x, y); *depth_ptr = 0; } if (enable_vertex) { vertex_ptr = vertex_map_indexer.GetDataPtrFromCoord<float>( x, y); vertex_ptr[0] = 0; vertex_ptr[1] = 0; vertex_ptr[2] = 0; } if (enable_color) { color_ptr = color_map_indexer.GetDataPtrFromCoord<float>(x, y); color_ptr[0] = 0; color_ptr[1] = 0; color_ptr[2] = 0; } if (enable_normal) { normal_ptr = normal_map_indexer.GetDataPtrFromCoord<float>( x, y); normal_ptr[0] = 0; normal_ptr[1] = 0; normal_ptr[2] = 0; } const float* range = range_map_indexer.GetDataPtrFromCoord<float>(x / 8, y / 8); float t = range[0]; const float t_max = range[1]; if (t >= t_max) return; // Coordinates in camera and global float x_c = 0, y_c = 0, z_c = 0; float x_g = 0, y_g = 0, z_g = 0; float x_o = 0, y_o = 0, z_o = 0; // Iterative ray intersection check float t_prev = t; float tsdf_prev = -1.0f; float tsdf = 1.0; float w = 0.0; // Camera origin c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o, &z_o); // Direction c2w_transform_indexer.Unproject(static_cast<float>(x), static_cast<float>(y), 
1.0f, &x_c, &y_c, &z_c); c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); float x_d = (x_g - x_o); float y_d = (y_g - y_o); float z_d = (z_g - z_o); BlockCache cache{0, 0, 0, -1}; bool surface_found = false; while (t < t_max) { voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d, y_d, z_d, t, cache); if (!voxel_ptr) { t_prev = t; t += block_size; } else { tsdf_prev = tsdf; tsdf = voxel_ptr->GetTSDF(); w = voxel_ptr->GetWeight(); if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) { surface_found = true; break; } t_prev = t; float delta = tsdf * sdf_trunc; t += delta < voxel_size ? voxel_size : delta; } } if (surface_found) { float t_intersect = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf); x_g = x_o + t_intersect * x_d; y_g = y_o + t_intersect * y_d; z_g = z_o + t_intersect * z_d; // Trivial vertex assignment if (enable_depth) { *depth_ptr = t_intersect * depth_scale; } if (enable_vertex) { w2c_transform_indexer.RigidTransform( x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1, vertex_ptr + 2); } // Trilinear interpolation // TODO(wei): simplify the flow by splitting the // functions given what is enabled if (enable_color || enable_normal) { int x_b = static_cast<int>(floorf(x_g / block_size)); int y_b = static_cast<int>(floorf(y_g / block_size)); int z_b = static_cast<int>(floorf(z_g / block_size)); float x_v = (x_g - float(x_b) * block_size) / voxel_size; float y_v = (y_g - float(y_b) * block_size) / voxel_size; float z_v = (z_g - float(z_b) * block_size) / voxel_size; Key key; key.Set(0, x_b); key.Set(1, y_b); key.Set(2, z_b); int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } int x_v_floor = static_cast<int>(floorf(x_v)); int y_v_floor = static_cast<int>(floorf(y_v)); int z_v_floor = static_cast<int>(floorf(z_v)); float ratio_x = x_v - float(x_v_floor); float ratio_y = 
y_v - float(y_v_floor); float ratio_z = z_v - float(z_v_floor); float sum_weight_color = 0.0; float sum_weight_normal = 0.0; for (int k = 0; k < 8; ++k) { int dx_v = (k & 1) > 0 ? 1 : 0; int dy_v = (k & 2) > 0 ? 1 : 0; int dz_v = (k & 4) > 0 ? 1 : 0; float ratio = (dx_v * (ratio_x) + (1 - dx_v) * (1 - ratio_x)) * (dy_v * (ratio_y) + (1 - dy_v) * (1 - ratio_y)) * (dz_v * (ratio_z) + (1 - dz_v) * (1 - ratio_z)); voxel_t* voxel_ptr_k = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v, y_v_floor + dy_v, z_v_floor + dz_v, block_addr, cache); if (enable_color && voxel_ptr_k && voxel_ptr_k->GetWeight() > 0) { sum_weight_color += ratio; color_ptr[0] += ratio * voxel_ptr_k->GetR(); color_ptr[1] += ratio * voxel_ptr_k->GetG(); color_ptr[2] += ratio * voxel_ptr_k->GetB(); } if (enable_normal) { for (int dim = 0; dim < 3; ++dim) { voxel_t* voxel_ptr_k_plus = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v + (dim == 0), y_v_floor + dy_v + (dim == 1), z_v_floor + dz_v + (dim == 2), block_addr, cache); voxel_t* voxel_ptr_k_minus = GetVoxelAtP(x_b, y_b, z_b, x_v_floor + dx_v - (dim == 0), y_v_floor + dy_v - (dim == 1), z_v_floor + dz_v - (dim == 2), block_addr, cache); bool valid = false; if (voxel_ptr_k_plus && voxel_ptr_k_plus->GetWeight() > 0) { normal_ptr[dim] += ratio * voxel_ptr_k_plus ->GetTSDF() / (2 * voxel_size); valid = true; } if (voxel_ptr_k_minus && voxel_ptr_k_minus->GetWeight() > 0) { normal_ptr[dim] -= ratio * voxel_ptr_k_minus ->GetTSDF() / (2 * voxel_size); valid = true; } sum_weight_normal += valid ? 
ratio : 0; } } // if (enable_normal) } // loop over 8 neighbors if (enable_color && sum_weight_color > 0) { sum_weight_color *= 255.0; color_ptr[0] /= sum_weight_color; color_ptr[1] /= sum_weight_color; color_ptr[2] /= sum_weight_color; } if (enable_normal && sum_weight_normal > 0) { normal_ptr[0] /= sum_weight_normal; normal_ptr[1] /= sum_weight_normal; normal_ptr[2] /= sum_weight_normal; float norm = sqrt(normal_ptr[0] * normal_ptr[0] + normal_ptr[1] * normal_ptr[1] + normal_ptr[2] * normal_ptr[2]); w2c_transform_indexer.Rotate( normal_ptr[0] / norm, normal_ptr[1] / norm, normal_ptr[2] / norm, normal_ptr + 0, normal_ptr + 1, normal_ptr + 2); } } // if (color or normal) } // if (tsdf < 0) }); }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } } // namespace tsdf } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
GB_unop__identity_fp32_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp32_fp64
// op(A') function:  GB_unop_tran__identity_fp32_fp64

// C type:   float
// A type:   double
// cast:     float cij = (float) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    float z = (float) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (float) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: double -> float narrows, so a plain memcpy is not valid)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each entry of A (double) to float and stores it in Cx.  Handles both
// the full/sparse case (Ab == NULL: every slot of Ax holds an entry) and the
// bitmap case (Ab = A->b: only slots with Ab [p] != 0 hold entries).
GrB_Info GB_unop_apply__identity_fp32_fp64
(
    float *Cx,                      // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            float z = (float) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is supplied by the GB_unop_transpose.c template,
// which expands using the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_fp32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
PVSortFilter.h
/* * MIT License * * © ESI Group, 2015 * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifndef PVSORTFILTER_H #define PVSORTFILTER_H #include <Qt> #include <vector> #include <numeric> #include <inendi/PVSelection.h> #include <pvcop/db/array.h> #include <pvhwloc.h> #include <omp.h> namespace PVGuiQt { class PVSortFilter { public: explicit PVSortFilter(size_t row_count) : _filter(row_count), _sort(row_count) { auto& sort = _sort.to_core_array(); std::iota(sort.begin(), sort.end(), 0); std::iota(_filter.begin(), _filter.end(), 0); } PVRow row_pos_to_index(PVRow idx) const { return _filter[idx]; } PVRow row_pos_from_index(PVRow idx) const { return std::distance(_filter.begin(), std::find(_filter.begin(), _filter.end(), idx)); } size_t size() const { return _filter.size(); } void set_filter(Inendi::PVSelection const& sel) { size_t num_threads = pvhwloc::thread_count(); std::vector<std::vector<PVRow>> filters(num_threads); size_t elts[num_threads]; auto const& sort = _sort.to_core_array(); _filter.resize(sort.size()); #pragma omp parallel num_threads(num_threads) { auto& filter = filters[omp_get_thread_num()]; filter.resize(sort.size() / omp_get_num_threads() + 1); size_t i = 0; #pragma omp for schedule(static) nowait for (auto it = sort.begin(); it < sort.end(); it++) { if (sel.get_line_fast(*it)) { filter[i++] = *it; } } // Update it at the end to avoid false-sharing elts[omp_get_thread_num()] = i; } auto begin = _filter.begin(); for (size_t i = 0; i < num_threads; i++) { begin = std::copy_n(filters[i].begin(), elts[i], begin); } // Push selected lines _filter.resize(std::distance(_filter.begin(), begin)); if (_sort_order == Qt::DescendingOrder) { std::reverse(_filter.begin(), _filter.end()); } } std::vector<PVRow> const& shown_lines() const { return _filter; } pvcop::db::indexes& sorting() { return _sort; } PVCombCol sorted_column() const { return _sorted_column; } void set_filter_as_sort() { auto const& sort = _sort.to_core_array(); if (_sort_order != Qt::DescendingOrder) { std::copy(sort.begin(), sort.end(), _filter.begin()); } else { 
std::copy(sort.begin(), sort.end(), _filter.rbegin()); } } void set_sorted_meta(PVCombCol col, Qt::SortOrder order) { _sorted_column = col; _sort_order = order; } private: std::vector<PVRow> _filter; //!< Lines to use, map listing_row_id to nraw_row_id unsorted pvcop::db::indexes _sort; //!< Sorted lines, map listing not filtered position to nraw position PVCombCol _sorted_column; //!< The current sorted column Qt::SortOrder _sort_order; //!< The sort order of the current sorted column }; } // namespace PVGuiQt #endif
GB_unaryop__abs_int64_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int64_int32
// op(A') function:  GB_tran__abs_int64_int32

// C type:   int64_t
// A type:   int32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = |Ax [p]| for all anz entries, casting int32_t up to
// int64_t before taking the absolute value.
GrB_Info GB_unop__abs_int64_int32
(
    int64_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body is supplied by the GB_unaryop_transpose.c template,
// which expands using the GB_* macros defined above.
GrB_Info GB_tran__abs_int64_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
paint.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP AAA IIIII N N TTTTT % % P P A A I NN N T % % PPPP AAAAA I N N N T % % P A A I N NN T % % P A A IIIII N N T % % % % % % Methods to Paint on an Image % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o o d f i l l P a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FloodfillPaintImage() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. The % fuzz member of image defines how much tolerance is acceptable to % consider two colors as the same. For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % The format of the FloodfillPaintImage method is: % % MagickBooleanType FloodfillPaintImage(Image *image, % const ChannelType channel,const DrawInfo *draw_info, % const MagickPixelPacket target,const ssize_t x_offset, % const ssize_t y_offset,const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o draw_info: the draw info. 
%
%    o target: the RGB value of the target color.
%
%    o x_offset,y_offset: the starting location of the operation.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const ChannelType channel,const DrawInfo *draw_info,
  const MagickPixelPacket *target,const ssize_t x_offset,const ssize_t y_offset,
  const MagickBooleanType invert)
{
#define MaxStacksize  524288UL
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  MagickPixelPacket
    fill,
    pixel;

  MemoryInfo
    *segment_info;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if ((image->matte == MagickFalse) &&
      (draw_info->fill.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Set floodfill state.  The flood region is first marked on a scratch
    clone (floodplane_image) by setting pixel opacity to transparent; the
    fill color is applied to the real image in a second pass below.
  */
  floodplane_image=CloneImage(image,0,0,MagickTrue,&image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  GetMagickPixelPacket(image,&fill);
  GetMagickPixelPacket(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  /*
    Classic scanline flood fill: each stack entry is a horizontal segment
    [x1,x2] on row y1, with y2 holding the direction (+1/-1) toward the row
    that spawned it.  NOTE(review): statement order here is load-bearing;
    do not reorder the pop/scan/push sequence.
  */
  while (s > segment_stack)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: first scan left from x1 while pixels
      match, marking them transparent in the floodplane.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    p+=x1;
    q+=x1;
    for (x=x1; x >= 0; x--)
    {
      /*
        Transparent opacity in the floodplane marks "already visited".
      */
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if (IsMagickColorSimilar(&pixel,target) == invert)
        break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          /*
            Scan right from x, marking matching pixels, then push the
            discovered span for the adjacent rows.
          */
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,
                image->columns-x,1,exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              indexes=GetCacheViewVirtualIndexQueue(image_view);
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                SetMagickPixelPacket(image,p,indexes+x,&pixel);
                if (IsMagickColorSimilar(&pixel,target) == invert)
                  break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      /*
        Skip over non-matching pixels to find the next matching run
        within [x,x2].
      */
      if (x <= x2)
        {
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            SetMagickPixelPacket(image,p,indexes+x,&pixel);
            if (IsMagickColorSimilar(&pixel,target) != invert)
              break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  /*
    Second pass: everywhere the floodplane was marked transparent, apply the
    fill color to the requested channels of the real image.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane.
    */
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          SetMagickPixelPacket(image,&fill_color,(IndexPacket *) NULL,&fill);
          if (image->colorspace == CMYKColorspace)
            ConvertRGBToCMYK(&fill);
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(fill.blue));
          if (((channel & OpacityChannel) != 0) ||
              (draw_info->fill.opacity != OpaqueOpacity))
            SetPixelOpacity(q,ClampToQuantum(fill.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(fill.index));
        }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  /* MagickTrue only if the final pass completed every row. */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G r a d i e n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GradientImage() applies a continuously smooth color transitions along a
%  vector from one color to another.
%
%  Note, the interface of this method will change in the future to support
%  more than one transition.
%
%  The format of the GradientImage method is:
%
%      MagickBooleanType GradientImage(Image *image,const GradientType type,
%        const SpreadMethod method,const PixelPacket *start_color,
%        const PixelPacket *stop_color)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: the gradient type: linear or radial.
%
%    o spread: the gradient spread method: pad, reflect, or repeat.
%
%    o start_color: the start color.
%
%    o stop_color: the stop color.
%
%  This provides a good example of making use of the DrawGradientImage
%  function and the gradient structure in draw_info.
%
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,
  const PixelPacket *start_color,const PixelPacket *stop_color)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  register ssize_t
    i;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(start_color != (const PixelPacket *) NULL);
  assert(stop_color != (const PixelPacket *) NULL);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  /*
    Default vector: top-left to bottom-right; may be overridden by the
    gradient:direction, gradient:angle or gradient:vector artifacts below.
  */
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /*
            Fix: was image->columns-1; a South (straight down) gradient
            vector must span the image height, as in every other case.
          */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=(MagickRealType) StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    With no explicit direction, a linear gradient defaults to vertical.
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) (image->columns-1)*cosine)+
        fabs((double) (image->rows-1)*sine);
      gradient->gradient_vector.x1=0.5*((image->columns-1)-distance*cosine);
      gradient->gradient_vector.y1=0.5*((image->rows-1)-distance*sine);
      gradient->gradient_vector.x2=0.5*((image->columns-1)+distance*cosine);
      gradient->gradient_vector.y2=0.5*((image->rows-1)+distance*sine);
    }
  gradient->radii.x=(double) MagickMax((image->columns-1),(image->rows-1))/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) (MagickMax((image->columns-1),
            (image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt((double) (image->columns-1)*
            (image->columns-1)+(image->rows-1)*(image->rows-1)))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) (image->columns-1)/2.0;
          gradient->radii.y=(double) (image->rows-1)/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) MagickMin((image->columns-1),
            (image->rows-1))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=2;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    {
      /* Fix: destroy draw_info before throwing to avoid a memory leak. */
      draw_info=DestroyDrawInfo(draw_info);
      ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(gradient->stops,0,gradient->number_stops*
    sizeof(*gradient->stops));
  for (i=0; i < (ssize_t) gradient->number_stops; i++)
    GetMagickPixelPacket(image,&gradient->stops[i].color);
  SetMagickPixelPacket(image,start_color,(IndexPacket *) NULL,
    &gradient->stops[0].color);
  gradient->stops[0].offset=0.0;
  SetMagickPixelPacket(image,stop_color,(IndexPacket *) NULL,
    &gradient->stops[1].color);
  gradient->stops[1].offset=1.0;
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O i l P a i n t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OilPaintImage() applies a special effect filter that simulates an oil
%  painting.
Each pixel is replaced by the most frequent color occurring
%  in a circular region defined by radius.
%
%  The format of the OilPaintImage method is:
%
%      Image *OilPaintImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the circular neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Free one intensity histogram per worker thread; NULL entries are skipped. */
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  register ssize_t
    i;

  assert(histogram != (size_t **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (histogram[i] != (size_t *) NULL)
      histogram[i]=(size_t *) RelinquishMagickMemory(histogram[i]);
  histogram=(size_t **) RelinquishMagickMemory(histogram);
  return(histogram);
}

/* Allocate one histogram of `count` bins per worker thread; returns NULL on
   failure (partially allocated sets are released). */
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  register ssize_t
    i;

  size_t
    **histogram,
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  histogram=(size_t **) AcquireQuantumMemory(number_threads,
    sizeof(*histogram));
  if (histogram == (size_t **) NULL)
    return((size_t **) NULL);
  (void) memset(histogram,0,number_threads*sizeof(*histogram));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    histogram[i]=(size_t *) AcquireQuantumMemory(count,
      sizeof(**histogram));
    if (histogram[i] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(histogram));
  }
  return(histogram);
}

MagickExport Image *OilPaintImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
#define NumberPaintBins  256
#define OilPaintImageTag  "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **magick_restrict histograms,
    width;

  ssize_t
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,0.5);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        /* NOTE(review): assigning to linear_image here is a dead store
           (DestroyImage returns NULL); paint_image is still freed. */
        linear_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&paint_image->exception);
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict paint_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    register size_t
      *histogram;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a width-tall band centered on row y (the virtual view handles
      out-of-bounds rows at the image edges).
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    paint_indexes=GetCacheViewAuthenticIndexQueue(paint_view);
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        v;

      /*
        Assign most frequent color: build an intensity histogram of the
        width x width neighborhood and remember the offset j of the pixel
        whose intensity bin is the fullest.
      */
      i=0;
      j=0;
      count=0;
      (void) memset(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          k=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+u+i)));
          histogram[k]++;
          if (histogram[k] > count)
            {
              j=i+u;
              count=histogram[k];
            }
        }
        /* advance i to the next neighborhood row within the band */
        i+=(ssize_t) (linear_image->columns+width);
      }
      *q=(*(p+j));
      if (linear_image->colorspace == CMYKColorspace)
        SetPixelIndex(paint_indexes+x,GetPixelIndex(indexes+x+j));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OilPaintImage)
#endif
        proceed=SetImageProgress(image,OilPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);
  return(paint_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O p a q u e P a i n t I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpaquePaintImage() changes any pixel that matches color with the color
%  defined by fill.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the OpaquePaintImage method is:
%
%      MagickBooleanType OpaquePaintImage(Image *image,
%        const MagickPixelPacket *target,const MagickPixelPacket *fill,
%        const MagickBooleanType invert)
%      MagickBooleanType OpaquePaintImageChannel(Image *image,
%        const ChannelType channel,const MagickPixelPacket *target,
%        const MagickPixelPacket *fill,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s).
%
%    o target: the value of the target color (full MagickPixelPacket,
%      not just RGB).
%
%    o fill: the replacement color.
%
%    o invert: paint any pixel that does not match the target color.
%
*/

/* Convenience wrapper: applies OpaquePaintImageChannel() to the default
   composite channel set. */
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const MagickPixelPacket *target,const MagickPixelPacket *fill,
  const MagickBooleanType invert)
{
  return(OpaquePaintImageChannel(image,CompositeChannels,target,fill,invert));
}

/* Replace every pixel similar to 'target' (within the image fuzz) with
   'fill', restricted to the channels selected by 'channel'.  When 'invert'
   is MagickTrue the test is negated: pixels that do NOT match are painted.
   Returns MagickTrue on success, MagickFalse if the image could not be
   converted to DirectClass or a row could not be synced. */
MagickExport MagickBooleanType OpaquePaintImageChannel(Image *image,
  const ChannelType channel,const MagickPixelPacket *target,
  const MagickPixelPacket *fill,const MagickBooleanType invert)
{
#define OpaquePaintImageTag  "Opaque/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  assert(fill != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* Conform fill/target to the image's colorspace & matte settings before
     comparing. */
  ConformMagickPixelPacket(image,fill,&conform_fill,exception);
  ConformMagickPixelPacket(image,target,&conform_target,exception);
  /*
    Make image color opaque.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* Another row already failed; skip remaining work but keep the loop
       structure valid for OpenMP. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Fuzzy match against the target; invert flips the sense. */
      if (IsMagickColorSimilar(&pixel,&conform_target) != invert)
        {
          /* Only the requested channels are overwritten. */
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(conform_fill.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(conform_fill.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(conform_fill.blue));
          if ((channel & OpacityChannel) != 0)
            SetPixelOpacity(q,ClampToQuantum(conform_fill.opacity));
          /* Index channel is only meaningful for CMYK images. */
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(indexes+x,ClampToQuantum(conform_fill.index));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImageChannel)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p a r e n t   P a i n t   I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentPaintImage method is:
%
%      MagickBooleanType TransparentPaintImage(Image *image,
%        const MagickPixelPacket *target,const Quantum opacity,
%        const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const MagickPixelPacket *target,const Quantum opacity,
  const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* The opacity channel must exist before we can write per-pixel opacity. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Fuzzy match against target; only the opacity channel is touched. */
      if (IsMagickColorSimilar(&pixel,target) != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p a r e n t   P a i n t   I m a g e   C h r o m a               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentPaintImageChroma() changes the opacity value associated with any
%  pixel that matches color to the value defined by opacity.
%
%  As there is one fuzz value for all the channels, the
%  TransparentPaintImage() API is not suitable for operations like chroma,
%  where the tolerance for similarity of two color components (RGB) can be
%  different.  Thus this method takes two target pixels (one low and one
%  high) and all the pixels of an image which lie between these two pixels
%  are made transparent.
%
%  The format of the TransparentPaintImageChroma method is:
%
%      MagickBooleanType TransparentPaintImageChroma(Image *image,
%        const MagickPixelPacket *low,const MagickPixelPacket *high,
%        const Quantum opacity,const MagickBooleanType invert)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low: the low target color.
%
%    o high: the high target color.
%
%    o opacity: the replacement opacity value.
%
%    o invert: paint any pixel that does not match the target color.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const MagickPixelPacket *low,const MagickPixelPacket *high,
  const Quantum opacity,const MagickBooleanType invert)
{
#define TransparentPaintImageTag  "Transparent/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (MagickPixelPacket *) NULL);
  assert(low != (MagickPixelPacket *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Pixels are modified in place, so the image must be DirectClass. */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): TransparentPaintImage() uses OpaqueAlphaChannel here,
     this variant uses ResetAlphaChannel -- confirm the asymmetry is
     intentional. */
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,ResetAlphaChannel);
  /*
    Make image color transparent.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    GetMagickPixelPacket(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      /* Per-channel range test: pixel is a match iff every RGB component
         lies within [low,high]. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        q->opacity=opacity;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/* ==== DRB025-simdtruedep-var-yes.c ==== */
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This one has race condition due to true dependence. 
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12 */
#include <stdlib.h>
#include <stdio.h> /* printf: was called without a prototype (implicit
                      declaration is invalid since C99) */

/*
 * DataRaceBench DRB025: loop main#1 carries a deliberate loop-carried true
 * dependence (a[i+1] written from a[i]); it is preserved untouched, since
 * exhibiting it is the point of this benchmark.
 *
 * Fixes relative to the original:
 *   - <stdio.h> is now included for printf.
 *   - The VLAs a[] and b[] were declared while len was still 100, BEFORE
 *     len was overwritten from argv[1]; all three loops then iterated to
 *     the new len, overrunning both arrays whenever len > 100.  The VLAs
 *     are now declared after len is final.
 *   - A non-positive len from atoi() (bad/zero input) would make the VLA
 *     size undefined behavior; fall back to the default in that case.
 */
int main(int argc, char * argv[])
{
  int i;
  int len = 100;
  int _ret_val_0;
  if (argc>1)
  {
    len=atoi(argv[1]);
  }
  if (len<=0)
  {
    /* atoi() reports errors as 0; a zero/negative VLA bound is UB. */
    len=100;
  }
  /* Declare the VLAs only once len has its final value. */
  int a[len], b[len];
  #pragma cetus private(i)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<len; i ++ )
  {
    a[i]=i;
    b[i]=(i+1);
  }
  /* Intentional loop-carried dependence: a[i+1] depends on a[i]. */
  #pragma cetus private(i)
  #pragma loop name main#1
  for (i=0; i<(len-1); i ++ )
  {
    a[i+1]=(a[i]*b[i]);
  }
  #pragma cetus private(i)
  #pragma loop name main#2
  for (i=0; i<len; i ++ )
  {
    printf("i=%d a[%d]=%d\n", i, i, a[i]);
  }
  _ret_val_0=0;
  return _ret_val_0;
}
/* ==== streamer_work.c ==== */
#include "grid.h"
#include "config.h"
#include "streamer.h"

#include <hdf5.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <complex.h>
#include <string.h>
#include <omp.h>
#include <float.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/wait.h>

/* Degrid one (time,frequency) window of visibilities for one baseline from
 * an FFT'd subgrid, optionally spot-checking random samples against a direct
 * DFT of the configured sources.  Error statistics and flop counts are
 * accumulated into *streamer via OpenMP atomics.  Returns the number of
 * floating-point operations performed.
 *
 * Coordinates are measured relative to the subgrid centre (mid_u/v/w);
 * 'conjugate' negates all uvw (degrid the mirrored half-plane).
 * vis_data receives (it1-it0) rows of spec->freq_chunk visibilities. */
uint64_t streamer_degrid_worker(struct streamer *streamer,
                                struct bl_data *bl_data,
                                int SG_stride, double complex *subgrid,
                                double mid_u, double mid_v, double mid_w,
                                int iu, int iv, int iw,
                                bool conjugate, int it0, int it1, int if0, int if1,
                                double min_u, double max_u, double min_v, double max_v,
                                double min_w, double max_w,
                                double complex *vis_data)
{
    struct vis_spec *const spec = &streamer->work_cfg->spec;
    const double theta = streamer->work_cfg->theta;
    const int subgrid_size = streamer->work_cfg->recombine.xM_size;

    // Calculate l/m positions of sources in case we are meant to
    // check them
    int i;
    const int image_size = streamer->work_cfg->recombine.image_size;
    const int source_count = streamer->work_cfg->source_count;

    // Initialise counter to random value so we check random visibilities
    int source_checks = streamer->work_cfg->vis_checks;
    int check_counter = 0;
    if (source_checks > 0 && source_count > 0) {
        check_counter = rand() % source_checks;
    } else {
        source_checks = 0;
    }

    // Do degridding
    uint64_t flops = 0;
    uint64_t square_error_samples = 0;
    double square_error_sum = 0, worst_err = 0;
    int time;
    for (time = it0; time < it1; time++) {

        // Determine coordinates (uvw at channel 0, and per-channel step
        // derived from channel 1)
        double u = uvw_lambda(bl_data, time, 0, 0);
        double v = uvw_lambda(bl_data, time, 0, 1);
        double w = uvw_lambda(bl_data, time, 0, 2);
        double du = uvw_lambda(bl_data, time, 1, 0) - u;
        double dv = uvw_lambda(bl_data, time, 1, 1) - v;
        double dw = uvw_lambda(bl_data, time, 1, 2) - w;

        // Round to w-plane? Useful for testing simple gridders
        if (streamer->work_cfg->vis_round_to_wplane) {
            w = mid_w; dw = 0;
        }

        if (conjugate) {
            u *= -1; du *= -1;
            v *= -1; dv *= -1;
            w *= -1; dw *= -1;
        }

        // Degrid a line of visibilities
        double complex *pvis = vis_data + (time-it0)*spec->freq_chunk;
        degrid_conv_uv_line(subgrid, subgrid_size, SG_stride, theta,
                            u-mid_u, v-mid_v, w-mid_w, du, dv, dw,
                            if1 - if0,
                            min_u-mid_u, max_u-mid_u, min_v-mid_v, max_v-mid_v,
                            min_w-mid_w, max_w-mid_w,
                            conjugate, streamer->kern, pvis, &flops);

        // Check against DFT (one per row, maximum)
        if (source_checks > 0) {
            if (check_counter >= if1 - if0) {
                check_counter -= if1 - if0;
            } else {
                double complex vis_out = pvis[check_counter];
                double check_u = u + du * check_counter;
                double check_v = v + dv * check_counter;
                double check_w = w + dw * check_counter;

                // Check that we actually generated a visibility here,
                // negate if necessary
                complex double vis = 0;
                if (check_u >= min_u && check_u < max_u &&
                    check_v >= min_v && check_v < max_v) {
                    if (conjugate) { check_u *= -1; check_v *= -1; }

                    // Generate visibility by direct DFT over all sources
                    for (i = 0; i < source_count; i++) {
                        double ph = check_u * streamer->work_cfg->source_lmn[i*3+0] +
                                    check_v * streamer->work_cfg->source_lmn[i*3+1] +
                                    check_w * streamer->work_cfg->source_lmn[i*3+2];
                        vis += cos(2*M_PI*ph) + 1.j * sin(2*M_PI*ph);
                    }
                    vis /= (double)image_size * image_size;
                }

                // Check error
                square_error_samples += 1;
                double err = cabs(vis_out - vis);
                if (err > 1e-7) {
                    fprintf(stderr, "WARNING: uv %g/%g (sg %d/%d): %g%+gj != %g%+gj\n",
                            u, v, iu, iv, creal(vis_out), cimag(vis_out), creal(vis), cimag(vis));
                }
                worst_err = fmax(err, worst_err);
                square_error_sum += err * err;
                check_counter = source_checks;
            }
        }
    }

    // Add to statistics
#pragma omp atomic
    streamer->vis_error_samples += square_error_samples;
#pragma omp atomic
    streamer->vis_error_sum += square_error_sum;
    if (worst_err > streamer->vis_worst_error) {
        // Likely happens rarely enough that a critical section won't be a problem
#pragma omp critical
        streamer->vis_worst_error = fmax(streamer->vis_worst_error, worst_err);
    }
#pragma omp atomic
    streamer->degrid_flops += flops;
#pragma omp atomic
    streamer->produced_chunks += 1;

    return flops;
}

/* Degrid one time/frequency chunk of a baseline against the given subgrid.
 * Returns false (without doing work) if the chunk's uvw bounding box does
 * not overlap the subgrid; otherwise picks the least busy writer, acquires
 * an output slot, degrids into it, and signals the writer.  Timing and
 * statistics are accumulated into *streamer. */
bool streamer_degrid_chunk(struct streamer *streamer,
                           struct subgrid_work *work,
                           struct subgrid_work_bl *bl,
                           int tchunk, int fchunk,
                           int slot, int SG_stride,
                           double complex *subgrid)
{
    struct vis_spec *const spec = &streamer->work_cfg->spec;
    struct recombine2d_config *const cfg = &streamer->work_cfg->recombine;
    const double theta = streamer->work_cfg->theta;
    const double wstep = streamer->work_cfg->wstep;
    const double sg_step = streamer->work_cfg->sg_step;
    const double sg_step_w = streamer->work_cfg->sg_step_w;

    double start = get_time_ns();

    // Calculate subgrid boundaries. TODO: All of this duplicates
    // logic that also appears in config.c (bin_baseline). This is
    // brittle, should get refactored at some point!
    double sg_mid_u = work->subgrid_off_u / theta;
    double sg_mid_v = work->subgrid_off_v / theta;
    double sg_mid_w = work->subgrid_off_w * wstep;
    double sg_min_u = (work->subgrid_off_u - sg_step / 2) / theta;
    double sg_min_v = (work->subgrid_off_v - sg_step / 2) / theta;
    double sg_min_w = (work->subgrid_off_w - sg_step_w / 2) * wstep;
    double sg_max_u = (work->subgrid_off_u + sg_step / 2) / theta;
    double sg_max_v = (work->subgrid_off_v + sg_step / 2) / theta;
    double sg_max_w = (work->subgrid_off_w + sg_step_w / 2) * wstep;
    // Wrap v into the primary image range if it fell past the edge.
    if (sg_min_v > cfg->image_size / theta / 2) {
        sg_min_v -= cfg->image_size / theta / 2;
        sg_max_v -= cfg->image_size / theta / 2;
    }

    // Determine chunk size (clamped at the end of the data)
    int it0 = tchunk * spec->time_chunk,
        it1 = (tchunk+1) * spec->time_chunk;
    if (it1 > spec->time_count) it1 = spec->time_count;
    int if0 = fchunk * spec->freq_chunk,
        if1 = (fchunk+1) * spec->freq_chunk;
    if (if1 > spec->freq_count) if1 = spec->freq_count;

    // Check whether time chunk fall into positive u. We use this
    // for deciding whether coordinates are going to get flipped
    // for the entire chunk. This is assuming that a chunk is
    // never big enough that we would overlap an extra subgrid
    // into the negative direction.
    int tstep_mid = (it0 + it1) / 2;
    bool positive_u = bl->bl_data->uvw_m[tstep_mid * 3] >= 0;

    // Check for overlap between baseline chunk and subgrid
    double min_uvw[3], max_uvw[3];
    bl_bounding_box(bl->bl_data, !positive_u, it0, it1-1, if0, if1-1,
                    min_uvw, max_uvw);
    if (!(min_uvw[0] < sg_max_u && max_uvw[0] > sg_min_u &&
          min_uvw[1] < sg_max_v && max_uvw[1] > sg_min_v &&
          min_uvw[2] < sg_max_w && max_uvw[2] > sg_min_w))
        return false;

    // Determine least busy writer
    int i, least_waiting = 2 * streamer->vis_queue_per_writer;
    struct streamer_writer *writer = streamer->writer;
    for (i = 0; i < streamer->writer_count; i++) {
        if (streamer->writer[i].to_write < least_waiting) {
            least_waiting = streamer->writer[i].to_write;
            writer = streamer->writer + i;
        }
    }

    // Acquire a slot (may block; the wait is accounted separately below)
    struct streamer_chunk *chunk
        = writer_push_slot(writer, bl->bl_data, tchunk, fchunk);
#pragma omp atomic
    streamer->wait_in_time += get_time_ns() - start;
    start = get_time_ns();

    // Do degridding. If no writer slot was obtained, degrid into a
    // throw-away stack buffer so statistics still get collected.
    const size_t chunk_vis_size =
        sizeof(double complex) * spec->freq_chunk * spec->time_chunk;
    uint64_t flops = streamer_degrid_worker(
        streamer, bl->bl_data, SG_stride, subgrid,
        sg_mid_u, sg_mid_v, sg_mid_w,
        work->iu, work->iv, work->iw,
        !positive_u, it0, it1, if0, if1,
        sg_min_u, sg_max_u, sg_min_v, sg_max_v, sg_min_w, sg_max_w,
        chunk ? chunk->vis : alloca(chunk_vis_size));
#pragma omp atomic
    streamer->degrid_time += get_time_ns() - start;

    if (chunk) {
        // No flops executed? Signal to writer that we can skip writing
        // this chunk (small optimisation)
        if (flops == 0) {
            chunk->tchunk = -2;
            chunk->fchunk = -2;
        }

        // Signal slot for output
#ifndef __APPLE__
        sem_post(&chunk->out_lock);
#else
        dispatch_semaphore_signal(chunk->out_lock);
#endif
    }
    return true;
}

/* OpenMP task body: FFT the subgrid image into a padded-stride buffer, then
 * degrid every time/frequency chunk of up to vis_bls_per_task baselines
 * starting at 'bl'.  Releases the subgrid slot lock when done. */
void streamer_task(struct streamer *streamer,
                   struct subgrid_work *work,
                   struct subgrid_work_bl *bl,
                   int slot, int subgrid_work,
                   double complex *subgrid_image)
{
    const int xM_size = streamer->work_cfg->recombine.xM_size;
    const int SG_stride = xM_size + 16; // Assume 16x16 is biggest possible convolution
    const int SG2_size = sizeof(double complex) * SG_stride * xM_size;
    int i;

    // FFT and establish proper stride for the subgrid so we don't get
    // cache thrashing problems when gridding moves (TODO: construct
    // like this right away?)
    double complex *subgrid = calloc(1, SG2_size);
    if (subgrid_image) {
        fftw_execute_dft(streamer->subgrid_plan, subgrid_image, subgrid);
        fft_shift(subgrid, xM_size);
        // Re-space rows in place; iterate backwards so rows are copied
        // before being overwritten.
        for (i = xM_size-1; i >= 0; i--) {
            memcpy(subgrid + SG_stride * i, subgrid + xM_size * i,
                   sizeof(double complex) * xM_size);
        }
    }

    struct vis_spec *const spec = &streamer->work_cfg->spec;
    struct subgrid_work_bl *bl2;
    int i_bl2;
    for (bl2 = bl, i_bl2 = 0;
         bl2 && i_bl2 < streamer->work_cfg->vis_bls_per_task;
         bl2 = bl2->next, i_bl2++) {

        // Go through time/frequency chunks
        // NOTE(review): chunk counts below use bl->bl_data rather than
        // bl2->bl_data -- verify this is intentional (it is only safe if
        // all baselines in the group share the same time/freq counts).
        int ntchunk = (bl->bl_data->time_count + spec->time_chunk - 1) / spec->time_chunk;
        int nfchunk = (bl->bl_data->freq_count + spec->freq_chunk - 1) / spec->freq_chunk;
        int tchunk, fchunk;
        int nchunks = 0;
        for (tchunk = 0; tchunk < ntchunk; tchunk++)
            for (fchunk = 0; fchunk < nfchunk; fchunk++)
                if (streamer_degrid_chunk(streamer, work, bl2,
                                          tchunk, fchunk, slot, SG_stride, subgrid))
                    nchunks++;

        // Check that plan predicted the right number of chunks. This
        // is pretty important - if this fails this means that the
        // coordinate calculations are out of synch, which might mean
        // that we have failed to account for some visibilities in the
        // plan!
        if (bl2->chunks != nchunks)
            printf("WARNING: subgrid (%d/%d/%d) baseline (%d-%d) %d chunks planned, %d actual!\n",
                   work->iu, work->iv, work->iw, bl2->a1, bl2->a2, bl2->chunks, nchunks);
    }

    // Done with this chunk
#pragma omp atomic
    streamer->subgrid_tasks--;
#pragma omp atomic
    streamer->subgrid_locks[slot]--;

    free(subgrid);
}

// Perform checks on the subgrid data. Returns RMSE if we have sources
// to do the checks.
static double streamer_checks(struct streamer *streamer,
                              struct subgrid_work *work,
                              complex double *subgrid_image)
{
    struct recombine2d_config *const cfg = &streamer->work_cfg->recombine;

    // Perform Fourier transform
    // NOTE(review): calloc(sizeof(complex double), cfg->SG_size) looks like
    // swapped nmemb/size arguments -- SG_size appears to be a byte count
    // elsewhere (memset in streamer_work), so this may over-allocate by a
    // factor of sizeof(complex double). Harmless but worth confirming.
    complex double *subgrid = calloc(sizeof(complex double), cfg->SG_size);
    fftw_execute_dft(streamer->subgrid_plan, subgrid_image, subgrid);

    // Check accumulated result against the reference from the plan file
    if (work->check_path && streamer->work_cfg->facet_workers > 0) {
        double complex *approx_ref = read_hdf5(cfg->SG_size, work->check_hdf5,
                                               work->check_path);
        double err_sum = 0;
        int y;
        for (y = 0; y < cfg->xM_size * cfg->xM_size; y++) {
            double err = cabs(subgrid[y] - approx_ref[y]);
            err_sum += err * err;
        }
        free(approx_ref);
        double rmse = sqrt(err_sum / cfg->xM_size / cfg->xM_size);
        printf("%sSubgrid %d/%d RMSE %g\n",
               rmse > work->check_threshold ? "ERROR: " : "",
               work->iu, work->iv, rmse);
    }

    fft_shift(subgrid, cfg->xM_size);

    // Check some degridded example visibilities
    // NOTE(review): uvw_sg, vis and bl.vis allocated below are never freed
    // -- acceptable only if this check path is rare/diagnostic.
    if (work->check_degrid_path && streamer->kern &&
        streamer->work_cfg->facet_workers > 0) {
        int nvis = get_npoints_hdf5(work->check_hdf5, "%s/vis", work->check_degrid_path);
        double *uvw_sg = read_hdf5(3 * sizeof(double) * nvis, work->check_hdf5,
                                   "%s/uvw_subgrid", work->check_degrid_path);
        int vis_size = sizeof(double complex) * nvis;
        double complex *vis = read_hdf5(vis_size, work->check_hdf5,
                                        "%s/vis", work->check_degrid_path);

        struct bl_data bl;
        bl.antenna1 = bl.antenna2 = 0;
        bl.time_count = nvis;
        bl.freq_count = 1;
        double freq[] = { c }; // 1 m wavelength
        bl.freq = freq;
        bl.vis = (double complex *)calloc(1, vis_size);

        // Degrid and compare
        bl.uvw_m = uvw_sg;
        degrid_conv_bl(subgrid, cfg->xM_size, cfg->xM_size, cfg->image_size, 0, 0,
                       -cfg->xM_size, cfg->xM_size, -cfg->xM_size, cfg->xM_size,
                       &bl, 0, nvis, 0, 1, streamer->kern);
        double err_sum = 0;
        int y;
        for (y = 0; y < nvis; y++) {
            double err = cabs(vis[y] - bl.vis[y]);
            err_sum += err*err;
        }
        double rmse = sqrt(err_sum / nvis);
        printf("%sSubgrid %d/%d degrid RMSE %g\n",
               rmse > work->check_degrid_threshold ? "ERROR: " : "",
               work->iu, work->iv, rmse);
    }

    // Check against DFT, if we are generating from sources
    const int source_count = streamer->work_cfg->source_count;
    double err_sum = 0, worst_err = 0;
    int err_samples = 0;
    int source_checks = streamer->work_cfg->grid_checks;
    if (source_count > 0 && source_checks > 0) {
        const double theta = streamer->work_cfg->theta;
        const double wstep = streamer->work_cfg->wstep;
        int iu, iv;
        // Sample roughly every source_checks-th grid point, starting at a
        // random offset.
        int check_counter = rand() % source_checks;
        for (iv = -cfg->xA_size/2; iv < cfg->xA_size/2; iv++) {
            for (iu = -cfg->xA_size/2; iu < cfg->xA_size/2; iu++) {
                if (check_counter--)
                    continue;
                check_counter = source_checks;
                double check_u = (work->subgrid_off_u+iu) / theta;
                double check_v = (work->subgrid_off_v+iv) / theta;
                double check_w = work->subgrid_off_w * wstep;

                // Generate visibility
                complex double vis = 0;
                int i;
                for (i = 0; i < source_count; i++) {
                    double ph = check_u * streamer->work_cfg->source_lmn[i*3+0] +
                                check_v * streamer->work_cfg->source_lmn[i*3+1] +
                                check_w * streamer->work_cfg->source_lmn[i*3+2];
                    vis += 1/streamer->work_cfg->source_corr[i] *
                           (cos(2*M_PI*ph) + 1.j * sin(2*M_PI*ph));
                }
                vis /= (double)cfg->image_size * cfg->image_size;

                // Check
                double complex vis_grid =
                    subgrid[(iv+cfg->xM_size/2) * cfg->xM_size + iu + cfg->xM_size/2];
                double err = cabs(vis_grid - vis);
                err_sum += err * err;
                worst_err = fmax(worst_err, err);
                err_samples += 1;
            }
        }
#pragma omp atomic
        streamer->grid_error_samples += err_samples;
#pragma omp atomic
        streamer->grid_error_sum += err_sum;
        if (worst_err > streamer->grid_worst_error) {
#pragma omp critical
            streamer->grid_worst_error = fmax(streamer->grid_worst_error, worst_err);
        }
    }

    free(subgrid);

    // Normalised RMSE, or -1 when no source checks were performed
    if (err_samples > 0)
        return sqrt(err_sum / err_samples) / streamer->work_cfg->source_energy;
    else
        return -1;
}

/* Top-level handler for one unit of subgrid work: waits for a free queue
 * slot, optionally compares incoming facet contributions with reference
 * data, recombines all facet contributions into the subgrid, runs checks,
 * and spawns one OpenMP degridding task per group of vis_bls_per_task
 * baselines (each task holds a lock on the slot until done). */
void streamer_work(struct streamer *streamer,
                   int subgrid_work,
                   double complex *nmbf)
{
    struct recombine2d_config *const cfg = &streamer->work_cfg->recombine;
    struct subgrid_work *const work =
        streamer->work_cfg->subgrid_work +
        streamer->subgrid_worker * streamer->work_cfg->subgrid_max_work + subgrid_work;
    struct facet_work *const facet_work = streamer->work_cfg->facet_work;

    const int facets = streamer->work_cfg->facet_workers * streamer->work_cfg->facet_max_work;
    const int nmbf_length = cfg->NMBF_NMBF_size / sizeof(double complex);

    // Find slot to write to (spin until a slot's lock count reaches zero)
    int slot;
    while(true) {
        for (slot = 0; slot < streamer->queue_length; slot++)
            if (streamer->subgrid_locks[slot] == 0)
                break;
        if (slot < streamer->queue_length)
            break;
#pragma omp taskyield
        usleep(100);
    }

    double recombine_start = get_time_ns();

    // Compare with reference
    if (work->check_fct_path) {
        int i0 = work->iv, i1 = work->iu;
        int ifacet;
        for (ifacet = 0; ifacet < facets; ifacet++) {
            if (!facet_work[ifacet].set) continue;
            int j0 = facet_work[ifacet].im, j1 = facet_work[ifacet].il;
            double complex *ref = read_hdf5(cfg->NMBF_NMBF_size, work->check_hdf5,
                                            work->check_fct_path, j0, j1);
            int x;
            double err_sum = 0;
            for (x = 0; x < nmbf_length; x++) {
                double err = cabs(ref[x] - nmbf[nmbf_length*ifacet+x]);
                err_sum += err*err;
            }
            free(ref);
            double rmse = sqrt(err_sum / nmbf_length);
            if (!work->check_fct_threshold || rmse > work->check_fct_threshold) {
                printf("Subgrid %d/%d facet %d/%d checked: %g RMSE\n",
                       i0, i1, j0, j1, rmse);
            }
        }
    }

    // Accumulate contributions to this subgrid
    double complex *subgrid = subgrid_slot(streamer, slot);
    memset(subgrid, 0, cfg->SG_size);
    int ifacet;
    for (ifacet = 0; ifacet < facets; ifacet++)
        recombine2d_af0_af1(cfg, subgrid,
                            facet_work[ifacet].facet_off_m,
                            facet_work[ifacet].facet_off_l,
                            nmbf + nmbf_length*ifacet);
    streamer->recombine_time += get_time_ns() - recombine_start;

    // Perform checks on result
    double rmse = streamer_checks(streamer, work, subgrid);

    struct vis_spec *const spec = &streamer->work_cfg->spec;
    if (spec->time_count > 0 && streamer->kern) {

        // Loop through baselines
        struct subgrid_work_bl *bl;
        int i_bl = 0;
        for (bl = work->bls; bl; bl = bl->next, i_bl++) {
            // Only spawn a task at the start of each group; the task walks
            // the following vis_bls_per_task baselines itself.
            if (i_bl % streamer->work_cfg->vis_bls_per_task != 0)
                continue;

            // We are spawning a task: Add lock to subgrid data to
            // make sure it doesn't get overwritten
#pragma omp atomic
            streamer->subgrid_tasks++;
#pragma omp atomic
            streamer->subgrid_locks[slot]++;

            // Start task. Make absolutely sure it sees *everything*
            // as private, as Intel's C compiler otherwise loves to
            // generate segfaulting code. OpenMP complains here that
            // having a "private" constant is unecessary (requiring
            // the copy), but I don't trust its judgement.
            double task_start = get_time_ns();
            struct subgrid_work *_work = work;
#pragma omp task firstprivate(streamer, _work, bl, slot, subgrid_work, subgrid)
            streamer_task(streamer, _work, bl, slot, subgrid_work, subgrid);
#pragma omp atomic
            streamer->task_start_time += get_time_ns() - task_start;
        }

        if (rmse >= 0) {
            printf("Subgrid %d/%d/%d (%d baselines, rmse %.02g)\n",
                   work->iu, work->iv, work->iw, i_bl, rmse);
        } else {
            printf("Subgrid %d/%d/%d (%d baselines)\n",
                   work->iu, work->iv, work->iw, i_bl);
        }
        fflush(stdout);
        // NOTE(review): unlike the counters above this increment is not
        // under '#pragma omp atomic' -- confirm streamer_work is only ever
        // invoked from a single thread per streamer.
        streamer->baselines_covered += i_bl;
    }
}
/* ==== nested-3.c ==== */
#include <omp.h>
#include <stdlib.h>
#include <string.h>

/* Conformance test for the OpenMP nesting-introspection routines:
   omp_get_level, omp_get_active_level, omp_get_ancestor_thread_num and
   omp_get_team_size, checked at nesting levels 0 through 3.

   Structure: a 4-thread outer parallel region, a serialized (if(0))
   middle region, and a 2-thread innermost region.  Each level records a
   failure in its slot of e[] instead of aborting immediately, so all
   levels are exercised; main aborts at the end if any slot is nonzero.

   Expected out-of-range behavior (checked throughout): a negative level
   or a level greater than the current nesting depth must yield -1 from
   both omp_get_ancestor_thread_num and omp_get_team_size, while level 0
   always reports thread 0 in a team of size 1.  */
int main (void)
{
  int e[3];                      /* per-nesting-level error flags */
  memset (e, '\0', sizeof (e));
  omp_set_nested (1);            /* allow true nested parallelism */
  omp_set_dynamic (0);           /* force exact num_threads() team sizes */

  /* Level 0 (sequential part): not in parallel, level 0, ancestor/team
     queries valid only for level 0; -1 and 1 are out of range here.  */
  if (omp_in_parallel () || omp_get_level () != 0
      || omp_get_ancestor_thread_num (0) != 0
      || omp_get_ancestor_thread_num (-1) != -1
      || omp_get_ancestor_thread_num (1) != -1
      || omp_get_team_size (0) != 1
      || omp_get_team_size (-1) != -1
      || omp_get_team_size (1) != -1
      || omp_get_active_level () != 0)
    abort ();

  /* Level 1: active team of exactly 4 threads.  */
  #pragma omp parallel num_threads (4)
  {
    int tn1 = omp_get_thread_num ();
    if (omp_in_parallel () != 1
        || omp_get_num_threads () != 4
        || tn1 >= 4 || tn1 < 0
        || omp_get_level () != 1
        || omp_get_ancestor_thread_num (0) != 0
        || omp_get_ancestor_thread_num (1) != tn1
        || omp_get_ancestor_thread_num (-1) != -1
        || omp_get_ancestor_thread_num (2) != -1
        || omp_get_team_size (0) != 1
        || omp_get_team_size (1) != omp_get_num_threads ()
        || omp_get_team_size (-1) != -1
        || omp_get_team_size (2) != -1
        || omp_get_active_level () != 1)
      /* Record the failure atomically: all 4 threads may write e[0].  */
      #pragma omp atomic
      e[0] += 1;

    /* Level 2: if(0) serializes the region, so despite num_threads(5)
       the team has exactly 1 thread and the region is NOT active --
       nesting level rises to 2 but the active level stays 1.  */
    #pragma omp parallel if (0) num_threads(5) firstprivate(tn1)
    {
      int tn2 = omp_get_thread_num ();
      if (omp_in_parallel () != 1
          || omp_get_num_threads () != 1
          || tn2 != 0
          || omp_get_level () != 2
          || omp_get_ancestor_thread_num (0) != 0
          || omp_get_ancestor_thread_num (1) != tn1
          || omp_get_ancestor_thread_num (2) != tn2
          || omp_get_ancestor_thread_num (-1) != -1
          || omp_get_ancestor_thread_num (3) != -1
          || omp_get_team_size (0) != 1
          || omp_get_team_size (1) != 4   /* outer team still size 4 */
          || omp_get_team_size (2) != 1   /* serialized team size 1 */
          || omp_get_team_size (-1) != -1
          || omp_get_team_size (3) != -1
          || omp_get_active_level () != 1)
        #pragma omp atomic
        e[1] += 1;

      /* Level 3: active 2-thread team nested inside the serialized
         region; active level becomes 2 (levels 1 and 3 are active,
         level 2 is not).  Ancestor thread numbers must reproduce the
         firstprivate-captured ids of every enclosing level.  */
      #pragma omp parallel num_threads(2) firstprivate(tn1, tn2)
      {
        int tn3 = omp_get_thread_num ();
        if (omp_in_parallel () != 1
            || omp_get_num_threads () != 2
            || tn3 > 1 || tn3 < 0
            || omp_get_level () != 3
            || omp_get_ancestor_thread_num (0) != 0
            || omp_get_ancestor_thread_num (1) != tn1
            || omp_get_ancestor_thread_num (2) != tn2
            || omp_get_ancestor_thread_num (3) != tn3
            || omp_get_ancestor_thread_num (-1) != -1
            || omp_get_ancestor_thread_num (4) != -1
            || omp_get_team_size (0) != 1
            || omp_get_team_size (1) != 4
            || omp_get_team_size (2) != 1
            || omp_get_team_size (3) != 2
            || omp_get_team_size (-1) != -1
            || omp_get_team_size (4) != -1
            || omp_get_active_level () != 2)
          #pragma omp atomic
          e[2] += 1;
      }
    }
  }

  /* Fail the test if any nesting level saw a wrong answer.  */
  if (e[0] || e[1] || e[2])
    abort ();
  return 0;
}
bks_fmt_plug.c
/* * This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_bks; #elif FMT_REGISTERS_H john_register_one(&fmt_bks); #else #include <string.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "hmac_sha.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "twofish.h" #include "sha.h" #include "loader.h" #include "simd-intrinsics.h" #include "pkcs12.h" #include "memdbg.h" #define FORMAT_LABEL "BKS" #define FORMAT_NAME "" #define ALGORITHM_NAME "PKCS12 PBE " SHA1_ALGORITHM_NAME #define PLAINTEXT_LENGTH 31 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(ARCH_WORD_32) #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #if !defined(SIMD_COEF_32) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #else #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #endif #define FORMAT_TAG "$bks$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define MAX_STORE_DATA_LENGTH 8192 // XXX ensure this is large enough static struct fmt_tests tests[] = { 
{"$bks$0$1$20$2036$20$a2c6157bea089967ccfa13670ae992a1265ab7b5$01001a636861726c65732070726f78792073736c2070726f7879696e6700000140737320ac000000000005582e353039000004623082045e30820346a003020102020101300d06092a864886f70d01010505003081913123302106035504030c1a436861726c65732050726f78792053534c2050726f7879696e6731243022060355040b0c1b687474703a2f2f636861726c657370726f78792e636f6d2f73736c3111300f060355040a0c08584b3732204c74643111300f06035504070c084175636b6c616e643111300f06035504080c084175636b6c616e64310b3009060355040613024e5a3020180f31383939313233313132303030305a170d3338303932343033313930355a3081913123302106035504030c1a436861726c65732050726f78792053534c2050726f7879696e6731243022060355040b0c1b687474703a2f2f636861726c657370726f78792e636f6d2f73736c3111300f060355040a0c08584b3732204c74643111300f06035504070c084175636b6c616e643111300f06035504080c084175636b6c616e64310b3009060355040613024e5a30820122300d06092a864886f70d01010105000382010f003082010a02820101008349587455efb272e397a31d3b52d9b13115c93f320766d2d451117f45c40285506027079ed439cabb94d44f1ae136eb1e79bf77abe43345ad1d436809cf9e035c439272f3ca917dcadd7fbd0e3929f1a345f0b89096130bbd116f8d3ab5655789b7b0831325bd22903f198da6bdda30c08dfd17ce9ab51c48555264307bcf789a2b6c48df4ecaf3ea2c092ee737ad8f397900ac03303bfe2ae43549030a7866cb6fe9b04b9f6ec498b4e7369e99b45491bf093858a77c72f8adc818e018d413265e39446be514f78eb57a23aa88f630776f861a9163e04ad38ee8a5c9219d0fc23f6b9a6324455dea6f4a6a251eca1fa3d6288cb89fd12a2062a3a015a56f250203010001a381bc3081b9300f0603551d130101ff040530030101ff307706096086480186f842010d046a136853534c2050726f7879696e6720697320656e61626c656420696e20436861726c65732050726f78792e20506c6561736520766973697420687474703a2f2f636861726c657370726f78792e636f6d2f73736c20666f72206d6f726520696e666f726d6174696f6e2e300e0603551d0f0101ff040403020204301d0603551d0e04160414bb27f4cb2eb6dbb058101bbd803f38d208d76129300d06092a864886f70d010105050003820101000041f935f30b209e56360f7e3d9c30314a213323c47edcea1467600a50ffe4e8e39dfca8c8d34463c34745ff04c870f1df28b
b772db0cf1bca677b70842c742bc6d5fb00559ad643c6bf2c95bd0b855a961d7d6a3eada9c642e9a789474c4ad838c6f732d8d859548d30829df7a32d098fe3f00147daf08c0b37dd597184c1e27a61ea42050c73994e809013cb21e37bf84bf923bcefea6164fd28ab9058ccc48f1f486fc1c47ebd8a9c933f542401b11f36a003e47b141a41c7b326d18d023e11edb445699aa44800254ea33f174fd5eb1ccce6a09365751ff905988c06315b5575067bf65ec24cad1a6a601846d1d2f51f1f420a2762990b044000619d1c8400$3b798574df20a2be48edb0b0c687cce2cf5c293c", "secret"}, // https://github.com/doublereedkurt/pyjks/blob/master/tests/keystores/bks/christmas.bksv1 {"$bks$0$1$20$1730$20$a9e6ba49c14bd8fd2c973d48f0241a4208effcfd$020009706c61696e5f6b657900000154b6ca8fa5000000000200035241570003444553000000084cf2fe915d082a430400127365616c65645f707269766174655f6b657900000154b6ca8f6e000000010005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d01010b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a4000002ac00000014ad6391270981b833ba68bbfa225f087ab7e345240000075cea5b86ca6a5e80e0b1fd06a745bcc2bef875e4746db35c59a00bbac4f398cf202fe97b60813848b21c7a36f3faccac560bd05506f5b44322ed519af34190bdf10905e81d8569c3c32db3238bfefee5328c21883d82c6b9a9e07cf975aa8559368b2f9212a7e01103c21c5136e6da0b0ecdd8bcba1e071f9f084d59349c18d4e6af418ab0ca9ce73fa4bd38e1bf84e809f001069f9c821d3ee44b
b23182d229b782607bd47e68e7ff299ed1e28f7cb3cb03af5ef90711db5b306ec592a2bef7a5559a06290eaab19ab77a02caf3297e24bdc1aeb14d99d2e838863f355c738d91e4496e4f10f5a7bde22c0425524e164f198407ad99d5433fdbd6bd2adf50bbe2c909ae4e18effa5fb62059614aa646639fe4963f5e34d33b030e708fe5a816cb3d9596a0e394114f2622ce7694174e2399de1e04ae42022cceb2f5da0c273f7fc6f45bd7991b8e85df594a171a0fe64a73e1b9a0f57492eaaa35d5498c7ebbd28fcd23c12a006fc147cd5209168eadf40b53d4196066352e7cf562858c62dd746f43c48542d174857d03142a680b975948ebba31ad1010d1a7fbf8cbc3c0b16c376d8567212666aa01a420db3028695c289f0625e4d8fb872c358fb9e821c6346055d7c04ec545688cd011e9f60ba2e80959ec8fed703044ea7422d0ce4f7a401a05db9afc30b39c64b0e118599f39124df1c28298ee625a3c0095e0ecb14ae72dacc8106a52a258ea1e2554005bfa7c4e90e1a999a949e6bead7f333de2bb036b047c86cb1e6c8931d189b07647e500c04f8c4772fa630c328d60b0cb3a9e209ba0e574194dd96f4f4e6465273ae7c6c5d73eec505da065294803971584a60b2222bff62b36ad59cfcff893999ff484f2849186284303c1c2369445d466850ec7737d8313229af62576bfc2962284650400117365616c65645f7075626c69635f6b657900000154b6ca8f9500000000000000d400000014a39c93f59d151905a421db0646b31841d7ddb375000004e5faab6bfd6d6ddef0253fb9669a8ce679b583b4d8e18d42a62504e916c9647ff3b00eaad96e5410ab11bbf642e297c60954bdb065da4db0a69e0b2e6baa5ed8361939d4aec7599919d20cbaf05483655a4a5bb7fcc1b7ff33196a7df54779245d14f2f68636d1983d03c2af9ebbc78ed57116f58b019d810a8d9f03b45b4d56b2fbfeab0f7c8d506e1add5cd83fe06be0873cff5a198d213085df165dbf8d98371b9bf2d9ecc67a91f8ac731eecdbc7c46661fe4efb470da50100046365727400000154b6ca8f6e000000000005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4
b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d01010b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a403000c73746f7265645f76616c756500000154b6ca8f9f0000000000000009020305070b0d1113170400117365616c65645f7365637265745f6b657900000154b6ca8f9a000000000000003c00000014b3cd07a06cc6354ffaae1a63a297e46ed8791ff800000437fc0a1b31876167b84e1d85b00dfdae0ee0ad42ad3cfdae41f8022e1a719f56eb00$fdf1915288bcaa30ad5192bcc327db290b1c21e0", "12345678"}, // https://github.com/doublereedkurt/pyjks/blob/master/tests/keystores/bks/christmas.bksv2 {"$bks$0$2$160$1141$20$de18c5bf26bbce0c7a3e6b9685f3028c3a58c5c2$020009706c61696e5f6b657900000154b6ca8fe3000000000200035241570003444553000000084cf2fe915d082a430400127365616c65645f707269766174655f6b657900000154b6ca8fbd000000010005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c075253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d01010b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a4000002ac00000014755493361b0d7e3d4daed45a75199454cfe61585000005acae83870b92b606cc44facf5ce35598be2da231bf4ae80da2c2157f89e841
d27e98416e569fca7842acf0542f6b4027682c565bcad8a169c4a641929f19be3935446cf41c5994bf8a5b3fef45b640dacd9396e501a957e5d8d0a93bdf308640378788b6c0be629a8b6bd8cfa874ee3d71998ff6dd550a7865e36b373981ca42a70c0228193b717a98ea0da7817cb6f8f7d4fc4061acb6a39eb4bce14b98a94388f28ddd87dfbc13ac925fb2c1dd7a907485a34d66832b72eb92cb8e60c09c17b3a95769a81e773c248256146f5c3c8f04ffb26a75a8314c9c3be058963c9907a52e2f05cc1ca4921ac0b34ec8f6a1b27013f57399c20c681ef3f0f6f5a2879a2af4a7e746d2ddfd0b33917abd005dc59225fda2a7d1fe026ce8014e85bfc8e23932b3ae6af53a6ebe52def1f89942ec899eff2e74324ce45329c02114003dfc26211fdc3a0f3a9045331c2b0fe20c0985e4e24725c959c19c87b1679f76c2aefe447413adddf992bdc143b023373c894ebed1e2106236ebbbc8bf6d71c5770c50aab7c38e054299391ccaba845b11f88f40e12d126fa94584621921355caf293e876b71ba9e74d7577a83737ca92e581454bd1f6ea271379846aca2dc032d53181d7b8f61f98cbc43215023b512b39176650db6dabb6d038f7890d1d730da57e2f4595e9edc6466cf1eca12fd35413834a957cc341181439de13971c67794d8fadf2bcb296cf8e1a2058957af2a194e32599626f86d4dae838f944c7d495c9a36c72190fdd2cf55bafd12134383832bb8c23597d0dcc87754f575e77ccc802ba0f3d7662f01db277d95bcddec55927af67753e07c0d4ee509465a122c6b0adb097d92158479d4836dc2e03b3a493d44dda8eb9895327216460d2926e122868af3f6f82db073af041b66668e2e16b882690400117365616c65645f7075626c69635f6b657900000154b6ca8fcc00000000000000d400000014921f89acc2d3f90372e5b61a04294b6efb34075100000434a69feb6389d6bb8b561139a8c877ae1ae87167a709008e55350abbdd8fa0a77dbe1609e2685fcae5c42825c7dc65ac0dc2fce4a4d8688b71eaaa094461cadce7c74d9a4bfb507d7fb4d31567d934f5e69829c12bdccc494876fd3fa12bde21c3c1525e05c580c87a4272212f0c3bce9fc9a32716ebfb50333563529a2bbc92a56a811c083135ed2fc38d4a203d3bc2fa3cddeac9e8d6cbc760beaedb8dd9312e8d30be0f6976f9ab7681c5902ab210c2d11202d9181a8d250100046365727400000154b6ca8fbd000000000005582e3530390000019c3082019830820101a003020102020100300d06092a864886f70d01010b050030123110300e06035504030c0752534131303234301e170d3136303531353233343030385a170d3138303531353233343030385a30123110300e06035504030c07
5253413130323430819f300d06092a864886f70d010101050003818d0030818902818100b7201edbbf265bb253c299533704df2c990978c16e97a04b9556a6af1df11e60e5e138502fc1337879dfdd4461ede4f08b3303fd1b80befb1be09d9c3fcffc2c1caeb9c83d0142dac39d7c341bd4bc07b7fee23c162941f4b4fb221d9f93388cce2b21beffdd458be9244babf34e28f25ae620b4b883617bb5e9851364c0dd350203010001300d06092a864886f70d01010b05000381810097c7fb5997212b8ff7afa863be886d1d1a0947b7392d83a6304b60cbb76a9f7172095f123254aafdd315d933650993df354f82ac85d9467178ba8eda397149cf0df4dbba9d7bdbdffd83b710c2c8c8bf25ef4dda3d49cca820159eeb97b133c5f324219b4d1294d524a85d5e6b77e38f42814052f5134a938d29342b21bed1a403000c73746f7265645f76616c756500000154b6ca8fe30000000000000009020305070b0d1113170400117365616c65645f7365637265745f6b657900000154b6ca8fd1000000000000003c000000146c18877619eac9f77da0d86bd8a18639eb084f5f000004bd0c558b25c1657b8b9c25079e64196fe43e9fec0ef5d44b2d8a0dbd09c92a8b4e00$3b99d6fd87755af63606414be2b75b9cfa3751c7", "12345678"}, // christmas.uber {"$bks$1$1$20$1141$20$fcc7b038c0ca3e1b99e0bc1192ed999a66129a2d$c561a20373785bfa46d530192cfe16c3edf9ccacc75e53d2c1c7bafd64c77d1aea9c52817d9a93224bf49cca1273de0856d32a82f4ce97b550abb98fd9f82297814784774396bea9fb3282fdb75b33ed59d52a5eefc2486b0726dc2156ca8257f41d033f8c41276ce78c2155b80eeb97d9a3d8e065a73bb9d5ab1840d60ea56cc04b00a87346d8a580829b9e437869b4a39626b1d17e169589de1e78e8cd6261dcd8b48a3ae52b89f90b2af60f395aff5cca0281c5b6f5b4dcdb8d9a30090e41dc033c0a0426d03c35ec1264c5ca1710c32d69fe6f222cd913f56392d3c4e3c80a0a6118bd4054a3f932728b9f855ac3ed45f1ec9209c2be6e807ec427e576781244df751f52f858c23a7985ed667ed739eee6151ad0d28f520406eb30a8c27da2fd5cdb471cd73e5c1f0c746527414a65efa39b336aeb43f03c556f01e4aec7f464313d4f238316ab229d854bc48e6dad068c9a56a00f2c188cea1baeea08420f1ee82789a089678b9bf134b95147c83f0962e5f45c96e5a9b43628c4df0f415885f857932d9344c409a25d8ff7918d228ddc25a5940f18f00f1a83b7ccbe520dca92eb9a360857857c46a70c0becfff0a66286488313835406e6e9e9053d1e139226e82471171f1748a0447b2efd015d87399118c548a048270c61034c
832265bd9104be3ea7910c9e730c2d2ddfb5edc761634a388ae364c91b2662ce4e437e0f954cf14dee83e01dec7c7aeb8d0c63ed099e8ec28aa64b54368159ec819d69fb028554ad32af7318602b98fa2d0cfed206ae8973a6c305c80f8972f9b245808364d599c9cd6847ba2ee44e9fe07be2c2323ea28ff8d8965036849085d9947153730a367e955b67d195510c6a73993c403224bc5877bcf04be2b9c42c7fe3ea28c953e4c21278b3e12f21a541758f41b08689eba4985ad18e6e113f3abef6479df04aac104079ab8677cfeae33bc090d47f7f17c1386bdf099282fa079a48ce4f94d661f4ec762892faf680131f0372555f3436ac7b364eb64570d1b46e3d8ebd97236d01abe217c8a95ab0ba097a56f45f96c18b777343e7214748262f083a9fdcd4e4331e50d17dc58dfedfa459e87bd71be8a09283b25b1a0e6daba7d03ca2580c02edcae8e43f2d8ebdff37385bdcfa73f057d636c970278a01b7a1f02878e1961ef5a7fab4bd2e788bf356688f5b47d1573db600d91474c5a802ce27c789fd02df30ab719f8bcd7c58eb5bd45b9e20d1bb2f26c1f3cdd32247dd7268f56cb187734e0d977a5b2aef80622960156042b65448eee980f7d77d8b2519aa5f216a6300c8b534220feaf56ade0369e5082fc4dd623f134abef8b8253b4e7dc8a9a40188598b18512ba77d009c5fbdd2159fd8707dfea97b1e7e8c4297dd7499aaaa7c09097f057badc385e9de29a2b668529fc280f6cea1078d0d79834763a24ba38f19e4654dfe7cbb9ff08f122509433a3d9d055b864c8a9a88518fd93e50ee9c7f678aaf52c25f3f4858996add60e6c21204c1e22fbb02b9090578cc1f42861dc93f955d81f00decee8fe7405367ee986835b0be865084964226e47c77c764b823421383299e76fbcfd64c2ad085681110f772e4c2f526eab0d6a7a771814613f745230ac8bd1055c5612f17cb3b5c4459b809f082bb11bfdee8d1db48d0616e51ee77594ad92a417027665beb208888f5b030a022bf9850d69d18e883fcb4c47274db3708d5a5c59bc1e25842de6dce350d1c2c16d7f9c7103ed15c6f25508cc27199569d24bf7e55e960a0006e177470a1a3cc33f540324565c0a67d20a2c7b8389e8aa375e4b4515d451ce0dce43cf99a2a6f2f6adba5206c243a8aa2a974a2737923b61f8c3c86253fe896fd9c8ab9a66f17086dde38b539cb23a2d7e261299d8dc12639cdbd56a5813026a3670f9ac79346b302b00f1c3d41fd566d9e3a3ad2d756948f5f97e1967fcc1410a1078191fb89b51a137a70ae505dfa65a96b9e4d70f562b4f2c715c1f782fd79d02f98aead88f1eb5d8722b4fef4699218f003f99270b852a0f892f9f23827820900288a4a1fa5978b44bd3035de46f6aa8a3e32ad
e38190756658836c0fde5b9b34a16a8e97cdcbace80bb86d88dceaeb9a2b87ff2a02bd004bb1d4f08232b91b476b3ca0873ef132b0a70d939a11c4fe66bc2b113eba14c2e4728499441b6f26ec014968c95e71ff23d33c4267c67af21b393a770e03a8781b10159a20cfceab34dc592a394e11fd649cfe71eaf7ef7de863d1a6eff623a67b7cc94b114cc646df2380e4851c614eb54d59476419964063d9f85661c079719ba4d2ce5872820f18a34f384dd48fb20907c8ffa521ef3e6a4302524c442a2230f8b574a82e78f1e699d3b2621132bda6a3238e9b41bd773d3d476dfc11ef549683b789e286a54d4c8ea01a727f9bdbfd6310e8882b2ed26b75edc9958cabe3beed1cc167c7f2ab759326b0e4026ac63e2489ea4da6814fbf547a6637fc1d0fc78f79c5517b7e329ea226c2d29281cac160a7709953ab1dcdb731e68b940106bee0d9d68720b66d2953ad3d2185596850dbb4cfc3654161fbfee8be7258b9a46a82e8c8c908fc93c235b9ae101b4e66c7b52434fe6c4147f035cd5d42a4a8b047d33d907e89018a994f343f241643ad1273ca8d486de956931e7f94c197460ec09c100fb27ef98d1e57e1cb736adb943a83193ce586aa5c61e5c0bd25f5fd846beb9cb8212643b752e0373f17a0061a5b350b4d06b405c5c0949b5733996eb67262893ecb4bd1213a04ad4c7900b441c103250a01b41075b70f13c26937b149e41c180f55b8d472273679032c1a16058d05e43fedcde7164527ca82b160cc9deb53675bbe0db70a47a5decc64f500d9dd7686240366$0000000000000000000000000000000000000000", "12345678"}, {NULL} }; #ifdef _MSC_VER #define custom_salt bks_custom_salt #define cur_salt bks_cur_salt #endif static struct custom_salt { int format; // 0 -> BKS keystore int version; // BKS version int hmac_key_size; int iteration_count; int saltlen; unsigned char salt[20]; int store_data_length; unsigned char store_data[MAX_STORE_DATA_LENGTH]; unsigned char store_hmac[20]; } *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static size_t *saved_len; static int *cracked, any_cracked; // "cracked array" approach is required for UBER keystores static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = 
mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); cracked = mem_calloc(self->params.max_keys_per_crypt, sizeof(*cracked)); Twofish_initialise(); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_len); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p = ciphertext, *ctcopy, *keeptr; int format, version, saltlen, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LENGTH; if ((p = strtokm(ctcopy, "$")) == NULL) // format goto bail; if (!isdec(p)) goto bail; format = atoi(p); if (format != 0 && format != 1) // 0 -> BKS keystore, 1 -> UBER keystore goto bail; if ((p = strtokm(NULL, "$")) == NULL) // version goto bail; if (!isdec(p)) goto bail; version = atoi(p); if (version != 1 && version != 2) // BKS, BKS-v1 goto bail; if ((p = strtokm(NULL, "$")) == NULL) // hmac_key_size goto bail; if (!isdec(p)) goto bail; if ((p = strtokm(NULL, "$")) == NULL) // iteration_count goto bail; if (!isdec(p)) goto bail; if ((p = strtokm(NULL, "$")) == NULL) // saltlen goto bail; if (!isdec(p)) goto bail; saltlen = atoi(p); if (saltlen > 20) goto bail; if ((p = strtokm(NULL, "$")) == NULL) // salt goto bail; if (hexlenl(p, &extra) > saltlen * 2 || extra) goto bail; if (!ishexlc(p)) goto bail; if ((p = strtokm(NULL, "$")) == NULL) // store_data goto bail; if (hexlenl(p, &extra) > MAX_STORE_DATA_LENGTH * 2 || extra) goto bail; if (!ishexlc(p)) goto bail; if ((p = strtokm(NULL, "$")) == NULL) // store_hmac goto bail; if (hexlenl(p, &extra) != 20*2 || extra) goto bail; if (!ishexlc(p)) goto bail; p = strrchr(ciphertext, '$'); if (!p) goto bail; p = p + 1; if (!ishexlc(p)) goto bail; MEM_FREE(keeptr); return 1; bail: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; int i; char *p = ciphertext, *ctcopy, 
*keeptr; memset(&cs, 0, sizeof(cs)); ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LENGTH; p = strtokm(ctcopy, "$"); cs.format = atoi(p); p = strtokm(NULL, "$"); cs.version = atoi(p); p = strtokm(NULL, "$"); cs.hmac_key_size = atoi(p); p = strtokm(NULL, "$"); cs.iteration_count = atoi(p); p = strtokm(NULL, "$"); cs.saltlen = atoi(p); p = strtokm(NULL, "$"); for(i = 0; i < cs.saltlen; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])]; p = strtokm(NULL, "$"); cs.store_data_length = hexlenl(p, 0) / 2; for(i = 0; i < cs.store_data_length; i++) cs.store_data[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])]; p = strtokm(NULL, "$"); if (cs.format == 0) { // BKS keystore for(i = 0; i < 20; i++) cs.store_hmac[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])]; } MEM_FREE(keeptr); return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { int index; const int count = *pcount; if (any_cracked) { memset(cracked, 0, sizeof(*cracked) * count); any_cracked = 0; } #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #if !defined(SIMD_COEF_32) if (cur_salt->format == 0) { unsigned char mackey[20]; int mackeylen = cur_salt->hmac_key_size / 8; // mackeylen is only 2 bytes, and this results in lot // of collisions (which work just fine) // // FMT_NOT_EXACT can be turned on for BKS keystores // for finding more possible passwords unsigned char store_hmac_calculated[20]; pkcs12_pbe_derive_key(1, cur_salt->iteration_count, MBEDTLS_PKCS12_DERIVE_MAC_KEY, (unsigned char*)saved_key[index], saved_len[index], cur_salt->salt, cur_salt->saltlen, mackey, mackeylen); hmac_sha1(mackey, mackeylen, cur_salt->store_data, cur_salt->store_data_length, store_hmac_calculated, 20); if 
(!memcmp(store_hmac_calculated, cur_salt->store_hmac, 20)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } else if (cur_salt->format == 1) { unsigned char compute_checkum[20]; unsigned char iv[16]; unsigned char key[32]; Twofish_key tkey; int datalen = 0; unsigned char store_data_decrypted[MAX_STORE_DATA_LENGTH]; SHA_CTX ctx; pkcs12_pbe_derive_key(1, cur_salt->iteration_count, MBEDTLS_PKCS12_DERIVE_IV, (unsigned char*)saved_key[index], saved_len[index], cur_salt->salt, cur_salt->saltlen, iv, 16); pkcs12_pbe_derive_key(1, cur_salt->iteration_count, MBEDTLS_PKCS12_DERIVE_KEY, (unsigned char*)saved_key[index], saved_len[index], cur_salt->salt, cur_salt->saltlen, key, 32); Twofish_prepare_key(key, 32, &tkey); datalen = Twofish_Decrypt(&tkey, cur_salt->store_data, store_data_decrypted, cur_salt->store_data_length, iv); if (datalen < 0) continue; SHA1_Init(&ctx); SHA1_Update(&ctx, store_data_decrypted, datalen - 20); SHA1_Final(compute_checkum, &ctx); if (!memcmp(compute_checkum, store_data_decrypted + datalen - 20, 20)) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } #else size_t lens[SSE_GROUP_SZ_SHA1], j; const unsigned char *keys[SSE_GROUP_SZ_SHA1]; // Load keys, and lengths for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) { lens[j] = saved_len[index+j]; keys[j] = (const unsigned char*)(saved_key[index+j]); } if (cur_salt->format == 0) { unsigned char *mackey[SSE_GROUP_SZ_SHA1], real_keys[SSE_GROUP_SZ_SHA1][20]; int mackeylen = cur_salt->hmac_key_size / 8; // mackeylen is only 2 bytes, and this results in lot // of collisions (which work just fine) // // FMT_NOT_EXACT can be turned on for BKS keystores // for finding more possible passwords unsigned char store_hmac_calculated[20]; for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) mackey[j] = real_keys[j]; pkcs12_pbe_derive_key_simd(1, cur_salt->iteration_count, MBEDTLS_PKCS12_DERIVE_MAC_KEY, keys, lens, cur_salt->salt, cur_salt->saltlen, mackey, mackeylen); for 
(j = 0; j < SSE_GROUP_SZ_SHA1; ++j) { hmac_sha1(mackey[j], mackeylen, cur_salt->store_data, cur_salt->store_data_length, store_hmac_calculated, 20); if (!memcmp(store_hmac_calculated, cur_salt->store_hmac, 20)) { cracked[index+j] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } } else if (cur_salt->format == 1) { unsigned char iv_[SSE_GROUP_SZ_SHA1][16], *iv[SSE_GROUP_SZ_SHA1]; unsigned char ckey_[SSE_GROUP_SZ_SHA1][32], *ckey[SSE_GROUP_SZ_SHA1]; Twofish_key tkey; int datalen = 0; unsigned char store_data_decrypted[MAX_STORE_DATA_LENGTH]; SHA_CTX ctx; for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) { iv[j] = iv_[j]; ckey[j] = ckey_[j]; } pkcs12_pbe_derive_key_simd(1, cur_salt->iteration_count, MBEDTLS_PKCS12_DERIVE_IV, keys, lens, cur_salt->salt, cur_salt->saltlen, iv, 16); // lengths get tromped on, so re-load them for the load keys call. for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) lens[j] = saved_len[index+j]; pkcs12_pbe_derive_key_simd(1, cur_salt->iteration_count, MBEDTLS_PKCS12_DERIVE_KEY, keys, lens, cur_salt->salt, cur_salt->saltlen, ckey, 32); for (j = 0; j < SSE_GROUP_SZ_SHA1; ++j) { unsigned char compute_checkum[20]; Twofish_prepare_key(ckey[j], 32, &tkey); datalen = Twofish_Decrypt(&tkey, cur_salt->store_data, store_data_decrypted, cur_salt->store_data_length, iv[j]); if (datalen < 0) continue; SHA1_Init(&ctx); SHA1_Update(&ctx, store_data_decrypted, datalen - 20); SHA1_Final(compute_checkum, &ctx); if (!memcmp(compute_checkum, store_data_decrypted + datalen - 20, 20)) { cracked[index+j] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } } #endif } return count; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(saved_key[index])); } static char *get_key(int index) { return 
saved_key[index]; } struct fmt_main fmt_bks = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */ }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */ }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
trilinos_block_builder_and_solver.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//

#if !defined(KRATOS_TRILINOS_BLOCK_BUILDER_AND_SOLVER )
#define KRATOS_TRILINOS_BLOCK_BUILDER_AND_SOLVER

/* System includes */
#include <set>

/* External includes */
#include "boost/timer.hpp"

/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"

#include "Epetra_MpiComm.h" //trilinos includes
#include "Epetra_Map.h"
#include "Epetra_Vector.h"
#include "Epetra_FECrsGraph.h"
#include "Epetra_FECrsMatrix.h"
#include "Epetra_IntSerialDenseVector.h"
#include "Epetra_IntVector.h"
#include "Epetra_SerialDenseMatrix.h"
#include "Epetra_SerialDenseVector.h"
#include "EpetraExt_RowMatrixOut.h"
#include "EpetraExt_MultiVectorOut.h"
#include "Epetra_Import.h"

namespace Kratos
{

/**@name Kratos Globals */
/*@{ */

/*@} */
/**@name Type Definitions */
/*@{ */

/*@} */
/**@name  Enum's */
/*@{ */

/*@} */
/**@name  Functions */
/*@{ */

/*@} */
/**@name Kratos Classes */
/*@{ */

/** Short class definition.
Detail class definition.

Current class provides an implementation for standard builder and solving operations.

the RHS is constituted by the unbalanced loads (residual)

Degrees of freedom are reordered putting the restrained degrees of freedom at
the end of the system ordered in reverse order with respect to the DofSet.

Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
this information.

Calculation of the reactions involves a cost very similar to the calculation of the total residual

\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}

\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
         class TDenseSpace,
         class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
         >
class TrilinosBlockBuilderAndSolver
    : public BuilderAndSolver< TSparseSpace,TDenseSpace, TLinearSolver >
{
public:
    /**@name Type Definitions */
    /*@{ */
    KRATOS_CLASS_POINTER_DEFINITION( TrilinosBlockBuilderAndSolver );

    // Convenience aliases pulled from the (templated) base builder-and-solver.
    typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver > BaseType;

    typedef TSparseSpace SparseSpaceType;

    typedef typename BaseType::TSchemeType TSchemeType;

    typedef typename BaseType::TDataType TDataType;

    typedef typename BaseType::DofsArrayType DofsArrayType;

    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;

    typedef typename BaseType::TSystemVectorType TSystemVectorType;

    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;

    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;

    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;

    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;

    typedef typename BaseType::NodesArrayType NodesArrayType;

    typedef typename BaseType::ElementsArrayType ElementsArrayType;

    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    typedef typename BaseType::ElementsContainerType ElementsContainerType;

    /*@} */
    /**@name Life Cycle */
    /*@{ */

    /**
Constructor. */ TrilinosBlockBuilderAndSolver( Epetra_MpiComm& Comm, int guess_row_size, typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver) , mrComm(Comm),mguess_row_size(guess_row_size) { /* std::cout << "using the standard builder and solver " << std::endl; */ } // TrilinosBlockBuilderAndSolver( // ) // : BaseType(typename LinearSolver<TSparseSpace,TDenseSpace>::Pointer(new LinearSolver<TSparseSpace,TDenseSpace>)) // { // // /* std::cout << "using the standard builder and solver " << std::endl; */ // // } /** Destructor. */ virtual ~TrilinosBlockBuilderAndSolver() {} /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) override { KRATOS_TRY if(!pScheme) KRATOS_ERROR << "No scheme provided!"; //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero(*BaseType::mpReactionsVector); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // int rank = A.Comm().MyPID(); //getting the processor Id ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution 
pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId); TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId); TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId); } //finalizing the assembly A.GlobalAssemble(); b.GlobalAssemble(); KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A) override { KRATOS_TRY //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions // TSparseSpace::SetToZero(BaseType::mReactionsVector); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution 
pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution TSparseSpace::AssembleLHS(A,LHS_Contribution,EquationId); } //finalizing the assembly A.GlobalAssemble(); KRATOS_CATCH("") } //************************************************************************** //************************************************************************** /** Solve the linear problem. */ void SystemSolveWithPhysics( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, ModelPart& r_model_part ) { KRATOS_TRY double norm_b; if(TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if(norm_b != 0.00) { if (this->GetEchoLevel()>1) if (mrComm.MyPID() == 0) KRATOS_WATCH("entering in the solver"); if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() ) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, r_model_part); if (this->GetEchoLevel()>3) { EpetraExt::RowMatrixToMatrixMarketFile( "A.mm", A, "matrixA", "lhs_matrix", true); EpetraExt::MultiVectorToMatrixMarketFile( "b.mm", b, "vectorb","rhs_vector",true); KRATOS_ERROR << "Stopping after printing the matrix"; } if (this->GetEchoLevel()>3) { EpetraExt::RowMatrixToMatrixMarketFile( "A.mm", A, "matrixA", "block_matrix", true); EpetraExt::MultiVectorToMatrixMarketFile( "b.mm", b, "vectorb","rhs_vector",true); KRATOS_ERROR << "Stopping after printing the matrix"; } BaseType::mpLinearSystemSolver->Solve(A,Dx,b); } else { 
TSparseSpace::SetToZero(Dx); } //prints informations about the current time if (this->GetEchoLevel()>1) { std::cout << *(BaseType::mpLinearSystemSolver) << std::endl; } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** /** Build and solve the linear problem. */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY boost::timer building_time; Build(pScheme,r_model_part,A,b); if(BaseType::GetEchoLevel()>0) { if(this->mrComm.MyPID() == 0) std::cout << "Building Time : " << building_time.elapsed() << std::endl; } //apply dirichlet conditions ApplyDirichletConditions(pScheme,r_model_part,A,Dx,b); if (BaseType::GetEchoLevel()== 3) { std::cout << "before the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } boost::timer solve_time; SystemSolveWithPhysics(A,Dx,b,r_model_part); if(BaseType::GetEchoLevel()>0) { if(this->mrComm.MyPID() == 0) std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl; } if (BaseType::GetEchoLevel()== 3) { std::cout << "after the solution of the system" << std::endl; std::cout << "System Matrix = " << A << std::endl; std::cout << "unknowns vector = " << Dx << std::endl; std::cout << "RHS vector = " << b << std::endl; } KRATOS_CATCH("") } //************************************************************************** //************************************************************************** /** Build right-hand side and solve the linear problem. 
*/ void BuildRHSAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY BuildRHS(pScheme,r_model_part,b); SystemSolveWithPhysics(A,Dx,b,r_model_part); KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) override { KRATOS_TRY //Getting the Elements ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions // TSparseSpace::SetToZero(BaseType::mReactionsVector); //contributions to the system LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId); } RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution TSparseSpace::AssembleRHS(b,RHS_Contribution,EquationId); } //finalizing the assembly b.GlobalAssemble(); KRATOS_CATCH("") } 
//**************************************************************************
    //**************************************************************************
    /** Builds BaseType::mDofSet: collects the Dofs of all local-mesh elements
     *  and of all conditions, removes duplicates, and (in debug builds) checks
     *  that a reaction variable exists for every dof when reactions are
     *  requested.
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part
    ) override
    {
        KRATOS_TRY

        //Gets the array of elements from the modeler
        ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();
        /* ElementsArrayType& pElements = r_model_part.Elements(ModelPart::Kratos_Local); */
        Element::DofsVectorType ElementalDofList;

        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();

        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();

        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);

        for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
        {
            // gets list of Dof involved on every element
            pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo);

            for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i)
            {
                Doftemp.push_back( *i );
            }
        }

        //taking in account conditions
        ConditionsArrayType& pConditions = r_model_part.Conditions();
        for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin(); it!=pConditions.ptr_end(); ++it)
        {
            // gets list of Dof involved on every element
            pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo);

            for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ; i != ElementalDofList.end() ; ++i)
            {
                Doftemp.push_back( *i );
            }
        }

        Doftemp.Unique();
        BaseType::mDofSet = Doftemp;

        //throws an exception if there are no Degrees of freedom involved in the analysis
        if (BaseType::mDofSet.size()==0)
            KRATOS_ERROR << "No degrees of freedom!";

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag())
        {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl
                    << "Node : "<<dof_iterator->Id()<< std::endl
                    << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl;
            }
        }
#endif

        BaseType::mDofSetIsInitialized = true;

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /** Assigns globally consecutive equation ids to the dofs owned by this
     *  rank (PARTITION_INDEX == rank). The per-rank offset comes from
     *  MPI_Scan and the global system size from MPI_Allreduce; the first/last
     *  local ids are cached in mFirstMyId/mLastMyId for later map creation.
     */
    void SetUpSystem(
        ModelPart& r_model_part
    ) override
    {
        // Set equation id for degrees of freedom
        int free_size = 0;
        //int fixed_size = 0;

        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);

        // Calculating number of fixed and free dofs
        for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if(dof_iterator->GetSolutionStepValue(PARTITION_INDEX) == rank)
            {
                free_size++;
            }

        // Calculating the total size and required offset
        //int fixed_offset;
        int free_offset;
        int global_size;

        // The corresponding offset by the sum of the sizes in thread with inferior rank
        MPI_Scan(&free_size, &free_offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

        // The total size by the sum of all size in all threads
        MPI_Allreduce(&free_size, &global_size, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

        // finding the offset for the begining of the partition
        free_offset -= free_size;

        // Now setting the equation id with .
        for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            if(dof_iterator->GetSolutionStepValue(PARTITION_INDEX) == rank)
            {
                dof_iterator->SetEquationId(free_offset++);
                // std::cout << rank << " : set eq. id for dof " << dof_iterator->Id() << " to " << dof_iterator->EquationId() << std::endl;
            }

        BaseType::mEquationSystemSize = global_size;
        mLocalSystemSize = free_size;
        if(BaseType::GetEchoLevel()>0){
            std::cout << rank << " : BaseType::mEquationSystemSize = " << BaseType::mEquationSystemSize << std::endl;
            std::cout << rank << " : mLocalSystemSize = " << mLocalSystemSize << std::endl;
            std::cout << rank << " : free_offset = " << free_offset << std::endl;
            //std::cout << rank << " : fixed_offset = " << fixed_offset << std::endl;
        }

        //by Riccardo ... it may be wrong!
        mFirstMyId = free_offset-mLocalSystemSize;
        mLastMyId = mFirstMyId+mLocalSystemSize;

        r_model_part.GetCommunicator().SynchronizeDofs();
    }

    /** Exchanges dof equation ids with every neighbour rank via MPI_Sendrecv:
     *  sends the ids of the interface dofs and overwrites the ids of the
     *  local ghost dofs owned by that neighbour.
     */
    void UpdateGhostDofs(ModelPart& rThisModelPart)
    {
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // std::cout << rank << " : Starting UpdateGhostDofs...." << std::endl;

        //int source=rank;
        int destination=0;

        // vector<int>& neighbours_indices = rThisModelPart[NEIGHBOURS_INDICES];
        vector<int>& neighbours_indices = rThisModelPart.GetCommunicator().NeighbourIndices();

        // std::cout << rank << " starting domain loop " << std::endl;
        for(unsigned int i_domain = 0 ; i_domain < neighbours_indices.size() ; i_domain++)
            if((destination = neighbours_indices[i_domain]) >= 0)
            {
                // std::cout << rank << " domain #" << i_domain << std::endl;
                unsigned int send_buffer_size = 0;
                unsigned int receive_buffer_size = 0;

                // std::cout << rank;
                // KRATOS_WATCH(destination);

                // Calculating send and received buffer size
                // The interface meshes are stored after all, local and ghost meshes
                NodesArrayType& r_interface_nodes = rThisModelPart.GetCommunicator().LocalMesh(i_domain).Nodes();
                NodesArrayType& r_ghost_nodes = rThisModelPart.GetCommunicator().GhostMesh().Nodes();

                // std::cout << rank << " : 2...." << std::endl;
                for(typename NodesArrayType::iterator i_node = r_interface_nodes.begin(); i_node != r_interface_nodes.end(); ++i_node)
                    send_buffer_size += i_node->GetDofs().size();

                // std::cout << rank << " : 3...." << std::endl;
                for(typename NodesArrayType::iterator i_node = r_ghost_nodes.begin(); i_node != r_ghost_nodes.end(); ++i_node)
                    if(i_node->GetSolutionStepValue(PARTITION_INDEX) == destination)
                    {
                        receive_buffer_size += i_node->GetDofs().size();
                    }

                unsigned int position = 0;
                int* send_buffer = new int[send_buffer_size];
                int* receive_buffer = new int[receive_buffer_size];

                // Filling the buffer
                // NOTE(review): unlike the neighbouring trace statements this debug
                // print is active (not commented out) — presumably left in by
                // mistake; verify before silencing.
                std::cout << rank << " : Filling the buffer...." << std::endl;
                for(ModelPart::NodeIterator i_node = r_interface_nodes.begin(); i_node != r_interface_nodes.end(); ++i_node)
                    for(ModelPart::NodeType::DofsContainerType::iterator i_dof = i_node->GetDofs().begin() ; i_dof != i_node->GetDofs().end() ; i_dof++)
                    {
                        send_buffer[position++] = (*i_dof)->EquationId();
                    }

                MPI_Status status;

                // NOTE(review): this size check runs AFTER the buffer has already
                // been written, so an under-estimated size would already have
                // overflowed the heap buffer by the time it is reported.
                if(position > send_buffer_size)
                    std::cout << rank << " Error in estimating send buffer size...." << std::endl;

                int send_tag = 1;//i_domain;
                int receive_tag = 1;//i_domain;

                MPI_Sendrecv (send_buffer, send_buffer_size, MPI_INT, destination, send_tag, receive_buffer, receive_buffer_size, MPI_INT, destination, receive_tag, MPI_COMM_WORLD, &status);

                // std::cout << rank << " : Send and receive Finished" << std::endl;

                // Updating nodes
                position = 0;
                for(ModelPart::NodeIterator i_node = rThisModelPart.GetCommunicator().GhostMesh().NodesBegin() ;
                        i_node != rThisModelPart.GetCommunicator().GhostMesh().NodesEnd() ; i_node++)
                    // for(ModelPart::NodeIterator i_node = rThisModelPart.NodesBegin(ModelPart::Kratos_Ghost) ;
                    // i_node != rThisModelPart.NodesEnd(ModelPart::Kratos_Ghost) ; i_node++)
                    if(i_node->GetSolutionStepValue(PARTITION_INDEX) == destination)
                        for(ModelPart::NodeType::DofsContainerType::iterator i_dof = i_node->GetDofs().begin() ; i_dof != i_node->GetDofs().end() ; i_dof++)
                        {
                            (*i_dof)->SetEquationId(receive_buffer[position++]);
                        }

                if(position > receive_buffer_size)
                    std::cout << rank << " Error in estimating receive buffer size...." << std::endl;

                delete [] send_buffer;
                delete [] receive_buffer;
            }
    }

    //**************************************************************************
    //**************************************************************************
    /** Builds the row map and the Epetra_FECrsGraph of the system from the
     *  element/condition equation ids, then (re)allocates A, Dx, b and the
     *  reactions vector on that map. Only reshapes when the matrix is not yet
     *  initialized or reshaping is explicitly requested.
     */
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType& pA,
        TSystemVectorPointerType& pDx,
        TSystemVectorPointerType& pb,
        ModelPart& rModelPart
    ) override
    {
        KRATOS_TRY

        //~ std::cout << "entering ResizeAndInitializeVectors" << std::endl;

        //resizing the system vectors and matrix
        if ( pA == NULL || TSparseSpace::Size1(*pA) == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
        {
            //creating a work array
            unsigned int number_of_local_dofs = mLastMyId - mFirstMyId;

            int temp_size = number_of_local_dofs;
            if(temp_size <1000) temp_size = 1000;
            int* temp = new int[temp_size];
            //
            auto& rElements = rModelPart.Elements();
            auto& rConditions = rModelPart.Conditions();

            //generate map - use the "temp" array here
            for(unsigned int i=0; i!=number_of_local_dofs; i++)
                temp[i] = mFirstMyId+i;
            Epetra_Map my_map(-1, number_of_local_dofs, temp, 0, mrComm);

            //create and fill the graph of the matrix --> the temp array is reused here with a different meaning
            Epetra_FECrsGraph Agraph(Copy, my_map, mguess_row_size);

            Element::EquationIdVectorType EquationId;
            ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

            // assemble all elements
            for (typename ElementsArrayType::ptr_iterator it=rElements.ptr_begin(); it!=rElements.ptr_end(); ++it)
            {
                pScheme->EquationId(*it, EquationId, CurrentProcessInfo);

                //filling the list of active global indices (non fixed)
                unsigned int num_active_indices = 0;
                for(unsigned int i=0; i<EquationId.size(); i++)
                {
                    temp[num_active_indices] = EquationId[i];
                    num_active_indices += 1;
                }

                if(num_active_indices != 0)
                {
                    int ierr = Agraph.InsertGlobalIndices(num_active_indices,temp,num_active_indices, temp);
                    KRATOS_ERROR_IF( ierr < 0 ) << "In " << __FILE__ << ":" << __LINE__ << ": Epetra failure in Graph.InsertGlobalIndices. Error code: " << ierr << std::endl;
                }
            }

            // assemble all conditions
            for (typename ConditionsArrayType::ptr_iterator it=rConditions.ptr_begin(); it!=rConditions.ptr_end(); ++it)
            {
                pScheme->Condition_EquationId(*it, EquationId, CurrentProcessInfo);

                //filling the list of active global indices (non fixed)
                unsigned int num_active_indices = 0;
                for(unsigned int i=0; i<EquationId.size(); i++)
                {
                    temp[num_active_indices] = EquationId[i];
                    num_active_indices += 1;
                }

                if(num_active_indices != 0)
                {
                    int ierr = Agraph.InsertGlobalIndices(num_active_indices,temp,num_active_indices, temp);
                    KRATOS_ERROR_IF( ierr < 0 ) << "In " << __FILE__ << ":" << __LINE__ << ": Epetra failure in Graph.InsertGlobalIndices. Error code: " << ierr << std::endl;
                }
            }

            //finalizing graph construction
            int ierr = Agraph.GlobalAssemble();
            KRATOS_ERROR_IF( ierr != 0 ) << "In " << __FILE__ << ":" << __LINE__ << ": Epetra failure in Graph.GlobalAssemble, Error code: " << ierr << std::endl;

            //generate a new matrix pointer according to this graph
            TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(Copy,Agraph) );
            pA.swap(pNewA);

            //generate new vector pointers according to the given map
            if( pb == NULL || TSparseSpace::Size(*pb) != BaseType::mEquationSystemSize)
            {
                TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(my_map) );
                pb.swap(pNewb);
            }
            if( pDx == NULL || TSparseSpace::Size(*pDx) != BaseType::mEquationSystemSize)
            {
                TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(my_map) );
                pDx.swap(pNewDx);
            }
            if( BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix
            {
                TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(my_map) );
                BaseType::mpReactionsVector.swap(pNewReactionsVector);
            }

            delete [] temp;
        }
        else if (BaseType::mpReactionsVector == nullptr && this->mCalculateReactionsFlag)
        {
            TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(pDx->Map()) );
            BaseType::mpReactionsVector.swap(pNewReactionsVector);
        }
        else
        {
            if(TSparseSpace::Size1(*pA) == 0 || TSparseSpace::Size1(*pA) != BaseType::mEquationSystemSize || TSparseSpace::Size2(*pA) != BaseType::mEquationSystemSize)
            {
                KRATOS_ERROR << "It should not come here resizing is not allowed this way!!!!!!!! ... ";
            }
        }

        //if needed resize the vector for the calculation of reactions
        // if(BaseType::mCalculateReactionsFlag == true)
        // {
        // // KRATOS_THROW_ERROR(std::logic_error,"calculation of reactions not yet implemented with Trilinos","");
        // }

        //~ std::cout << "finished ResizeAndInitializeVectors" << std::endl;

        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /** No per-step initialization is needed for this builder-and-solver. */
    void InitializeSolutionStep(
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY
        KRATOS_CATCH("")
    }

    //**************************************************************************
    //**************************************************************************
    /** No per-step finalization is needed for this builder-and-solver. */
    void FinalizeSolutionStep(
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
    }

    //**************************************************************************
    //**************************************************************************
    /** Computes reactions as minus the freshly re-assembled RHS: rebuilds b,
     *  gathers the entries belonging to the local dofs through an
     *  Epetra_Import, and writes -value into each dof's reaction variable.
     */
    void CalculateReactions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        TSparseSpace::SetToZero(b);

        //refresh RHS to have the correct reactions
        BuildRHS(pScheme, r_model_part, b);

        //initialize the Epetra importer
        // TODO: this part of the code has been pasted until a better solution is found
        int system_size = TSparseSpace::Size(b);
        int number_of_dofs = BaseType::mDofSet.size();
        std::vector< int > index_array(number_of_dofs);

        //filling the array with the global ids
        int counter = 0;
        for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin(); i_dof != BaseType::mDofSet.end(); ++i_dof)
        {
            int id = i_dof->EquationId();
            if( id < system_size )
            {
                index_array[counter] = id;
                counter += 1;
            }
        }

        std::sort(index_array.begin(),index_array.end());
        std::vector<int>::iterator NewEnd = std::unique(index_array.begin(),index_array.end());
        index_array.resize(NewEnd-index_array.begin());

        int check_size = -1;
        int tot_update_dofs = index_array.size();
        b.Comm().SumAll(&tot_update_dofs,&check_size,1);
        if ( (check_size < system_size) && (b.Comm().MyPID() == 0) )
        {
            KRATOS_ERROR << "Dof count is not correct. There are less dofs than expected.\n" << "Expected number of active dofs = " << system_size << " dofs found = " << check_size ;
        }

        //defining a map as needed
        Epetra_Map dof_update_map(-1,index_array.size(), &(*(index_array.begin())),0,b.Comm() );

        //defining the importer class
        Kratos::shared_ptr<Epetra_Import> pDofImporter = Kratos::make_shared<Epetra_Import>(dof_update_map,b.Map());

        //defining a temporary vector to gather all of the values needed
        Epetra_Vector temp_RHS(pDofImporter->TargetMap());

        //importing in the new temp_RHS vector the values
        int ierr = temp_RHS.Import(b, *pDofImporter, Insert);
        if(ierr != 0) KRATOS_ERROR << "Epetra failure found - error code: " << ierr;

        double* temp_RHS_values; //DO NOT make delete of this one!!
        temp_RHS.ExtractView(&temp_RHS_values);

        b.Comm().Barrier();

        const int ndofs = static_cast<int>(BaseType::mDofSet.size());

        // store the RHS values in the reaction variable
        //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver
        #pragma omp parallel for firstprivate(ndofs)
        for (int k = 0; k<ndofs; k++)
        {
            typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k;

            const int i = (dof_iterator)->EquationId();
            // (dof_iterator)->GetSolutionStepReactionValue() = -(*b[i]);
            const double react_val = temp_RHS[pDofImporter->TargetMap().LID(i)];
            (dof_iterator->GetSolutionStepReactionValue()) = -react_val;
        }
    }

    /** Not supported by the Trilinos block builder-and-solver: always throws. */
    void BuildLHS_CompleteOnFreeRows(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A) override
    {
        KRATOS_ERROR << "method BuildLHS_CompleteOnFreeRows not implemented in Trilinos Builder And Solver";
    }

    //**************************************************************************
    //**************************************************************************
    /** Applies Dirichlet conditions: marks fixed dofs in a distributed int
     *  vector and imports that information onto the matrix column map.
     *  (Definition continues beyond this excerpt.)
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart& r_model_part,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD,&rank);

        //loop over all dofs to find the fixed ones
        std::vector<int> global_ids(BaseType::mDofSet.size());
        std::vector<int> is_dirichlet(BaseType::mDofSet.size());

        unsigned int i=0;
        for (typename DofsArrayType::iterator dof_it = BaseType::mDofSet.begin(); dof_it != BaseType::mDofSet.end(); ++dof_it)
        {
            const int global_id = dof_it->EquationId();
            global_ids[i] = global_id;

            if( dof_it->IsFixed() )
                is_dirichlet[i] = 1;
            else
                is_dirichlet[i] = 0;

            i++;
        }

        //here we construct and fill a vector "fixed local" which cont
        Epetra_Map localmap( -1, global_ids.size(), global_ids.data(), 0, A.Comm() );
        Epetra_IntVector fixed_local( Copy, localmap, is_dirichlet.data() );

        Epetra_Import dirichlet_importer(A.ColMap(), fixed_local.Map());

        //defining
a temporary vector to gather all of the values needed Epetra_IntVector fixed( A.ColMap() ); //importing in the new temp vector the values int ierr = fixed.Import(fixed_local,dirichlet_importer,Insert); if(ierr != 0) KRATOS_ERROR << "Epetra failure found"; /* //now fill the local bitarray employed to store the dirichlet rows and cols in local numeration //dirichlet_rows will be numbered according to A.RowMap() //dirichlet_cols will be numbered according to A.ColMap() std::vector< int > mdirichlet_rows( A.NumMyRows()); std::vector< int > mdirichlet_cols( fixed.MyLength() ); KRATOS_WATCH(mdirichlet_rows.size()) unsigned int counter = 0; for(unsigned int i=0; i<mdirichlet_rows.size(); i++) { int lid = localmap.LID( A.RowMap().GID(i) ); if(lid < 0) KRATOS_THROW_ERROR(std::runtime_error," a negative lid was found",""); if( fixed_local[lid] == 0) mdirichlet_rows[i] = false; else { mdirichlet_rows[i] = true; counter++; } } KRATOS_WATCH(counter); for(unsigned int i=0; i< mdirichlet_cols.size(); i++) { if(fixed[i] == 0) mdirichlet_cols[i] = false; else mdirichlet_cols[i] = true; } */ // KRATOS_WATCH(A.NumMyRows()) for (int i=0; i < A.NumMyRows(); i++) { int numEntries; // number of non-zero entries double *vals; // row non-zero values int *cols; // column indices of row non-zero values A.ExtractMyRowView(i,numEntries,vals,cols); int row_gid = A.RowMap().GID(i); int row_lid = localmap.LID(row_gid); if( fixed_local[row_lid] == 0 ) //not a dirichlet row { for (int j=0; j < numEntries; j++) { if(fixed[ cols[j] ] == true) vals[j] = 0.0; } } else //this IS a dirichlet row { //set to zero the rhs b[0][i] = 0.0; //note that the index of i is expected to be coherent with the rows of A //set to zero the whole row for (int j=0; j < numEntries; j++) { int col_gid = A.ColMap().GID(cols[j]); if (col_gid != row_gid) vals[j] = 0.0; } } } // // for (int i=0; i < A.NumMyRows(); i++) { // int numEntries; // double *vals; // int *cols; // A.ExtractMyRowView(i,numEntries,vals,cols); // // int 
row_gid = A.RowMap().GID(i); // int row_lid = dofmap.LID( row_gid ); // // if(row_lid < 0) // KRATOS_WATCH("not working :-("); // // if(fixed[row_lid] == 0) //not a dirichlet Row // { // for (int j=0; j < numEntries; j++) // { // const int col_gid = A.ColMap().GID( cols[j] ); // const int col_lid = dofmap.LID( col_gid ); // if(col_lid < 0) // std::cout << " pid="<<A.Comm().MyPID() << " cols[j] = " << cols[j] << " gid= " << col_gid << " lid=" << col_lid << std::endl; // // if(fixed[ col_lid ] > 0) vals[j] = 0.0; // } // } // else //this IS a dirichlet row // { // //set to zero the rhs // b[0][i] = 0.0; //note that the index of i is expected to be coherent with the rows of A // // //set to zero the whole row except the diag // for (int j=0; j < numEntries; j++) // { // const int col_gid = A.ColMap().GID( cols[j] ); // const int col_lid = dofmap.LID( col_gid ); // if(col_gid == row_gid) // vals[j] = 1; // else // vals[j] = 0; // } // } // } //std::cout << "finished modifying A for dirichlet" << std::endl; /* int NumEntries; // number of nonzero entries extracted std::vector<unsigned int> fixed_ids; fixed_ids.reserve(1000); for (typename DofsArrayType::iterator dof_it = BaseType::mDofSet.begin(); dof_it != BaseType::mDofSet.end(); ++dof_it) { if (dof_it->IsFixed()) fixed_ids.push_back(dof_it->EquationId()); if(dof_it->GetSolutionStepValue(PARTITION_INDEX) == rank) { if (dof_it->IsFixed()) { int GlobalRow = dof_it->EquationId(); // row to extract int Length = A.NumGlobalEntries(dof_it->EquationId()); // length of Values and Indices double* Values = new double[Length]; // extracted values for this row int* Indices = new int[Length]; // extracted global column indices for the corresponding values A.ExtractGlobalRowCopy(GlobalRow, Length, NumEntries, Values, Indices); // put 0.0 in each row A[ii] and 1.0 on the diagonal for (int ii=0; ii<Length; ii++) { if (Indices[ii] == GlobalRow) Values[ii]=1.0; else Values[ii]=0.0; } A.ReplaceGlobalValues(GlobalRow, Length, Values, 
Indices); // redo better !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! int* gb= new int[1]; gb[0]=GlobalRow; A.ReplaceGlobalValues(Length, Indices, 1, gb, Values); double* bb=new double[1]; bb[0]=0.0; // put 0.0 in vector b[GlobalRow] if GlobalRow is a fixed dof b.ReplaceGlobalValues(1,gb,bb); delete [] Values; delete [] Indices; delete [] gb; delete [] bb; } } } //now set the columns to zero for (typename DofsArrayType::iterator dof_it = BaseType::mDofSet.begin(); dof_it != BaseType::mDofSet.end(); ++dof_it) { if(dof_it->GetSolutionStepValue(PARTITION_INDEX) == rank) { if ( ! dof_it->IsFixed()) //NOT FIXED!! { int GlobalRow = dof_it->EquationId(); // row to extract int Length = A.NumGlobalEntries(dof_it->EquationId()); // length of Values and Indices double* Values = new double[Length]; // extracted values for this row int* Indices = new int[Length]; // extracted global column indices for the corresponding values A.ExtractGlobalRowCopy(GlobalRow, Length, NumEntries, Values, Indices); // put 0.0 in each row A[ii] and 1.0 on the diagonal for (int ii=0; ii<Length; ii++) { if ( std::find(fixed_ids.begin(), fixed_ids.end(), Indices[ii]) != fixed_ids.end() )//if the node is in the fixed list Values[ii]=0.0; } A.ReplaceGlobalValues(GlobalRow, Length, Values, Indices); delete [] Values; delete [] Indices; } } }*/ KRATOS_CATCH(""); } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ Epetra_MpiComm& mrComm; int mguess_row_size; unsigned int mLocalSystemSize; int mFirstMyId; int mLastMyId; /*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ private: /**@name Static Member 
Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ /*@} */ /**@name Private Operators*/ /*@{ */ /*@} */ /**@name Private Operations*/ /*@{ */ //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { KRATOS_ERROR << "This method is not implemented for Trilinos"; } /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class TrilinosBlockBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_TRILINOS_BLOCK_BUILDER_AND_SOLVER defined */
omp_barrier.c
/*
 * Minimal demonstration of "#pragma omp barrier".
 *
 * Two threads share x.  Thread 0 writes x = 5 while thread 1 may read it
 * concurrently (print "1"); after the barrier both threads are guaranteed
 * to observe the write (prints "2" and "3").
 */
#include<stdio.h>
#include<omp.h>
#include<stdlib.h>

int main() {
    int x=2;  /* shared between the two threads */

    #pragma omp parallel num_threads(2) shared(x)
    {
        if (omp_get_thread_num()==0) {
            x = 5;  /* thread 0 updates the shared variable */
        } else {
            /* NOTE(review): this read races with thread 0's write above —
             * intentional for the demo: it may print 2 or 5 depending on
             * scheduling. */
            printf("1 : Thread %d : x = %d\n", omp_get_thread_num(), x);
        }

        /* All threads wait here; an OpenMP barrier implies a flush, so the
         * write x = 5 is visible to every thread after this point. */
        #pragma omp barrier

        if (omp_get_thread_num() == 0) {
            printf("2 : Thread %d : x = %d\n", omp_get_thread_num(), x);
        } else {
            /* always prints x = 5: the barrier ordered the read after the write */
            printf("3 : Thread %d : x = %d\n", omp_get_thread_num(), x);
        }
    }
    return 0;
}
ks_cpp.c
#include <stdio.h> #include <malloc.h> #include <assert.h> #include <memory.h> #include <math.h> // C bool typedef enum { true=1, false=0 } bool; inline void update_min(double* p1, double v2) { if (v2 < *p1) *p1 = v2; } // https://stackoverflow.com/questions/28258590/using-openmp-to-get-the-index-of-minimum-element-parallelly struct Compare { double val; size_t index; }; #pragma omp declare reduction(maximum : struct Compare : omp_out = omp_in.val > omp_out.val ? omp_in : omp_out) void kennard_stone(double* cdist, size_t* seed, size_t* result, double* v_dist, size_t n_sample, size_t n_seed, size_t n_result) { // 00. Assertions and Result Vector Initialization struct Compare sup; if (n_seed == 2) v_dist[0] = cdist[seed[0] * n_sample + seed[1]]; if (n_seed == 0) { size_t n_sample_2 = n_sample * n_sample; sup.val = -1.; sup.index = -1; #pragma omp parallel for reduction(maximum:sup) for (size_t i = 0; i < n_sample_2; ++i) { if (cdist[i] > sup.val) { sup.val = cdist[i]; sup.index = i; } } if (sup.index == 0) { // Threading Safety Check sup.val = -1.; sup.index = -1; for (size_t i = 0; i < n_sample_2; ++i) { if (cdist[i] > sup.val) { sup.val = cdist[i]; sup.index = i; } } } seed[0] = sup.index / n_sample; seed[1] = sup.index % n_sample; n_seed = 2; v_dist[0] = sup.val; } n_result = n_result == 0 ? n_sample : n_result; assert(n_result <= n_sample); assert(n_seed <= n_sample); memcpy(result, seed, n_seed * sizeof(size_t)); memset(result + n_seed, 0, (n_result - n_seed) * sizeof(size_t)); // 01. Scratch Area Initialization bool* selected = (bool*)malloc(n_sample * sizeof(bool)); memset(selected, false, n_sample * sizeof(bool)); for (size_t i = 0; i < n_seed; ++i) selected[result[i]] = true; // 02. 
Minimum Out-of-Group Initialization double* min_vals = (double*)malloc(n_sample * sizeof(double)); memcpy(min_vals, cdist + n_sample * result[0], n_sample * sizeof(double)); for (size_t n = 1; n < n_seed; ++n) { size_t idx_starting = result[n] * n_sample; #pragma omp parallel for for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; update_min(&min_vals[i], cdist[idx_starting + i]); } } // 03. Main Algorithm for (size_t n = n_seed; n < n_result; ++n) { // Find sup of the minimum sup.val = -1.; sup.index = 0; #pragma omp parallel for reduction(maximum:sup) for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; if (min_vals[i] > sup.val) { sup.index = i; sup.val = min_vals[i]; } } if (sup.index == 0) { // Threading Safety Check sup.val = -1.; sup.index = n_sample + 1; for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; if (min_vals[i] > sup.val) { sup.index = i; sup.val = min_vals[i]; } } } v_dist[n - 1] = sup.val; selected[sup.index] = true; result[n] = sup.index; size_t idx_starting = sup.index * n_sample; #pragma omp parallel for for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; update_min(&min_vals[i], cdist[idx_starting + i]); } } free(selected); free(min_vals); } double euclid_distance_vector(double* x1, double* x2, size_t n_feature) { double res = 0.; do { res += (*x1 - *x2) * (*x1 - *x2); ++x1, ++x2; } while (--n_feature); return sqrt(res); } void kennard_stone_mem(double* X, size_t* seed, size_t* result, double* v_dist, size_t n_sample, size_t n_feature, size_t n_seed, size_t n_result) { // 00. Assertions and Result Vector Initialization struct Compare sup; if (n_seed == 2) v_dist[0] = euclid_distance_vector(X + n_feature * seed[0], X + n_feature * seed[1], n_feature); assert(n_seed != 0); // Seed should be supplied from outer program. 
assert(n_result <= n_sample); assert(n_seed <= n_sample); memcpy(result, seed, n_seed * sizeof(size_t)); memset(result + n_seed, 0, (n_result - n_seed) * sizeof(size_t)); // 01. Scratch Area Initialization bool* selected = (bool*)malloc(n_sample * sizeof(bool)); memset(selected, false, n_sample * sizeof(bool)); for (size_t i = 0; i < n_seed; ++i) selected[result[i]] = true; // 02. Minimum Out-of-Group Initialization double* min_vals = (double*)malloc(n_sample * sizeof(double)); #pragma omp parallel for for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; min_vals[i] = euclid_distance_vector(X + n_feature * result[0], X + n_feature * i, n_feature); } for (size_t n = 1; n < n_seed; ++n) { double* p_starting = X + result[n] * n_feature; #pragma omp parallel for for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; update_min(&min_vals[i], euclid_distance_vector(p_starting, X + n_feature * i, n_feature)); } } // 03. Main Algorithm for (size_t n = n_seed; n < n_result; ++n) { // Find sup of the minimum sup.val = -1.; sup.index = 0; #pragma omp parallel for reduction(maximum:sup) for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; if (min_vals[i] > sup.val) { sup.index = i; sup.val = min_vals[i]; } } if (sup.index == 0) { // Threading Safety Check sup.val = -1.; sup.index = 0; for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; if (min_vals[i] > sup.val) { sup.index = i; sup.val = min_vals[i]; } } } v_dist[n - 1] = sup.val; selected[sup.index] = true; result[n] = sup.index; double* p_starting = X + sup.index * n_feature; #pragma omp parallel for for (size_t i = 0; i < n_sample; ++i) { if (selected[i]) continue; update_min(&min_vals[i], euclid_distance_vector(p_starting, X + n_feature * i, n_feature)); } } free(selected); free(min_vals); }
GB_unop__signum_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): keep any changes in the generator template, not here, or they
// will be lost on the next code-generation pass.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__signum_fp64_fp64
// op(A') function:  GB_unop_tran__signum_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = GB_signum (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_signum (x) ;

// casting (identity cast: both types are double)
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = GB_signum (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__signum_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index is a signed int64_t: OpenMP requires a signed loop variable
    // for the canonical parallel-for form
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;
        Cx [p] = GB_signum (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__signum_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the macros above parameterize the shared transpose kernel included here
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
rankSwapSort.h
#ifndef __HYPER_QUICKSORT_COMM_AVOID_H__
#define __HYPER_QUICKSORT_COMM_AVOID_H__

#include <cstdio>
#include "dendro.h"

// #define long long long

#ifdef _PROFILE_SORT
#include "sort_profiler.h"
#endif

#include <mpi.h>

namespace par {

  // Distributed "hyper quicksort" with rank swapping: at every bisection step
  // each rank may trade places with its partner when doing so reduces the
  // number of bytes that must cross the network (communication-avoiding).
  //
  // In-place variant: sorts `arr` globally across `comm_`; on return each
  // rank holds a sorted slice and slices are globally ordered.
  // Returns via the PROF_SORT_BEGIN/PROF_SORT_END macros (see dendro.h).
  template<typename T>
  int RankSwapSort(std::vector<T>& arr, MPI_Comm comm_){ // O( ((N/p)+log(p))*(log(N/p)+log(p)) )
#ifdef __PROFILE_WITH_BARRIER__
      MPI_Barrier(comm);
#endif
      PROF_SORT_BEGIN
#ifdef _PROFILE_SORT
        long bytes_comm=0;
      total_bytes = 0;
      total_sort.start();
#endif

      // Copy communicator.
      MPI_Comm comm=comm_;
      // Get comm size and rank.
      int npes, myrank, _rank;
      MPI_Comm_size(comm, &npes);
      MPI_Comm_rank(comm, &myrank);

      // if (!myrank) printf("__rank_swap_sort___\n");

      // Single-process case: a purely local sort suffices.
      if(npes==1){
        // if (!myrank) printf("npes == 1\n");
#ifdef _PROFILE_SORT
        seq_sort.start();
#endif
        // printf("%d: starting seq sort\n", myrank); fflush(stdout);
        omp_par::merge_sort(&arr[0],&arr[arr.size()]);
        // printf("%d: done seq sort\n", myrank); fflush(stdout);
#ifdef _PROFILE_SORT
        seq_sort.stop();
        total_sort.stop();
#endif
        PROF_SORT_END
      }

      // buffers ... keeping all allocations together
      std::vector<T> commBuff;
      std::vector<T> mergeBuff;
      std::vector<int> glb_splt_cnts(npes);
      std::vector<int> glb_splt_disp(npes,0);

      int omp_p=omp_get_max_threads();
      srand(myrank);

      // Local and global sizes. O(log p)
      // NOTE(review): assert(nelem) means every rank must contribute at least
      // one element — empty local arrays are not supported.
      long totSize, nelem = arr.size();
      assert(nelem);
      par::Mpi_Allreduce<long>(&nelem, &totSize, 1, MPI_SUM, comm);
      long nelem_ = nelem;

      // if (!myrank) printf("starting sequential sort - %ld\n", totSize); fflush(stdout);
      // Local sort. O(n/p log n/p)
#ifdef _PROFILE_SORT
      seq_sort.start();
#endif
      omp_par::merge_sort(&arr[0], &arr[arr.size()]);
      // std::sort(&arr[0], &arr[arr.size()]);
#ifdef _PROFILE_SORT
      seq_sort.stop();
#endif

      /* MPI_Barrier(comm); if(!myrank) { printf("finished sequential sort \n"); printf("-----------------------------------\n"); fflush(stdout); } */

      int iter=0;
      _rank = myrank;
      // Binary split and merge in each iteration.
      while(npes>1 && totSize>0){ // O(log p) iterations.
        // if (!_rank) printf("--------- iter: %d ---------\n", iter); fflush(stdout);

        //Determine splitters. O( log(N/p) + log(p) )
#ifdef _PROFILE_SORT
        hyper_compute_splitters.start();
#endif
        T split_key;
        long totSize_new;
        //while(true)
        {
          // Take random splitters. O( 1 ) -- Let p * splt_count = glb_splt_count = const = 100~1000
          long splts = nelem;
          splts = (splts*1000)/totSize;
          int splt_count = splts;
          if (npes>1000)
            splt_count = ( ((float)rand()/(float)RAND_MAX)*totSize < (1000*nelem) ? 1 : 0 );

          if ( splt_count > nelem )
            splt_count = nelem;

          std::vector<T> splitters(splt_count);
          for(size_t i=0;i<splt_count;i++)
            splitters[i] = arr[rand() % nelem];

          // Gather all splitters. O( log(p) )
          int glb_splt_count;

          par::Mpi_Allgather<int>(&splt_count, &glb_splt_cnts[0], 1, comm);
          omp_par::scan(&glb_splt_cnts[0],&glb_splt_disp[0],npes);

          glb_splt_count = glb_splt_cnts[npes-1] + glb_splt_disp[npes-1];

          std::vector<T> glb_splitters(glb_splt_count);

          MPI_Allgatherv(&splitters[0], splt_count, par::Mpi_datatype<T>::value(),
                         &glb_splitters[0], &glb_splt_cnts[0], &glb_splt_disp[0],
                         par::Mpi_datatype<T>::value(), comm);

          // Determine split key. O( log(N/p) + log(p) )
          // disp[i] = local rank of splitter i; the global Allreduce sum then
          // yields the splitter's global rank.
          std::vector<long> disp(glb_splt_count,0);

          if(nelem>0){
            #pragma omp parallel for
            for(size_t i=0;i<glb_splt_count;i++){
              disp[i]=std::lower_bound(&arr[0], &arr[nelem], glb_splitters[i]) - &arr[0];
            }
          }
          std::vector<long> glb_disp(glb_splt_count,0);

          MPI_Allreduce(&disp[0], &glb_disp[0], glb_splt_count, par::Mpi_datatype<long>::value(), MPI_SUM, comm);

          // Pick the splitter whose global rank is closest to the median.
          long* split_disp = &glb_disp[0];
          for(size_t i=0; i<glb_splt_count; i++)
            if ( abs(glb_disp[i] - totSize/2) < abs(*split_disp - totSize/2) )
              split_disp = &glb_disp[i];

          // printf("%d ## %d ---- %d\n", iter, _rank, glb_splt_count); fflush(stdout);
          split_key = glb_splitters[split_disp - &glb_disp[0]];
          // printf("%d ~~ %d\n", iter, _rank); fflush(stdout);

          totSize_new=(myrank<=(npes-1)/2?*split_disp:totSize-*split_disp);
          //double err=(((double)*split_disp)/(totSize/2))-1.0;
          //if(fabs(err)<0.01 || npes<=16) break;
          //else if(!myrank) std::cout<<err<<'\n';
        }
#ifdef _PROFILE_SORT
        hyper_compute_splitters.stop();
#endif

        // Split problem into two. O( N/p )
        bool swap_ranks = false;
        int partner;
        int split_id=(npes-1)/2;
        {
#ifdef _PROFILE_SORT
          hyper_communicate.start();
#endif
          int new_p0 = (myrank<=split_id ? 0:split_id+1);
          int cmp_p0 = (myrank> split_id ? 0:split_id+1);

          int new_np = (myrank<=split_id ? split_id+1: npes-split_id-1);
          int cmp_np = (myrank> split_id ? split_id+1: npes-split_id-1);

          partner = myrank + cmp_p0-new_p0;
          if (partner>=npes) partner=npes-1;
          assert(partner>=0);

          // bool extra_partner=( npes%2==1 && npes-1==myrank );

          // Exchange send sizes.
          char *low_buff, *high_buff;
          char *sbuff, *lbuff;

          long rsizes[2], ssizes[2], rsize=0, ssize=0, lsize;
          size_t split_indx=(nelem>0?std::lower_bound(&arr[0], &arr[nelem], split_key)-&arr[0]:0);

          // byte counts of the elements below / at-or-above the split key
          ssizes[0] = split_indx*sizeof(T);
          ssizes[1] = (nelem - split_indx)*sizeof(T);

          low_buff  = (char *)(&arr[0]);
          high_buff = (char *)(&arr[split_indx]);

          MPI_Status status;
          MPI_Sendrecv (&ssizes, 2, MPI_LONG, partner, 0, &rsizes, 2, MPI_LONG, partner, 0, comm, &status);

          // if(extra_partner) MPI_Sendrecv(&ext_ssize,1,MPI_INT,split_id,0, &ext_rsize,1,MPI_INT,split_id, 0,comm,&status);

          // { modify for rank-swap
          // Each side keeps whichever half minimizes the bytes it must send;
          // when that inverts the usual keep-low/keep-high assignment the two
          // ranks swap roles (swap_ranks) and the communicator split below
          // uses the partner's rank instead of our own.
          if ( myrank > split_id ) {
            // default keep_high,
            if ( (ssizes[1] + rsizes[0]) < (ssizes[0] + rsizes[1]) ) {
              // printf("%d: swapping ranks:hi\n", _rank); fflush(stdout);
              swap_ranks = true;
              ssize = ssizes[1]; sbuff = high_buff;
              lsize = ssizes[0]; lbuff = low_buff;
              rsize = rsizes[0];
            } else {
              ssize = ssizes[0]; sbuff = low_buff;
              lsize = ssizes[1]; lbuff = high_buff;
              rsize = rsizes[1];
            }
          } else {
            // default keep_low
            if ( (ssizes[0] + rsizes[1]) < (ssizes[1] + rsizes[0]) ) {
              // printf("%d: swapping ranks:lo\n", _rank); fflush(stdout);
              swap_ranks = true;
              ssize = ssizes[0]; sbuff = low_buff;
              lsize = ssizes[1]; lbuff = high_buff;
              rsize = rsizes[1];
            } else {
              ssize = ssizes[1]; sbuff = high_buff;
              lsize = ssizes[0]; lbuff = low_buff;
              rsize = rsizes[0];
            }
          }
          // } modify for rank-swap

          // printf("pre --- %d: %ld + %ld = %ld\n", _rank, ssizes[0], ssizes[1], ssizes[0]+ssizes[1]);fflush(stdout);
          // printf("pos --- %d: %ld + %ld = %ld\n", _rank, lsize, rsize, lsize+rsize);fflush(stdout);

          // Exchange data.
          // NOTE(review): reserve() does not change size(); writing through
          // &commBuff[0] / &mergeBuff[0] into reserved-but-unsized storage is
          // formally undefined behavior — works on common implementations but
          // should be replaced with resize(). TODO confirm and fix upstream.
          commBuff.reserve(rsize/sizeof(T));
          char* rbuff = (char *)(&commBuff[0]);

          MPI_Sendrecv (sbuff, ssize, MPI_BYTE, partner, 0, rbuff, rsize, MPI_BYTE, partner, 0, comm, &status);
#ifdef _PROFILE_SORT
          bytes_comm += ssize;
          hyper_communicate.stop();
          hyper_merge.start();
#endif

          int nbuff_size=lsize+rsize;
          mergeBuff.reserve(nbuff_size/sizeof(T));
          char* nbuff= (char *)(&mergeBuff[0]); // new char[nbuff_size];
          omp_par::merge<T*>((T*)lbuff, (T*)&lbuff[lsize], (T*)rbuff, (T*)&rbuff[rsize], (T*)nbuff, omp_p, std::less<T>());

          // Copy new data.
          totSize=totSize_new;
          nelem = nbuff_size/sizeof(T);

          mergeBuff.swap(arr);
          mergeBuff.clear();

          // printf("++++ %d : %ld, %ld +++++ \n", _rank, nelem, totSize);
#ifdef _PROFILE_SORT
          hyper_merge.stop();
#endif
        }

        {// Split comm. O( log(p) ) ??
#ifdef _PROFILE_SORT
          hyper_comm_split.start();
#endif
          MPI_Comm scomm;
          int oldrank = myrank, grank;
          // printf("--> %d || %d, %d \n", iter, _rank, oldrank);fflush(stdout);
          // When ranks swapped, this rank joins the sub-communicator its
          // PARTNER would normally belong to, keyed by the partner's rank.
          if (swap_ranks) {
            MPI_Comm_split(comm, partner<=split_id, partner, &scomm );
            comm = scomm;
            npes  = (partner<=split_id? split_id+1: npes -split_id-1);
            myrank = (partner<=split_id? partner : partner-split_id-1);
          } else {
            MPI_Comm_split(comm, myrank<=split_id, myrank, &scomm );
            comm = scomm;
            npes  = (myrank<=split_id? split_id+1: npes -split_id-1);
            myrank = (myrank<=split_id? myrank : myrank-split_id-1);
          }
          // MPI_Comm_rank(comm, &grank);
          // printf("%d || %d: %d -> %d, %d \n", iter, _rank, oldrank, myrank, grank);fflush(stdout);
          // MPI_Barrier(comm_);
          // if (!_rank) printf("---------\n"); fflush(stdout);
          iter++;
#ifdef _PROFILE_SORT
          hyper_comm_split.stop();
#endif
        }
      }
      // MPI_Barrier(comm_);
      // printf("%d: ALL DONE\n", _rank); fflush(stdout);
      // Consider swapping ranks back ...

      // SortedElem.resize(nelem);
      // SortedElem.assign(arr, &arr[nelem]);
      // if(arr_!=NULL) delete[] arr_;

      // par::partitionW<T>(SortedElem, NULL , comm_);
      // par::partitionW<T>(arr, NULL , comm_);
#ifdef _PROFILE_SORT
      total_sort.stop();
      par::Mpi_Allreduce<long>(&bytes_comm, &total_bytes, 1, MPI_SUM, comm_);
      // if (!_rank) printf("Total comm is %ld bytes\n", total_comm);
#endif
      // NOTE(review): the function's return value is produced by the
      // PROF_SORT_BEGIN/PROF_SORT_END macro pair (dendro.h) — presumably they
      // expand to the return statement; verify against that header.
      PROF_SORT_END
  }//end function

  //--------------------------------------------------------------------------------

  // Out-of-place variant (no rank swapping): leaves `arr` untouched and
  // returns the globally sorted data in `SortedElem`.  Uses a raw working
  // copy arr_ that is exchanged/merged each bisection round.
  template<typename T>
  int RankSwapSort(std::vector<T>& arr, std::vector<T> & SortedElem, MPI_Comm comm_){ // O( ((N/p)+log(p))*(log(N/p)+log(p)) )
#ifdef __PROFILE_WITH_BARRIER__
      MPI_Barrier(comm);
#endif
      PROF_SORT_BEGIN
#ifdef _PROFILE_SORT
        total_sort.start();
#endif

      // Copy communicator.
      MPI_Comm comm=comm_;
      // Get comm size and rank.
      int npes, myrank, myrank_;
      MPI_Comm_size(comm, &npes);
      MPI_Comm_rank(comm, &myrank);
      myrank_=myrank;

      if(npes==1){
        // @dhairya isn't this wrong for the !sort-in-place case ...
#ifdef _PROFILE_SORT
        seq_sort.start();
#endif
        omp_par::merge_sort(&arr[0],&arr[arr.size()]);
#ifdef _PROFILE_SORT
        seq_sort.stop();
#endif
        SortedElem = arr;
#ifdef _PROFILE_SORT
        total_sort.stop();
#endif
        PROF_SORT_END
      }

      int omp_p=omp_get_max_threads();
      srand(myrank);

      // Local and global sizes. O(log p)
      long totSize, nelem = arr.size();
      assert(nelem);
      par::Mpi_Allreduce<long>(&nelem, &totSize, 1, MPI_SUM, comm);
      long nelem_ = nelem;

      // Local sort.
#ifdef _PROFILE_SORT
      seq_sort.start();
#endif
      // Working copy so `arr` is preserved for the caller.
      // NOTE(review): memcpy of T assumes T is trivially copyable — TODO confirm.
      T* arr_=new T[nelem];
      memcpy (&arr_[0], &arr[0], nelem*sizeof(T));
      omp_par::merge_sort(&arr_[0], &arr_[arr.size()]);
#ifdef _PROFILE_SORT
      seq_sort.stop();
#endif

      // Binary split and merge in each iteration.
      while(npes>1 && totSize>0){ // O(log p) iterations.

        //Determine splitters. O( log(N/p) + log(p) )
#ifdef _PROFILE_SORT
        hyper_compute_splitters.start();
#endif
        T split_key;
        long totSize_new;
        //while(true)
        {
          // Take random splitters. O( 1 ) -- Let p * splt_count = glb_splt_count = const = 100~1000
          int splt_count=(1000*nelem)/totSize;
          if(npes>1000)
            splt_count=(((float)rand()/(float)RAND_MAX)*totSize<(1000*nelem)?1:0);
          if(splt_count>nelem)
            splt_count=nelem;
          std::vector<T> splitters(splt_count);
          for(size_t i=0;i<splt_count;i++)
            splitters[i]=arr_[rand()%nelem];

          // Gather all splitters. O( log(p) )
          int glb_splt_count;
          std::vector<int> glb_splt_cnts(npes);
          std::vector<int> glb_splt_disp(npes,0);
          par::Mpi_Allgather<int>(&splt_count, &glb_splt_cnts[0], 1, comm);
          omp_par::scan(&glb_splt_cnts[0],&glb_splt_disp[0],npes);
          glb_splt_count=glb_splt_cnts[npes-1]+glb_splt_disp[npes-1];
          std::vector<T> glb_splitters(glb_splt_count);
          MPI_Allgatherv(& splitters[0], splt_count, par::Mpi_datatype<T>::value(),
                         &glb_splitters[0], &glb_splt_cnts[0], &glb_splt_disp[0],
                         par::Mpi_datatype<T>::value(), comm);

          // Determine split key. O( log(N/p) + log(p) )
          std::vector<long> disp(glb_splt_count,0);
          if(nelem>0){
            #pragma omp parallel for
            for(size_t i=0;i<glb_splt_count;i++){
              disp[i]=std::lower_bound(&arr_[0], &arr_[nelem], glb_splitters[i])-&arr_[0];
            }
          }
          std::vector<long> glb_disp(glb_splt_count,0);
          MPI_Allreduce(&disp[0], &glb_disp[0], glb_splt_count, par::Mpi_datatype<long>::value(), MPI_SUM, comm);

          // Splitter whose global rank is closest to the median becomes the key.
          long* split_disp=&glb_disp[0];
          for(size_t i=0;i<glb_splt_count;i++)
            if( labs(glb_disp[i]-totSize/2) < labs(*split_disp-totSize/2))
              split_disp=&glb_disp[i];
          split_key=glb_splitters[split_disp-&glb_disp[0]];

          totSize_new=(myrank<=(npes-1)/2?*split_disp:totSize-*split_disp);
          //double err=(((double)*split_disp)/(totSize/2))-1.0;
          //if(fabs(err)<0.01 || npes<=16) break;
          //else if(!myrank) std::cout<<err<<'\n';
        }
#ifdef _PROFILE_SORT
        hyper_compute_splitters.stop();
#endif

        // Split problem into two. O( N/p )
        int split_id=(npes-1)/2;
        {
#ifdef _PROFILE_SORT
          hyper_communicate.start();
#endif
          int new_p0=(myrank<=split_id?0:split_id+1);
          int cmp_p0=(myrank> split_id?0:split_id+1);
          int new_np=(myrank<=split_id? split_id+1: npes-split_id-1);
          int cmp_np=(myrank> split_id? split_id+1: npes-split_id-1);

          int partner = myrank+cmp_p0-new_p0;
          if(partner>=npes) partner=npes-1;
          assert(partner>=0);

          // Odd npes: the last rank has no mirror and pairs with split_id too.
          bool extra_partner=( npes%2==1 && npes-1==myrank );

          // Exchange send sizes.
          char *sbuff, *lbuff;
          int     rsize=0,     ssize=0, lsize=0;
          int ext_rsize=0, ext_ssize=0;
          size_t split_indx=(nelem>0?std::lower_bound(&arr_[0], &arr_[nelem], split_key)-&arr_[0]:0);
          ssize=       (myrank> split_id? split_indx: nelem-split_indx )*sizeof(T);
          sbuff=(char*)(myrank> split_id? &arr_[0]  : &arr_[split_indx]);
          lsize=       (myrank<=split_id? split_indx: nelem-split_indx )*sizeof(T);
          lbuff=(char*)(myrank<=split_id? &arr_[0]  : &arr_[split_indx]);

          MPI_Status status;
          MPI_Sendrecv (& ssize,1,MPI_INT, partner,0,   & rsize,1,MPI_INT, partner,   0,comm,&status);
          if(extra_partner) MPI_Sendrecv(&ext_ssize,1,MPI_INT,split_id,0, &ext_rsize,1,MPI_INT,split_id, 0,comm,&status);

          // Exchange data.
          char*     rbuff=              new char[    rsize]       ;
          char* ext_rbuff=(ext_rsize>0? new char[ext_rsize]: NULL);
          MPI_Sendrecv (sbuff,ssize,MPI_BYTE, partner,0,       rbuff,    rsize,MPI_BYTE, partner,   0,comm,&status);
          if(extra_partner) MPI_Sendrecv( NULL,        0,MPI_BYTE,split_id,0, ext_rbuff,ext_rsize,MPI_BYTE,split_id, 0,comm,&status);
#ifdef _PROFILE_SORT
          hyper_communicate.stop();
          hyper_merge.start();
#endif

          // Merge the kept half with the received half (and the extra
          // partner's data, when present) into a fresh buffer.
          int nbuff_size=lsize+rsize+ext_rsize;
          char* nbuff= new char[nbuff_size];
          omp_par::merge<T*>((T*)lbuff, (T*)&lbuff[lsize], (T*)rbuff, (T*)&rbuff[rsize], (T*)nbuff, omp_p, std::less<T>());
          if(ext_rsize>0 && nbuff!=NULL){
            char* nbuff1= new char[nbuff_size];
            omp_par::merge<T*>((T*)nbuff, (T*)&nbuff[lsize+rsize], (T*)ext_rbuff, (T*)&ext_rbuff[ext_rsize], (T*)nbuff1, omp_p, std::less<T>());
            if(nbuff!=NULL) delete[] nbuff;
            nbuff=nbuff1;
          }

          // Copy new data.
          totSize=totSize_new;
          nelem = nbuff_size/sizeof(T);
          if(arr_!=NULL) delete[] arr_;
          arr_=(T*) nbuff; nbuff=NULL;

          //Free memory.
          if( rbuff!=NULL) delete[] rbuff;
          if(ext_rbuff!=NULL) delete[] ext_rbuff;
#ifdef _PROFILE_SORT
          hyper_merge.stop();
#endif
        }

#ifdef _PROFILE_SORT
        hyper_comm_split.start();
#endif
        {// Split comm. O( log(p) ) ??
          MPI_Comm scomm;
          MPI_Comm_split(comm, myrank<=split_id, myrank, &scomm );
          comm=scomm;
          npes  =(myrank<=split_id? split_id+1: npes -split_id-1);
          myrank=(myrank<=split_id? myrank    : myrank-split_id-1);
        }
#ifdef _PROFILE_SORT
        hyper_comm_split.stop();
#endif
      }

      // Hand the final sorted slice to the caller and release the work buffer.
      SortedElem.resize(nelem);
      SortedElem.assign(arr_, &arr_[nelem]);
      if(arr_!=NULL) delete[] arr_;

#ifdef _PROFILE_SORT
      sort_partitionw.start();
#endif
      // par::partitionW<T>(SortedElem, NULL , comm_);
#ifdef _PROFILE_SORT
      sort_partitionw.stop();
#endif

#ifdef _PROFILE_SORT
      total_sort.stop();
#endif
      PROF_SORT_END
  }//end function

};

#endif
conv_im2col_sgemm_sse_pack.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//

#include "option.h"
#include "mat.h"

namespace ncnn{

// Repack an im2col-unrolled input into 4-wide interleaved tiles so that a
// subsequent SGEMM micro-kernel can read four output positions contiguously.
//
// bottom_blob : assumed to already hold the im2col matrix, i.e.
//               inch*kernel_size rows of out_size floats laid out
//               contiguously from channel(0) -- TODO confirm against the
//               caller; this routine only ever walks channel(0) with a row
//               stride of out_size.
// top_blob    : source of the Mat handle for the packed buffer.
//               NOTE(review): bottom_tm starts as a shallow copy of
//               top_blob but create() reallocates it; verify the packed
//               buffer is actually consumed downstream.
// kernel_w/h  : kernel spatial size; only kernel_w*kernel_h is used here.
// stride_w/h, outch : unused in this excerpt; kept for interface symmetry
//               with the other im2col/sgemm kernels.
// inch        : number of input channels.
// outh, outw  : output spatial size (out_size = outw*outh positions).
static void conv_im2col_sgemm_sse_pack(const Mat &bottom_blob, Mat &top_blob,
                                       const int kernel_w, const int kernel_h,
                                       const int stride_w, const int stride_h,
                                       const Option& opt,
                                       int inch, int outch, int outh, int outw)
{
    size_t elemsize = bottom_blob.elemsize;
    int kernel_size = kernel_w * kernel_h; // taps per input channel
    int out_size = outw * outh;            // number of output positions

    // bottom_im2col memory packed 4 x 4
    // Shallow Mat copies: no pixel data is duplicated by these assignments.
    Mat bottom_im2col = bottom_blob;
    Mat bottom_tm = top_blob;
    // One packed channel per group of 4 output positions, plus one channel
    // per leftover position (out_size % 4 singles at the end).
    bottom_tm.create(4*kernel_size, inch, out_size/4 + out_size%4, elemsize, opt.workspace_allocator);

    {
        int nn_size = out_size >> 2;          // count of full groups of 4
        int remain_size_start = nn_size << 2; // first position packed singly

        // Pack full groups: channel i/4 holds positions i..i+3, interleaved
        // so that each of the inch*kernel_size im2col rows contributes 4
        // consecutive values.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const float* img0 = bottom_im2col.channel(0);
            img0 += i; // start at output position i of im2col row 0

            float* tmpptr = bottom_tm.channel(i/4);

            for (int q=0; q<inch*kernel_size; q++)
            {
#if __SSE__
                // Unaligned 4-float copy of positions i..i+3 of this row.
                // NOTE(review): _mm_loadu_ps/_mm_storeu_ps need
                // <xmmintrin.h>; presumably pulled in via mat.h or the
                // platform headers -- confirm.
                _mm_storeu_ps(tmpptr, _mm_loadu_ps(img0));
#else
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];
#endif // __SSE__
                tmpptr += 4;
                img0 += out_size; // advance to the next im2col row
            }
        }

        // Pack the <=3 leftover positions, one value per row. For these i,
        // i/4 == nn_size, so they land in channels nn_size + (i%4), i.e.
        // one trailing channel per leftover position.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<out_size; i++)
        {
            const float* img0 = bottom_im2col.channel(0);
            img0 += i;

            float* tmpptr = bottom_tm.channel(i/4 + i%4);

            for (int q=0; q<inch*kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += out_size; // next im2col row
            }
        }
    }
}
}
kdtree_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #ifndef RTABMAP_FLANN_KDTREE_INDEX_H_ #define RTABMAP_FLANN_KDTREE_INDEX_H_ #include <algorithm> #include <map> #include <cassert> #include <cstring> #include <stdarg.h> #include <cmath> #include "rtflann/general.h" #include "rtflann/algorithms/nn_index.h" #include "rtflann/util/dynamic_bitset.h" #include "rtflann/util/matrix.h" #include "rtflann/util/result_set.h" #include "rtflann/util/heap.h" #include "rtflann/util/allocator.h" #include "rtflann/util/random.h" #include "rtflann/util/saving.h" namespace rtflann { struct KDTreeIndexParams : public IndexParams { KDTreeIndexParams(int trees = 4) { (*this)["algorithm"] = FLANN_INDEX_KDTREE; (*this)["trees"] = trees; } }; /** * Randomized kd-tree index * * Contains the k-d trees and other information for indexing a set of points * for nearest-neighbor matching. */ template <typename Distance> class KDTreeIndex : public NNIndex<Distance> { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; typedef NNIndex<Distance> BaseClass; typedef bool needs_kdtree_distance; private: /*--------------------- Internal Data Structures --------------------------*/ struct Node { /** * Dimension used for subdivision. */ int divfeat; /** * The values used for subdivision. */ DistanceType divval; /** * Point data */ ElementType* point; /** * The child nodes. 
*/ Node* child1, *child2; Node(){ child1 = NULL; child2 = NULL; } ~Node() { if (child1 != NULL) { child1->~Node(); child1 = NULL; } if (child2 != NULL) { child2->~Node(); child2 = NULL; } } private: template<typename Archive> void serialize(Archive& ar) { typedef KDTreeIndex<Distance> Index; Index* obj = static_cast<Index*>(ar.getObject()); ar & divfeat; ar & divval; bool leaf_node = false; if (Archive::is_saving::value) { leaf_node = ((child1==NULL) && (child2==NULL)); } ar & leaf_node; if (leaf_node) { if (Archive::is_loading::value) { point = obj->points_[divfeat]; } } if (!leaf_node) { if (Archive::is_loading::value) { child1 = new(obj->pool_) Node(); child2 = new(obj->pool_) Node(); } ar & *child1; ar & *child2; } } friend struct serialization::access; }; typedef Node* NodePtr; typedef BranchStruct<NodePtr, DistanceType> BranchSt; typedef BranchSt* Branch; public: /** * KDTree constructor * * Params: * inputData = dataset with the input features * params = parameters passed to the kdtree algorithm */ KDTreeIndex(const IndexParams& params = KDTreeIndexParams(), Distance d = Distance() ) : BaseClass(params, d), mean_(NULL), var_(NULL) { trees_ = get_param(index_params_,"trees",4); } /** * KDTree constructor * * Params: * inputData = dataset with the input features * params = parameters passed to the kdtree algorithm */ KDTreeIndex(const Matrix<ElementType>& dataset, const IndexParams& params = KDTreeIndexParams(), Distance d = Distance() ) : BaseClass(params,d ), mean_(NULL), var_(NULL) { trees_ = get_param(index_params_,"trees",4); setDataset(dataset); } KDTreeIndex(const KDTreeIndex& other) : BaseClass(other), trees_(other.trees_) { tree_roots_.resize(other.tree_roots_.size()); for (size_t i=0;i<tree_roots_.size();++i) { copyTree(tree_roots_[i], other.tree_roots_[i]); } } KDTreeIndex& operator=(KDTreeIndex other) { this->swap(other); return *this; } /** * Standard destructor */ virtual ~KDTreeIndex() { freeIndex(); } BaseClass* clone() const { return new 
KDTreeIndex(*this); } using BaseClass::buildIndex; void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2) { assert(points.cols==veclen_); size_t old_size = size_; extendDataset(points); if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) { buildIndex(); } else { for (size_t i=old_size;i<size_;++i) { for (int j = 0; j < trees_; j++) { addPointToTree(tree_roots_[j], i); } } } } flann_algorithm_t getType() const { return FLANN_INDEX_KDTREE; } template<typename Archive> void serialize(Archive& ar) { ar.setObject(this); ar & *static_cast<NNIndex<Distance>*>(this); ar & trees_; if (Archive::is_loading::value) { tree_roots_.resize(trees_); } for (size_t i=0;i<tree_roots_.size();++i) { if (Archive::is_loading::value) { tree_roots_[i] = new(pool_) Node(); } ar & *tree_roots_[i]; } if (Archive::is_loading::value) { index_params_["algorithm"] = getType(); index_params_["trees"] = trees_; } } void saveIndex(FILE* stream) { serialization::SaveArchive sa(stream); sa & *this; } void loadIndex(FILE* stream) { freeIndex(); serialization::LoadArchive la(stream); la & *this; } /** * Computes the inde memory usage * Returns: memory used by the index */ int usedMemory() const { return int(pool_.usedMemory+pool_.wastedMemory+size_*sizeof(int)); // pool memory and vind array memory } /** * Find set of nearest neighbors to vec. Their indices are stored inside * the result object. 
* * Params: * result = the result object in which the indices of the nearest-neighbors are stored * vec = the vector for which to search the nearest neighbors * maxCheck = the maximum number of restarts (in a best-bin-first manner) */ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const { int maxChecks = searchParams.checks; float epsError = 1+searchParams.eps; if (maxChecks==FLANN_CHECKS_UNLIMITED) { if (removed_) { getExactNeighbors<true>(result, vec, epsError); } else { getExactNeighbors<false>(result, vec, epsError); } } else { if (removed_) { getNeighbors<true>(result, vec, maxChecks, epsError); } else { getNeighbors<false>(result, vec, maxChecks, epsError); } } } #ifdef FLANN_KDTREE_MEM_OPT /** * Find set of nearest neighbors to vec. Their indices are stored inside * the result object. * * Params: * result = the result object in which the indices of the nearest-neighbors are stored * vec = the vector for which to search the nearest neighbors * maxCheck = the maximum number of restarts (in a best-bin-first manner) */ void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams, Heap<BranchSt>* heap) const { int maxChecks = searchParams.checks; float epsError = 1+searchParams.eps; if (maxChecks==FLANN_CHECKS_UNLIMITED) { if (removed_) { getExactNeighbors<true>(result, vec, epsError); } else { getExactNeighbors<false>(result, vec, epsError); } } else { if (removed_) { getNeighbors<true>(result, vec, maxChecks, epsError, heap); } else { getNeighbors<false>(result, vec, maxChecks, epsError, heap); } } } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int 
knnSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); assert(indices.rows >= queries.rows); assert(dists.rows >= queries.rows); assert(indices.cols >= knn); assert(dists.cols >= knn); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } int count = 0; Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_); if (use_heap) { //#pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } } else { std::vector<double> times(queries.rows); //#pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = std::min(resultSet.size(), knn); resultSet.copy(indices[i], dists[i], n, params.sorted); indices_to_ids(indices[i], indices[i], n); count += n; } } std::sort(times.begin(), times.end()); } delete heap; return count; } /** * @brief Perform k-nearest neighbor search * @param[in] queries The query points for which to find the nearest neighbors * @param[out] indices The indices of the nearest neighbors found * @param[out] dists Distances to the nearest neighbors found * @param[in] knn Number of nearest neighbors to return * @param[in] params Search parameters */ virtual int knnSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, 
std::vector<std::vector<DistanceType> >& dists, size_t knn, const SearchParams& params) const { assert(queries.cols == veclen()); bool use_heap; if (params.use_heap==FLANN_Undefined) { use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false; } else { use_heap = (params.use_heap==FLANN_True)?true:false; } if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_); int count = 0; if (use_heap) { //#pragma omp parallel num_threads(params.cores) { KNNResultSet2<DistanceType> resultSet(knn); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } else { //#pragma omp parallel num_threads(params.cores) { KNNSimpleResultSet<DistanceType> resultSet(knn); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = std::min(resultSet.size(), knn); indices[i].resize(n); dists[i].resize(n); if (n>0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } count += n; } } } delete heap; return count; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indices of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ virtual int radiusSearch(const Matrix<ElementType>& queries, Matrix<size_t>& indices, Matrix<DistanceType>& 
dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; size_t num_neighbors = std::min(indices.cols, dists.cols); int max_neighbors = params.max_neighbors; if (max_neighbors<0) max_neighbors = num_neighbors; else max_neighbors = std::min(max_neighbors,(int)num_neighbors); Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_); if (max_neighbors==0) { //#pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); count += resultSet.size(); } } } else { // explicitly indicated to use unbounded radius result set // and we know there'll be enough room for resulting indices and dists if (params.max_neighbors<0 && (num_neighbors>=this->size())) { //#pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = resultSet.size(); count += n; if (n>num_neighbors) n = num_neighbors; resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } else { // number of neighbors limited to max_neighbors //#pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = resultSet.size(); count += n; if ((int)n>max_neighbors) n = max_neighbors; 
resultSet.copy(indices[i], dists[i], n, params.sorted); // mark the next element in the output buffers as unused if (n<indices.cols) indices[i][n] = size_t(-1); if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity(); indices_to_ids(indices[i], indices[i], n); } } } } delete heap; return count; } /** * @brief Perform radius search * @param[in] query The query point * @param[out] indices The indices of the neighbors found within the given radius * @param[out] dists The distances to the nearest neighbors found * @param[in] radius The radius used for search * @param[in] params Search parameters * @return Number of neighbors found */ virtual int radiusSearch(const Matrix<ElementType>& queries, std::vector< std::vector<size_t> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { assert(queries.cols == veclen()); int count = 0; Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_); // just count neighbors if (params.max_neighbors==0) { //#pragma omp parallel num_threads(params.cores) { CountRadiusResultSet<DistanceType> resultSet(radius); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); count += resultSet.size(); } } } else { if (indices.size() < queries.rows ) indices.resize(queries.rows); if (dists.size() < queries.rows ) dists.resize(queries.rows); if (params.max_neighbors<0) { // search for all neighbors //#pragma omp parallel num_threads(params.cores) { RadiusResultSet<DistanceType> resultSet(radius); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = resultSet.size(); count += n; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], 
&indices[i][0], n); } } } } else { // number of neighbors limited to max_neighbors //#pragma omp parallel num_threads(params.cores) { KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors); //#pragma omp for schedule(static) reduction(+:count) for (int i = 0; i < (int)queries.rows; i++) { resultSet.clear(); findNeighbors(resultSet, queries[i], params, heap); size_t n = resultSet.size(); count += n; if ((int)n>params.max_neighbors) n = params.max_neighbors; indices[i].resize(n); dists[i].resize(n); if (n > 0) { resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted); indices_to_ids(&indices[i][0], &indices[i][0], n); } } } } } delete heap; return count; } #endif protected: /** * Builds the index */ void buildIndexImpl() { // Create a permutable array of indices to the input vectors. std::vector<int> ind(size_); for (size_t i = 0; i < size_; ++i) { ind[i] = int(i); } mean_ = new DistanceType[veclen_]; var_ = new DistanceType[veclen_]; tree_roots_.resize(trees_); /* Construct the randomized trees. */ for (int i = 0; i < trees_; i++) { /* Randomize the order of vectors to allow for unbiased sampling. */ std::random_shuffle(ind.begin(), ind.end()); tree_roots_[i] = divideTree(&ind[0], int(size_) ); } delete[] mean_; delete[] var_; } void freeIndex() { for (size_t i=0;i<tree_roots_.size();++i) { // using placement new, so call destructor explicitly if (tree_roots_[i]!=NULL) tree_roots_[i]->~Node(); } pool_.free(); } private: void copyTree(NodePtr& dst, const NodePtr& src) { dst = new(pool_) Node(); dst->divfeat = src->divfeat; dst->divval = src->divval; if (src->child1==NULL && src->child2==NULL) { dst->point = points_[dst->divfeat]; dst->child1 = NULL; dst->child2 = NULL; } else { copyTree(dst->child1, src->child1); copyTree(dst->child2, src->child2); } } /** * Create a tree node that subdivides the list of vecs from vind[first] * to vind[last]. The routine is called recursively on each sublist. 
* Place a pointer to this new tree node in the location pTree. * * Params: pTree = the new node to create * first = index of the first vector * last = index of the last vector */ NodePtr divideTree(int* ind, int count) { NodePtr node = new(pool_) Node(); // allocate memory /* If too few exemplars remain, then make this a leaf node. */ if (count == 1) { node->child1 = node->child2 = NULL; /* Mark as leaf node. */ node->divfeat = *ind; /* Store index of this vec. */ node->point = points_[*ind]; } else { int idx; int cutfeat; DistanceType cutval; meanSplit(ind, count, idx, cutfeat, cutval); node->divfeat = cutfeat; node->divval = cutval; node->child1 = divideTree(ind, idx); node->child2 = divideTree(ind+idx, count-idx); } return node; } /** * Choose which feature to use in order to subdivide this set of vectors. * Make a random choice among those with the highest variance, and use * its variance as the threshold value. */ void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval) { memset(mean_,0,veclen_*sizeof(DistanceType)); memset(var_,0,veclen_*sizeof(DistanceType)); /* Compute mean values. Only the first SAMPLE_MEAN values need to be sampled to get a good estimate. */ int cnt = std::min((int)SAMPLE_MEAN+1, count); for (int j = 0; j < cnt; ++j) { ElementType* v = points_[ind[j]]; for (size_t k=0; k<veclen_; ++k) { mean_[k] += v[k]; } } DistanceType div_factor = DistanceType(1)/cnt; for (size_t k=0; k<veclen_; ++k) { mean_[k] *= div_factor; } /* Compute variances (no need to divide by count). */ for (int j = 0; j < cnt; ++j) { ElementType* v = points_[ind[j]]; for (size_t k=0; k<veclen_; ++k) { DistanceType dist = v[k] - mean_[k]; var_[k] += dist * dist; } } /* Select one of the highest variance indices at random. 
*/ cutfeat = selectDivision(var_); cutval = mean_[cutfeat]; int lim1, lim2; planeSplit(ind, count, cutfeat, cutval, lim1, lim2); if (lim1>count/2) index = lim1; else if (lim2<count/2) index = lim2; else index = count/2; /* If either list is empty, it means that all remaining features * are identical. Split in the middle to maintain a balanced tree. */ if ((lim1==count)||(lim2==0)) index = count/2; } /** * Select the top RAND_DIM largest values from v and return the index of * one of these selected at random. */ int selectDivision(DistanceType* v) { int num = 0; size_t topind[RAND_DIM]; /* Create a list of the indices of the top RAND_DIM values. */ for (size_t i = 0; i < veclen_; ++i) { if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) { /* Put this element at end of topind. */ if (num < RAND_DIM) { topind[num++] = i; /* Add to list. */ } else { topind[num-1] = i; /* Replace last element. */ } /* Bubble end value down to right location by repeated swapping. */ int j = num - 1; while (j > 0 && v[topind[j]] > v[topind[j-1]]) { std::swap(topind[j], topind[j-1]); --j; } } } /* Select a random integer in range [0,num-1], and return that index. */ int rnd = rand_int(num); return (int)topind[rnd]; } /** * Subdivide the list of points by a plane perpendicular on axe corresponding * to the 'cutfeat' dimension at 'cutval' position. * * On return: * dataset[ind[0..lim1-1]][cutfeat]<cutval * dataset[ind[lim1..lim2-1]][cutfeat]==cutval * dataset[ind[lim2..count]][cutfeat]>cutval */ void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2) { /* Move vector indices for left subtree to front of list. 
*/ int left = 0; int right = count-1; for (;; ) { while (left<=right && points_[ind[left]][cutfeat]<cutval) ++left; while (left<=right && points_[ind[right]][cutfeat]>=cutval) --right; if (left>right) break; std::swap(ind[left], ind[right]); ++left; --right; } lim1 = left; right = count-1; for (;; ) { while (left<=right && points_[ind[left]][cutfeat]<=cutval) ++left; while (left<=right && points_[ind[right]][cutfeat]>cutval) --right; if (left>right) break; std::swap(ind[left], ind[right]); ++left; --right; } lim2 = left; } /** * Performs an exact nearest neighbor search. The exact search performs a full * traversal of the tree. */ template<bool with_removed> void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError) const { // checkID -= 1; /* Set a different unique ID for each search. */ if (trees_ > 1) { fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search"); } if (trees_>0) { searchLevelExact<with_removed>(result, vec, tree_roots_[0], 0.0, epsError); } } /** * Performs the approximate nearest-neighbor search. The search is approximate * because the tree traversal is abandoned after a given number of descends in * the tree. */ template<bool with_removed> void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError) const { int i; BranchSt branch; int checkCount = 0; Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_); DynamicBitset checked(size_); /* Search once through each tree down to root. */ for (i = 0; i < trees_; ++i) { searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked); } /* Keep searching other branches from heap until finished. 
*/ while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) { searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked); } delete heap; } #ifdef FLANN_KDTREE_MEM_OPT /** * Performs the approximate nearest-neighbor search. The search is approximate * because the tree traversal is abandoned after a given number of descends in * the tree. */ template<bool with_removed> void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError, Heap<BranchSt>* heap) const { int i; BranchSt branch; int checkCount = 0; DynamicBitset checked(size_); heap->clear(); /* Search once through each tree down to root. */ for (i = 0; i < trees_; ++i) { searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked); } /* Keep searching other branches from heap until finished. */ while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) { searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked); } } #endif /** * Search starting from a given node of the tree. Based on any mismatches at * higher levels, all exemplars below this level must have a distance of * at least "mindistsq". */ template<bool with_removed> void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck, float epsError, Heap<BranchSt>* heap, DynamicBitset& checked) const { if (result_set.worstDist()<mindist) { // printf("Ignoring branch, too far\n"); return; } /* If this is a leaf node, then do check and return. */ if ((node->child1 == NULL)&&(node->child2 == NULL)) { int index = node->divfeat; if (with_removed) { if (removed_points_.test(index)) return; } /* Do not check same node more than once when searching multiple trees. 
*/ if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return; checked.set(index); checkCount++; DistanceType dist = distance_(node->point, vec, veclen_); result_set.addPoint(dist,index); return; } /* Which child branch should be taken first? */ ElementType val = vec[node->divfeat]; DistanceType diff = val - node->divval; NodePtr bestChild = (diff < 0) ? node->child1 : node->child2; NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; /* Create a branch record for the branch not taken. Add distance of this feature boundary (we don't attempt to correct for any use of this feature in a parent node, which is unlikely to happen and would have only a small effect). Don't bother adding more branches to heap after halfway point, as cost of adding exceeds their value. */ DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); // if (2 * checkCount < maxCheck || !result.full()) { if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) { heap->insert( BranchSt(otherChild, new_distsq) ); } /* Call recursively to search next level down. */ searchLevel<with_removed>(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked); } /** * Performs an exact search in the tree starting from a node. */ template<bool with_removed> void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) const { /* If this is a leaf node, then do check and return. */ if ((node->child1 == NULL)&&(node->child2 == NULL)) { int index = node->divfeat; if (with_removed) { if (removed_points_.test(index)) return; // ignore removed points } DistanceType dist = distance_(node->point, vec, veclen_); result_set.addPoint(dist,index); return; } /* Which child branch should be taken first? */ ElementType val = vec[node->divfeat]; DistanceType diff = val - node->divval; NodePtr bestChild = (diff < 0) ? 
node->child1 : node->child2; NodePtr otherChild = (diff < 0) ? node->child2 : node->child1; /* Create a branch record for the branch not taken. Add distance of this feature boundary (we don't attempt to correct for any use of this feature in a parent node, which is unlikely to happen and would have only a small effect). Don't bother adding more branches to heap after halfway point, as cost of adding exceeds their value. */ DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat); /* Call recursively to search next level down. */ searchLevelExact<with_removed>(result_set, vec, bestChild, mindist, epsError); if (mindist*epsError<=result_set.worstDist()) { searchLevelExact<with_removed>(result_set, vec, otherChild, new_distsq, epsError); } } void addPointToTree(NodePtr node, int ind) { ElementType* point = points_[ind]; if ((node->child1==NULL) && (node->child2==NULL)) { ElementType* leaf_point = node->point; ElementType max_span = 0; size_t div_feat = 0; for (size_t i=0;i<veclen_;++i) { ElementType span = std::abs(point[i]-leaf_point[i]); if (span > max_span) { max_span = span; div_feat = i; } } NodePtr left = new(pool_) Node(); left->child1 = left->child2 = NULL; NodePtr right = new(pool_) Node(); right->child1 = right->child2 = NULL; if (point[div_feat]<leaf_point[div_feat]) { left->divfeat = ind; left->point = point; right->divfeat = node->divfeat; right->point = node->point; } else { left->divfeat = node->divfeat; left->point = node->point; right->divfeat = ind; right->point = point; } node->divfeat = div_feat; node->divval = (point[div_feat]+leaf_point[div_feat])/2; node->child1 = left; node->child2 = right; } else { if (point[node->divfeat]<node->divval) { addPointToTree(node->child1,ind); } else { addPointToTree(node->child2,ind); } } } private: void swap(KDTreeIndex& other) { BaseClass::swap(other); std::swap(trees_, other.trees_); std::swap(tree_roots_, other.tree_roots_); std::swap(pool_, other.pool_); } private: enum { /** 
* To improve efficiency, only SAMPLE_MEAN random values are used to * compute the mean and variance at each level when building a tree. * A value of 100 seems to perform as well as using all values. */ SAMPLE_MEAN = 100, /** * Top random dimensions to consider * * When creating random trees, the dimension on which to subdivide is * selected at random from among the top RAND_DIM dimensions with the * highest variance. A value of 5 works well. */ RAND_DIM=5 }; /** * Number of randomized trees that are used */ int trees_; DistanceType* mean_; DistanceType* var_; /** * Array of k-d trees used to find neighbours. */ std::vector<NodePtr> tree_roots_; /** * Pooled memory allocator. * * Using a pooled memory allocator is more efficient * than allocating memory directly when there is a large * number small of memory allocations. */ PooledAllocator pool_; USING_BASECLASS_SYMBOLS }; // class KDTreeIndex } #endif //FLANN_KDTREE_INDEX_H_
omp-csr.c
/* -*- mode: C; mode: folding; fill-column: 70; -*- */
/* Copyright 2010, Georgia Institute of Technology, USA. */
/* See COPYING for license. */
#include "../compat.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <alloca.h>

/* Minimal 64-bit atomic primitives used throughout; implemented at the
   bottom of this file (GCC/ICC builtins, OpenMP-critical fallback, or
   plain serial versions). */
static int64_t int64_fetch_add (int64_t* p, int64_t incr);
static int64_t int64_casval(int64_t* p, int64_t oldval, int64_t newval);
static int int64_cas(int64_t* p, int64_t oldval, int64_t newval);

#include "../graph500.h"
#include "../xalloc.h"
#include "../generator/graph_generator.h"

#define MINVECT_SIZE 2

/* Global CSR graph state.
   maxvtx : largest vertex id seen in the edge list
   nv     : number of vertices (maxvtx+1)
   sz     : byte size of the xoff array (kept for bookkeeping) */
static int64_t maxvtx, nv, sz;
static int64_t * restrict xoff; /* Length 2*nv+2 */
static int64_t * restrict xadjstore; /* Length MINVECT_SIZE + (xoff[nv] == nedge) */
static int64_t * restrict xadj;

/* Scan the edge list in parallel to find the maximum vertex id; each
   thread keeps a private maximum and merges it into the global maxvtx
   with a CAS loop.  Sets the globals maxvtx and nv. */
static void
find_nv (const struct packed_edge * restrict IJ, const int64_t nedge)
{
  maxvtx = -1;
  OMP("omp parallel") {
    int64_t k, gmaxvtx, tmaxvtx = -1;
    OMP("omp for")
      for (k = 0; k < nedge; ++k) {
        if (get_v0_from_edge(&IJ[k]) > tmaxvtx)
          tmaxvtx = get_v0_from_edge(&IJ[k]);
        if (get_v1_from_edge(&IJ[k]) > tmaxvtx)
          tmaxvtx = get_v1_from_edge(&IJ[k]);
      }
    /* Publish the thread-local maximum: retry CAS until either we win
       or someone else has already stored a value >= ours. */
    gmaxvtx = maxvtx;
    while (tmaxvtx > gmaxvtx)
      gmaxvtx = int64_casval (&maxvtx, gmaxvtx, tmaxvtx);
  }
  nv = 1+maxvtx;
}

/* Allocate the offset array (two slots per vertex: begin and end).
   Returns 0 on success, -1 on allocation failure.  The nedge argument
   is unused here; adjacency storage is sized later in setup_deg_off.
   The parallel/single pair only reports the OpenMP thread count. */
static int
alloc_graph (int64_t nedge)
{
#pragma omp parallel
#pragma omp single
  printf("OMP (%d)\n",omp_get_num_threads());
  sz = (2*nv+2) * sizeof (*xoff);
  xoff = xmalloc_large_ext (sz);
  if (!xoff) return -1;
  return 0;
}

/* Release the CSR arrays allocated by alloc_graph/setup_deg_off. */
static void
free_graph (void)
{
  xfree_large (xadjstore);
  xfree_large (xoff);
}

/* XOFF(k)    : start offset of vertex k's adjacency list
   XENDOFF(k) : one-past-the-end offset (grows during edge scatter) */
#define XOFF(k) (xoff[2*(k)])
#define XENDOFF(k) (xoff[1+2*(k)])

/* Parallel exclusive prefix sum over the per-vertex degrees stored in
   XOFF(...).  Called from inside an active parallel region; every
   thread sums its contiguous slice into buf[tid], a single thread
   folds buf into running totals, then each thread rewrites its slice
   with the exclusive offsets.  Returns the total (== xoff[nv]). */
static int64_t
prefix_sum (int64_t *buf)
{
  int nt, tid;
  int64_t slice_begin, slice_end, t1, t2, k;

  nt = omp_get_num_threads ();
  tid = omp_get_thread_num ();

  /* Split [0, nv) into nt nearly-equal contiguous slices; the first
     (nv % nt) slices get one extra element. */
  t1 = nv / nt;
  t2 = nv % nt;
  slice_begin = t1 * tid + (tid < t2? tid : t2);
  slice_end = t1 * (tid+1) + ((tid+1) < t2? (tid+1) : t2);

  buf[tid] = 0;
  for (k = slice_begin; k < slice_end; ++k)
    buf[tid] += XOFF(k);
  OMP("omp barrier");
  OMP("omp single")
    for (k = 1; k < nt; ++k)
      buf[k] += buf[k-1];
  if (tid)
    t1 = buf[tid-1];
  else
    t1 = 0;
  for (k = slice_begin; k < slice_end; ++k) {
    int64_t tmp = XOFF(k);
    XOFF(k) = t1;
    t1 += tmp;
  }
  OMP("omp flush (xoff)");
  OMP("omp barrier");
  return buf[nt-1];
}

/* Count degrees, pad every vertex to MINVECT_SIZE slots, convert the
   counts into CSR offsets via prefix_sum, and allocate the adjacency
   store.  Returns nonzero on failure.  NOTE(review): buf is alloca'd
   on one thread's stack inside "omp single" and then used by all
   threads — valid only while that frame lives, i.e. within this
   parallel region (a deliberate Graph500 reference-code shortcut). */
static int
setup_deg_off (const struct packed_edge * restrict IJ, int64_t nedge)
{
  int err = 0;
  int64_t *buf = NULL;
  xadj = NULL;
  OMP("omp parallel") {
    int64_t k, accum;
    OMP("omp for")
      for (k = 0; k < 2*nv+2; ++k)
        xoff[k] = 0;
    /* Accumulate undirected degrees; each edge counts at both ends. */
    OMP("omp for")
      for (k = 0; k < nedge; ++k) {
        int64_t i = get_v0_from_edge(&IJ[k]);
        int64_t j = get_v1_from_edge(&IJ[k]);
        if (i != j) { /* Skip self-edges. */
          if (i >= 0)
            OMP("omp atomic")
              ++XOFF(i);
          if (j >= 0)
            OMP("omp atomic")
              ++XOFF(j);
        }
      }
    OMP("omp single") {
      buf = alloca (omp_get_num_threads () * sizeof (*buf));
      if (!buf) {
        perror ("alloca for prefix-sum hosed");
        abort ();
      }
    }
    /* Reserve at least MINVECT_SIZE slots per vertex. */
    OMP("omp for")
      for (k = 0; k < nv; ++k)
        if (XOFF(k) < MINVECT_SIZE) XOFF(k) = MINVECT_SIZE;
    accum = prefix_sum (buf);
    /* Degrees become offsets; the end pointers start equal to the
       start pointers and advance as edges are scattered in. */
    OMP("omp for")
      for (k = 0; k < nv; ++k)
        XENDOFF(k) = XOFF(k);
    OMP("omp single") {
      XOFF(nv) = accum;
      if (!(xadjstore = xmalloc_large_ext ((XOFF(nv) + MINVECT_SIZE)
                                           * sizeof (*xadjstore))))
        err = -1;
      if (!err) {
        xadj = &xadjstore[MINVECT_SIZE]; /* Cheat and permit xadj[-1] to work. */
        for (k = 0; k < XOFF(nv) + MINVECT_SIZE; ++k)
          xadjstore[k] = -1;
      }
    }
  }
  return !xadj;
}

/* Append neighbor j to vertex i's adjacency list, claiming a slot
   atomically so concurrent scatters do not collide. */
static void
scatter_edge (const int64_t i, const int64_t j)
{
  int64_t where;
  where = int64_fetch_add (&XENDOFF(i), 1);
  xadj[where] = j;
}

/* qsort comparator for int64_t values. */
static int
i64cmp (const void *a, const void *b)
{
  const int64_t ia = *(const int64_t*)a;
  const int64_t ib = *(const int64_t*)b;
  if (ia < ib) return -1;
  if (ia > ib) return 1;
  return 0;
}

/* Sort vertex i's adjacency list and remove duplicate neighbors,
   shrinking XENDOFF(i) accordingly; vacated slots are reset to -1. */
static void
pack_vtx_edges (const int64_t i)
{
  int64_t kcur, k;
  if (XOFF(i)+1 >= XENDOFF(i)) return; /* 0 or 1 neighbor: nothing to dedup. */
  qsort (&xadj[XOFF(i)], XENDOFF(i)-XOFF(i), sizeof(*xadj), i64cmp);
  kcur = XOFF(i);
  for (k = XOFF(i)+1; k < XENDOFF(i); ++k)
    if (xadj[k] != xadj[kcur])
      xadj[++kcur] = xadj[k];
  ++kcur;
  for (k = kcur; k < XENDOFF(i); ++k)
    xadj[k] = -1;
  XENDOFF(i) = kcur;
}

/* Deduplicate every vertex's list; runs inside an enclosing parallel
   region (worksharing "omp for" only). */
static void
pack_edges (void)
{
  int64_t v;
  OMP("omp for")
    for (v = 0; v < nv; ++v)
      pack_vtx_edges (v);
}

/* Scatter every valid edge into both endpoints' lists, then sort and
   deduplicate the lists. */
static void
gather_edges (const struct packed_edge * restrict IJ, int64_t nedge)
{
  OMP("omp parallel") {
    int64_t k;
    OMP("omp for")
      for (k = 0; k < nedge; ++k) {
        int64_t i = get_v0_from_edge(&IJ[k]);
        int64_t j = get_v1_from_edge(&IJ[k]);
        if (i >= 0 && j >= 0 && i != j) {
          scatter_edge (i, j);
          scatter_edge (j, i);
        }
      }
    pack_edges ();
  }
}

/* Public entry point: build the CSR graph from an edge list.
   Returns 0 on success, -1 on allocation failure. */
int
create_graph_from_edgelist (struct packed_edge *IJ, int64_t nedge)
{
  find_nv (IJ, nedge);
  if (alloc_graph (nedge)) return -1;
  if (setup_deg_off (IJ, nedge)) {
    xfree_large (xoff);
    return -1;
  }
  gather_edges (IJ, nedge);
  return 0;
}

/* Level-synchronized parallel BFS from srcvtx.  bfs_tree_out[v]
   receives the parent of v (srcvtx for the root, -1 if unreached).
   Threads claim vertices with CAS and buffer discovered vertices in a
   private nbuf, flushing them to the shared frontier array vlist in
   THREAD_BUF_LEN chunks via fetch-add on k2.  Returns 0 on success,
   -1 if the frontier array cannot be allocated. */
int
make_bfs_tree (int64_t *bfs_tree_out, int64_t *max_vtx_out,
               int64_t srcvtx)
{
  int64_t * restrict bfs_tree = bfs_tree_out;
  int err = 0;

  int64_t * restrict vlist = NULL;
  int64_t k1, k2; /* Current frontier is vlist[k1 .. k2). */

  *max_vtx_out = maxvtx;

  vlist = xmalloc_large (nv * sizeof (*vlist));
  if (!vlist) return -1;

  vlist[0] = srcvtx;
  k1 = 0; k2 = 1;
  bfs_tree[srcvtx] = srcvtx;

#define THREAD_BUF_LEN 16384

  OMP("omp parallel shared(k1, k2)") {
    int64_t k;
    int64_t nbuf[THREAD_BUF_LEN];
    /* Initialize all parents to -1, skipping the root. */
    OMP("omp for")
      for (k = 0; k < srcvtx; ++k)
        bfs_tree[k] = -1;
    OMP("omp for")
      for (k = srcvtx+1; k < nv; ++k)
        bfs_tree[k] = -1;

    while (k1 != k2) {
      const int64_t oldk2 = k2;
      int64_t kbuf = 0;
      OMP("omp barrier");
      OMP("omp for")
        for (k = k1; k < oldk2; ++k) {
          const int64_t v = vlist[k];
          const int64_t veo = XENDOFF(v);
          int64_t vo;
          for (vo = XOFF(v); vo < veo; ++vo) {
            const int64_t j = xadj[vo];
            if (bfs_tree[j] == -1) {
              /* Only the CAS winner enqueues j; losers see another
                 thread already claimed it. */
              if (int64_cas (&bfs_tree[j], -1, v)) {
                if (kbuf < THREAD_BUF_LEN) {
                  nbuf[kbuf++] = j;
                } else {
                  /* Private buffer full: reserve a chunk of vlist and
                     flush, then restart the buffer with j. */
                  int64_t voff = int64_fetch_add (&k2, THREAD_BUF_LEN), vk;
                  assert (voff + THREAD_BUF_LEN <= nv);
                  for (vk = 0; vk < THREAD_BUF_LEN; ++vk)
                    vlist[voff + vk] = nbuf[vk];
                  nbuf[0] = j;
                  kbuf = 1;
                }
              }
            }
          }
        }
      /* Flush whatever remains in this thread's buffer. */
      if (kbuf) {
        int64_t voff = int64_fetch_add (&k2, kbuf), vk;
        assert (voff + kbuf <= nv);
        for (vk = 0; vk < kbuf; ++vk)
          vlist[voff + vk] = nbuf[vk];
      }
      OMP("omp single")
        k1 = oldk2; /* Advance to the next BFS level. */
      OMP("omp barrier");
    }
  }

  xfree_large (vlist);

  return err;
}

void
destroy_graph (void)
{
  free_graph ();
}

#if defined(_OPENMP)
#if defined(__GNUC__)||defined(__INTEL_COMPILER)
/* Preferred path: compiler atomic builtins. */
int64_t
int64_fetch_add (int64_t* p, int64_t incr)
{
  return __sync_fetch_and_add (p, incr);
}
int64_t
int64_casval(int64_t* p, int64_t oldval, int64_t newval)
{
  return __sync_val_compare_and_swap (p, oldval, newval);
}
int
int64_cas(int64_t* p, int64_t oldval, int64_t newval)
{
  return __sync_bool_compare_and_swap (p, oldval, newval);
}
#else
/* XXX: These are not correct, but suffice for the above uses. */
int64_t
int64_fetch_add (int64_t* p, int64_t incr)
{
  int64_t t;
  OMP("omp critical") {
    t = *p;
    *p += incr;
  }
  OMP("omp flush (p)");
  return t;
}
int64_t
int64_casval(int64_t* p, int64_t oldval, int64_t newval)
{
  int64_t v;
  OMP("omp critical (CAS)") {
    v = *p;
    if (v == oldval)
      *p = newval;
  }
  OMP("omp flush (p)");
  return v;
}
int
int64_cas(int64_t* p, int64_t oldval, int64_t newval)
{
  int out = 0;
  OMP("omp critical (CAS)") {
    int64_t v = *p;
    if (v == oldval) {
      *p = newval;
      out = 1;
    }
  }
  OMP("omp flush (p)");
  return out;
}
#endif
#else
/* Serial (no OpenMP) fallbacks. */
int64_t
int64_fetch_add (int64_t* p, int64_t incr)
{
  int64_t t = *p;
  *p += incr;
  return t;
}
int64_t
int64_casval(int64_t* p, int64_t oldval, int64_t newval)
{
  int64_t v = *p;
  if (v == oldval)
    *p = newval;
  return v;
}
int
int64_cas(int64_t* p, int64_t oldval, int64_t newval)
{
  int64_t v = *p;
  int out = 0;
  if (v == oldval) {
    *p = newval;
    out = 1;
  }
  return out;
}
#endif
frequency_shift.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdlib.h>
#include <math.h>
#include <phonoc_array.h>
#include <phonoc_utils.h>
#include <phonon3_h/frequency_shift.h>
#include <phonon3_h/real_to_reciprocal.h>

static double get_frequency_shift_at_band(const int band_index,
                                          const Darray *fc3_normal_squared,
                                          const double fpoint,
                                          const double *frequencies,
                                          const int *grid_point_triplets,
                                          const int *triplet_weights,
                                          const double epsilon,
                                          const double temperature,
                                          const double unit_conversion_factor,
                                          const double cutoff_frequency);
static double sum_frequency_shift_at_band(const int num_band,
                                          const double *fc3_normal_squared,
                                          const double fpoint,
                                          const double *freqs0,
                                          const double *freqs1,
                                          const double epsilon,
                                          const double temperature,
                                          const double cutoff_frequency);
static double sum_frequency_shift_at_band_0K(const int num_band,
                                             const double *fc3_normal_squared,
                                             const double fpoint,
                                             const double *freqs0,
                                             const double *freqs1,
                                             const double epsilon,
                                             const double cutoff_frequency);

/* Compute the anharmonic frequency shift for each requested band of
   the first grid point of the triplets.  One value is written to
   frequency_shift[i] per entry of band_indices.
   fc3_normal_squared has dims [num_triplets][num_band0][num_band][num_band];
   epsilon is the smearing width used to regularize near-resonant
   denominators. */
void get_frequency_shift_at_bands(double *frequency_shift,
                                  const Darray *fc3_normal_squared,
                                  const int *band_indices,
                                  const double *frequencies,
                                  const int *grid_point_triplets,
                                  const int *triplet_weights,
                                  const double epsilon,
                                  const double temperature,
                                  const double unit_conversion_factor,
                                  const double cutoff_frequency)
{
  int i, num_band0, num_band, gp0;
  double fpoint;

  num_band0 = fc3_normal_squared->dims[1];
  num_band = fc3_normal_squared->dims[2];
  gp0 = grid_point_triplets[0];

  /* num_band0 and num_band_indices have to be same. */
  for (i = 0; i < num_band0; i++) {
    /* fpoint: harmonic frequency of the band being shifted. */
    fpoint = frequencies[gp0 * num_band + band_indices[i]];
    frequency_shift[i] =
      get_frequency_shift_at_band(i,
                                  fc3_normal_squared,
                                  fpoint,
                                  frequencies,
                                  grid_point_triplets,
                                  triplet_weights,
                                  epsilon,
                                  temperature,
                                  unit_conversion_factor,
                                  cutoff_frequency);
  }
}

/* Sum the shift contribution of one band over all triplets, weighted
   by triplet_weights and scaled by unit_conversion_factor.  The loop
   over triplets is parallelized with an OpenMP reduction on shift. */
static double get_frequency_shift_at_band(const int band_index,
                                          const Darray *fc3_normal_squared,
                                          const double fpoint,
                                          const double *frequencies,
                                          const int *grid_point_triplets,
                                          const int *triplet_weights,
                                          const double epsilon,
                                          const double temperature,
                                          const double unit_conversion_factor,
                                          const double cutoff_frequency)
{
  int i, num_triplets, num_band0, num_band, gp1, gp2;
  double shift;

  num_triplets = fc3_normal_squared->dims[0];
  num_band0 = fc3_normal_squared->dims[1];
  num_band = fc3_normal_squared->dims[2];

  shift = 0;
#pragma omp parallel for private(gp1, gp2) reduction(+:shift)
  for (i = 0; i < num_triplets; i++) {
    gp1 = grid_point_triplets[i * 3 + 1];
    gp2 = grid_point_triplets[i * 3 + 2];
    if (temperature > 0) {
      shift +=
        sum_frequency_shift_at_band(num_band,
                                    fc3_normal_squared->data +
                                    i * num_band0 * num_band * num_band +
                                    band_index * num_band * num_band,
                                    fpoint,
                                    frequencies + gp1 * num_band,
                                    frequencies + gp2 * num_band,
                                    epsilon,
                                    temperature,
                                    cutoff_frequency) *
        triplet_weights[i] * unit_conversion_factor;
    } else {
      /* At T = 0 the Bose factors vanish; use the cheaper 0 K form. */
      shift +=
        sum_frequency_shift_at_band_0K(num_band,
                                       fc3_normal_squared->data +
                                       i * num_band0 * num_band * num_band +
                                       band_index * num_band * num_band,
                                       fpoint,
                                       frequencies + gp1 * num_band,
                                       frequencies + gp2 * num_band,
                                       epsilon,
                                       cutoff_frequency) *
        triplet_weights[i] * unit_conversion_factor;
    }
  }
  return shift;
}

/* Finite-temperature double sum over the two partner bands of one
   triplet.  n2/n3 are Bose-Einstein occupations; f1..f4 are the four
   resonance denominators.  Each 1/f term is smoothed as
   f / (f^2 + epsilon^2), which regularizes near-resonant denominators
   (a smeared principal value) instead of skipping |f| <= epsilon as an
   earlier implementation did.  Bands below cutoff_frequency are
   excluded. */
static double sum_frequency_shift_at_band(const int num_band,
                                          const double *fc3_normal_squared,
                                          const double fpoint,
                                          const double *freqs0,
                                          const double *freqs1,
                                          const double epsilon,
                                          const double temperature,
                                          const double cutoff_frequency)
{
  int i, j;
  double n2, n3, f1, f2, f3, f4, shift;

  shift = 0;
  for (i = 0; i < num_band; i++) {
    if (freqs0[i] > cutoff_frequency) {
      n2 = bose_einstein(freqs0[i], temperature);
      for (j = 0; j < num_band; j++) {
        if (freqs1[j] > cutoff_frequency) {
          n3 = bose_einstein(freqs1[j], temperature);
          f1 = fpoint + freqs0[i] + freqs1[j];
          f2 = fpoint - freqs0[i] - freqs1[j];
          f3 = fpoint - freqs0[i] + freqs1[j];
          f4 = fpoint + freqs0[i] - freqs1[j];
          shift += (- (n2 + n3 + 1) * f1 / (f1 * f1 + epsilon * epsilon)
                    + (n2 + n3 + 1) * f2 / (f2 * f2 + epsilon * epsilon)
                    - (n2 - n3) * f3 / (f3 * f3 + epsilon * epsilon)
                    + (n2 - n3) * f4 / (f4 * f4 + epsilon * epsilon)) *
            fc3_normal_squared[i * num_band + j];
        }
      }
    }
  }
  return shift;
}

/* Zero-temperature variant: the occupation numbers are zero, so only
   the (n2 + n3 + 1) -> 1 terms with denominators f1 and f2 survive. */
static double sum_frequency_shift_at_band_0K(const int num_band,
                                             const double *fc3_normal_squared,
                                             const double fpoint,
                                             const double *freqs0,
                                             const double *freqs1,
                                             const double epsilon,
                                             const double cutoff_frequency)
{
  int i, j;
  double f1, f2, shift;

  shift = 0;
  for (i = 0; i < num_band; i++) {
    if (freqs0[i] > cutoff_frequency) {
      for (j = 0; j < num_band; j++) {
        if (freqs1[j] > cutoff_frequency) {
          f1 = fpoint + freqs0[i] + freqs1[j];
          f2 = fpoint - freqs0[i] - freqs1[j];
          shift += (- 1 * f1 / (f1 * f1 + epsilon * epsilon)
                    + 1 * f2 / (f2 * f2 + epsilon * epsilon)) *
            fc3_normal_squared[i * num_band + j];
        }
      }
    }
  }
  return shift;
}
ex_single_master.c
#include <stdio.h>
#include <omp.h>

/* Small OpenMP demo contrasting two constructs inside one parallel
   region: `single` runs its block on exactly one (unspecified) thread
   and ends with an implicit barrier; `master` runs only on thread 0
   with no barrier.  Output: ten "At single" lines followed by one
   "By master" line. */
int main()
{
    int ii; /* shared loop counter; written only inside the single block */

#pragma omp parallel
    {
#pragma omp single
        {
            ii = 0;
            while (ii < 10) {
                printf("At single: iteration %d from thread %d\n",
                       ii, omp_get_thread_num());
                ++ii;
            }
        }

#pragma omp master
        printf("By master: %d\n", omp_get_thread_num());
    }
    return 0;
}
convolution_1x1_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_pack4_bf16s_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch) { // interleave // src = inch-outch // dst = 4b-4a-inch/4a-outch/4b #if __aarch64__ kernel_tm_pack4.create(2 * 1, inch/4, (outch/4)/2 + (outch/4)%2, (size_t)2u*16, 16); #else kernel_tm_pack4.create(1, inch/4, outch/4, (size_t)2u*16, 16); #endif int q=0; #if __aarch64__ for (; q+7<outch; q+=8) { const float* k0 = (const float*)kernel + (q+0)*inch; const float* k1 = (const float*)kernel + (q+1)*inch; const float* k2 = (const float*)kernel + (q+2)*inch; const float* k3 = (const float*)kernel + (q+3)*inch; const float* k4 = (const float*)kernel + (q+4)*inch; const float* k5 = (const float*)kernel + (q+5)*inch; const float* k6 = (const float*)kernel + (q+6)*inch; const float* k7 = (const float*)kernel + (q+7)*inch; unsigned short* g0 = kernel_tm_pack4.channel(q/8); for (int p=0; p+3<inch; p+=4) { g0[0] = float32_to_bfloat16(k0[0]); g0[1] = float32_to_bfloat16(k1[0]); g0[2] = float32_to_bfloat16(k2[0]); g0[3] = float32_to_bfloat16(k3[0]); g0[4] = float32_to_bfloat16(k4[0]); g0[5] = float32_to_bfloat16(k5[0]); g0[6] = float32_to_bfloat16(k6[0]); g0[7] = float32_to_bfloat16(k7[0]); g0[8] = float32_to_bfloat16(k0[1]); g0[9] = float32_to_bfloat16(k1[1]); 
g0[10] = float32_to_bfloat16(k2[1]); g0[11] = float32_to_bfloat16(k3[1]); g0[12] = float32_to_bfloat16(k4[1]); g0[13] = float32_to_bfloat16(k5[1]); g0[14] = float32_to_bfloat16(k6[1]); g0[15] = float32_to_bfloat16(k7[1]); g0[16] = float32_to_bfloat16(k0[2]); g0[17] = float32_to_bfloat16(k1[2]); g0[18] = float32_to_bfloat16(k2[2]); g0[19] = float32_to_bfloat16(k3[2]); g0[20] = float32_to_bfloat16(k4[2]); g0[21] = float32_to_bfloat16(k5[2]); g0[22] = float32_to_bfloat16(k6[2]); g0[23] = float32_to_bfloat16(k7[2]); g0[24] = float32_to_bfloat16(k0[3]); g0[25] = float32_to_bfloat16(k1[3]); g0[26] = float32_to_bfloat16(k2[3]); g0[27] = float32_to_bfloat16(k3[3]); g0[28] = float32_to_bfloat16(k4[3]); g0[29] = float32_to_bfloat16(k5[3]); g0[30] = float32_to_bfloat16(k6[3]); g0[31] = float32_to_bfloat16(k7[3]); k0 += 4; k1 += 4; k2 += 4; k3 += 4; k4 += 4; k5 += 4; k6 += 4; k7 += 4; g0 += 32; } } #endif // __aarch64__ for (; q+3<outch; q+=4) { const float* k0 = (const float*)kernel + (q+0)*inch; const float* k1 = (const float*)kernel + (q+1)*inch; const float* k2 = (const float*)kernel + (q+2)*inch; const float* k3 = (const float*)kernel + (q+3)*inch; #if __aarch64__ unsigned short* g0 = kernel_tm_pack4.channel(q/8+(q%8)/4); #else unsigned short* g0 = kernel_tm_pack4.channel(q/4); #endif for (int p=0; p+3<inch; p+=4) { g0[0] = float32_to_bfloat16(k0[0]); g0[1] = float32_to_bfloat16(k1[0]); g0[2] = float32_to_bfloat16(k2[0]); g0[3] = float32_to_bfloat16(k3[0]); g0[4] = float32_to_bfloat16(k0[1]); g0[5] = float32_to_bfloat16(k1[1]); g0[6] = float32_to_bfloat16(k2[1]); g0[7] = float32_to_bfloat16(k3[1]); g0[8] = float32_to_bfloat16(k0[2]); g0[9] = float32_to_bfloat16(k1[2]); g0[10] = float32_to_bfloat16(k2[2]); g0[11] = float32_to_bfloat16(k3[2]); g0[12] = float32_to_bfloat16(k0[3]); g0[13] = float32_to_bfloat16(k1[3]); g0[14] = float32_to_bfloat16(k2[3]); g0[15] = float32_to_bfloat16(k3[3]); k0 += 4; k1 += 4; k2 += 4; k3 += 4; g0 += 16; } } } static void 
conv1x1s1_sgemm_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const float* bias = _bias; // interleave Mat tmp; #if __aarch64__ if (size >= 12) tmp.create(12, inch, size/12 + (size%12)/8 + (size%12%8)/4 + (size%12%4)/2 + size%12%2, elemsize, elempack, opt.workspace_allocator); else if (size >= 8) tmp.create(8, inch, size/8 + (size%8)/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator); else if (size >= 2) tmp.create(2, inch, size/2 + size%2, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); #else if (size >= 8) tmp.create(8, inch, size/8 + (size%8)/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size/4 + (size%4)/2 + size%2, elemsize, elempack, opt.workspace_allocator); else if (size >= 2) tmp.create(2, inch, size/2 + size%2, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); #endif { int nn_size; int remain_size_start; #if __aarch64__ nn_size = size / 12; remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 12; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; unsigned short* tmpptr = tmp.channel(i/12); for (int q=0; q<inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" "st1 {v4.4h}, [%1], #8 \n" "st1 {v1.8h}, 
[%1], #16 \n" "st1 {v5.4h}, [%1], #8 \n" "sub %0, %0, #64 \n" "st1 {v2.8h}, [%1], #16 \n" "st1 {v6.4h}, [%1], #8 \n" "st1 {v3.8h}, [%1], #16 \n" "st1 {v7.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7" ); img0 += bottom_blob.cstep * 4; } } #else remain_size_start = 0; #endif nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 8; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); #else unsigned short* tmpptr = tmp.channel(i/8); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3" ); #else asm volatile( "pld [%0, #256] \n" "vld4.u16 {d0-d3}, [%0]! \n" "pld [%0, #256] \n" "vld4.u16 {d4-d7}, [%0] \n" "sub %0, %0, #32 \n" "vst1.u16 {d0}, [%1 :64]! \n" "vst1.u16 {d4}, [%1 :64]! \n" "vst1.u16 {d1}, [%1 :64]! \n" "vst1.u16 {d5}, [%1 :64]! \n" "vst1.u16 {d2}, [%1 :64]! \n" "vst1.u16 {d6}, [%1 :64]! \n" "vst1.u16 {d3}, [%1 :64]! \n" "vst1.u16 {d7}, [%1 :64]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1" ); #else asm volatile( "pld [%0, #256] \n" "vld1.u16 {d0-d3}, [%0 :128] \n" "vst1.u16 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 2; const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0" ); #else asm volatile( "pld [%0, #128] \n" "vld1.u16 {d0-d1}, [%0 :128] \n" "vst1.u16 {d0-d1}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const unsigned short* img0 = bottom_blob.channel(0); img0 += i*4; #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2); #endif for (int q=0; q<inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0" ); #else asm volatile( "pld [%0, #64] \n" "vld1.u16 {d0}, [%0 :64] \n" "vst1.u16 {d0}, [%1 :64]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); #endif // __aarch64__ img0 += bottom_blob.cstep * 4; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; unsigned short* outptr0 = top_blob.channel(p); unsigned short* outptr1 = top_blob.channel(p+1); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p * 4 : zeros; int i=0; for (; i+11<size; i+=12) { const unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"// w0011_01 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla 
v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%4], #32 \n"// w2233_01 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" 
"fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v16.4h, 
v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+7<size; i+=8) { unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n"// r4 r5 r6 r7 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, 
v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, 
v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "shrn v24.4h, v24.4s, #16 \n" "shrn v25.4h, v25.4s, #16 \n" "shrn v26.4h, v26.4s, #16 \n" "shrn v27.4h, v27.4s, #16 \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn v31.4h, v31.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i+3<size; i+=4) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 
\n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), 
"3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i+1<size; i+=2) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v1.16b \n" "mov v19.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4h, v1.4h}, [%3], #16 \n"// r0 r1 "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%1], #16 \n" "st1 {v18.4h, v19.4h}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : 
"0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); } for (; i<size; i++) { unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); const unsigned short* kptr01 = (const unsigned short*)kernel.channel(pp); int nn = inch;// inch always > 0 asm volatile( "ld1 {v16.4s, v17.4s}, [%10] \n" "0: \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v0.4h}, [%3], #8 \n"// r0 "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%4], #32 \n"// w0011_01 "shll v0.4s, v0.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%4], #32 \n"// w2233_01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "st1 {v16.4h}, [%1], #8 \n" "st1 {v17.4h}, [%2], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr01) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr01), "r"(biasptr) // %10 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { unsigned short* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? 
bias + p * 4 : zeros; int i=0; #if __aarch64__ for (; i+11<size; i+=12) { unsigned short* tmpptr = tmp.channel(i/12); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); int nn = inch;// inch always > 0 asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n"// w0123_0 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #256] 
\n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "shll v27.4s, v27.4h, #16 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "shrn v8.4h, v8.4s, #16 \n" "shrn v9.4h, v9.4s, #16 \n" "shrn v10.4h, v10.4s, #16 \n" "shrn v11.4h, v11.4s, #16 \n" "shrn v12.4h, v12.4s, #16 \n" "shrn v13.4h, v13.4s, #16 \n" "shrn v14.4h, v14.4s, #16 \n" "shrn v15.4h, v15.4s, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%1], #32 \n" "st1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1], #32 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27" ); } #endif for (; i+7<size; i+=8) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12+(i%12)/8); const 
unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%2], #32 \n"// r4 r5 r6 r7 "shll v4.4s, v4.4h, #16 \n" "shll v5.4s, v5.4h, #16 \n" "shll v6.4s, v6.4h, #16 \n" "shll v7.4s, v7.4h, #16 \n" "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, 
v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "vmov q12, q0 \n" "vmov q13, q0 \n" "vmov q14, q0 \n" "vmov q15, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! 
\n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d18, q10, #16 \n" "vshrn.u32 d19, q11, #16 \n" "vshrn.u32 d20, q12, #16 \n" "vshrn.u32 d21, q13, #16 \n" "vshrn.u32 d22, q14, #16 \n" "vshrn.u32 d23, q15, #16 \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif } for (; i+3<size; i+=4) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"// r0 r1 r2 r3 "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "shll v2.4s, v2.4h, #16 \n" "shll v3.4s, v3.4h, #16 \n" "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, 
v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19" ); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.u16 {d4-d7}, [%2]! \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! 
\n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "vshll.u16 q2, d6, #16 \n" "vshll.u16 q3, d7, #16 \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vshrn.u32 d18, q10, #16 \n" "vshrn.u32 d19, q11, #16 \n" "vst1.u16 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif } for (; i+1<size; i+=2) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4h, v1.4h}, [%2], #16 \n"// r0 r1 "shll v0.4s, v0.4h, #16 \n" "shll v1.4s, v1.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] 
\n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "st1 {v16.4h, v17.4h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17" ); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "0: \n" "pld [%2, #128] \n" "vld1.u16 {d4-d5}, [%2 :128]! \n" "vshll.u16 q0, d4, #16 \n" "vshll.u16 q1, d5, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vshrn.u32 d17, q9, #16 \n" "vst1.u16 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9" ); #endif } for (; i<size; i++) { #if __aarch64__ unsigned short* tmpptr = tmp.channel(i/12 + (i%12)/8 + (i%12%8)/4 + (i%12%4)/2 + i%12%2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p/2+p%2); #else unsigned short* tmpptr = tmp.channel(i/8 + (i%8)/4 + (i%4)/2 + i%2); const unsigned short* kptr0 = (const unsigned short*)kernel.channel(p); #endif int nn = inch;// inch always > 0 #if __aarch64__ asm volatile( "ld1 {v16.4s}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v0.4h}, [%2], #8 \n"// r0 "shll v0.4s, v0.4h, #16 \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%3], #32 \n"// w0123 "shll v8.4s, v8.4h, #16 \n" "shll v9.4s, v9.4h, #16 \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "shrn v16.4h, v16.4s, #16 \n" "st1 {v16.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16" ); #else asm volatile( "vld1.f32 {d16-d17}, [%8] \n" "0: \n" "pld [%2, #64] \n" "vld1.u16 {d1}, [%2 :64]! \n" "vshll.u16 q0, d1, #16 \n" "pld [%3, #256] \n" "vld1.u16 {d12-d15}, [%3]! \n" "vshll.u16 q4, d12, #16 \n" "vshll.u16 q5, d13, #16 \n" "vshll.u16 q6, d14, #16 \n" "vshll.u16 q7, d15, #16 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vshrn.u32 d16, q8, #16 \n" "vst1.u16 {d16}, [%1 :64]! 
\n"
        : "=r"(nn),      // %0
          "=r"(outptr0), // %1
          "=r"(tmpptr),  // %2
          "=r"(kptr0)    // %3
        : "0"(nn),
          "1"(outptr0),
          "2"(tmpptr),
          "3"(kptr0),
          "r"(biasptr)   // %8
        : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"
    );
#endif
        }
    }

    // Scalar reference implementation of the sgemm tail, kept (commented out)
    // as documentation of what the vectorised paths above compute.
//     // NOTE sgemm
//     for (; p<outch; p++)
//     {
//         Mat out0 = top_blob.channel(p);
//
//         const short bias0 = bias ? bias[p] : 0.f;
//
//         unsigned short* outptr0 = out0;
//
//         for (int i=0; i<size; i++)
//         {
//             short sum = bias0;
//
//             const unsigned short* kptr = _kernel.channel(p);
//
//             for (int q=0; q<inch; q++)
//             {
//                 const unsigned short* img0 = bottom_blob.channel(q);
//
//                 sum += img0[i] * kptr[0];
//                 kptr ++;
//             }
//
//             outptr0[i] = sum;
//         }
//     }
}

// 1x1 convolution with stride 2 on pack4 bf16 data.
// Strategy: copy every second pixel of every second row of bottom_blob into a
// dense "shrinked" blob of size outw x outh, then run the stride-1 sgemm
// kernel on it. The copy loops below move 4 unsigned shorts (one pack4 bf16
// pixel) per output pixel and advance the read pointer by 8 (two pack4
// pixels, i.e. horizontal stride 2).
static void conv1x1s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After reading 2*outw input pixels on a row, skip the rest of that row
    // (w - 2*outw pixels) plus one whole row (w pixels, vertical stride 2);
    // *4 converts pack4 pixels to unsigned short element counts.
    const int tailstep = (w - 2*outw + w) * 4;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<channels; p++)
    {
        const unsigned short* r0 = bottom_blob.channel(p);
        unsigned short* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // Copy 4 output pixels per iteration (reads pixels 0,2,4,6 of an
            // 8-pixel input span).
            for (; j+3 < outw; j+=4)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0+8);
                uint16x4_t _v2 = vld1_u16(r0+16);
                uint16x4_t _v3 = vld1_u16(r0+24);
                uint16x8_t _v01 = vcombine_u16(_v0, _v1);
                uint16x8_t _v23 = vcombine_u16(_v2, _v3);
                vst1q_u16(outptr, _v01);
                vst1q_u16(outptr+8, _v23);

                r0 += 32;
                outptr += 16;
            }
            // Copy 2 output pixels per iteration.
            for (; j+1 < outw; j+=2)
            {
                uint16x4_t _v0 = vld1_u16(r0);
                uint16x4_t _v1 = vld1_u16(r0+8);
                uint16x8_t _v = vcombine_u16(_v0, _v1);
                vst1q_u16(outptr, _v);

                r0 += 16;
                outptr += 8;
            }
            // Remaining single pixel.
            for (; j < outw; j++)
            {
                uint16x4_t _v = vld1_u16(r0);
                vst1_u16(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }

    // The shrinked blob now has stride 1 relative to the output grid.
    conv1x1s1_sgemm_pack4_bf16s_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
temporal_method_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Suneth Warnakulasuriya (https://github.com/sunethwarna) // #if !defined(KRATOS_TEMPORAL_METHOD_UTILITIES_H_INCLUDED) #define KRATOS_TEMPORAL_METHOD_UTILITIES_H_INCLUDED // System includes // External includes // Project includes // Application includes #include "custom_utilities/method_utilities.h" namespace Kratos { ///@addtogroup RANSApplication ///@{ ///@name Kratos Globals ///@{ namespace TemporalMethodUtilities { template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor, class TDataType> void InitializeVariables( TContainerType& rContainer, const Variable<TDataType>& rOutputVariable, const Variable<TDataType>& rReferenceVariable) { if (rContainer.size() > 0) { const int number_of_items = rContainer.size(); #pragma omp parallel for for (int i = 0; i < number_of_items; ++i) { TContainerItemType& r_item = *(rContainer.begin() + i); const TDataType& r_reference_value = TDataRetrievalFunctor<TContainerItemType>()(r_item, rReferenceVariable); TDataType output_value = rOutputVariable.Zero(); MethodUtilities::DataTypeSizeInitializer<TDataType>(output_value, r_reference_value); TDataStorageFunctor<TContainerItemType>()(r_item, rOutputVariable, output_value); } } } template <class TContainerType, class TContainerItemType, template <class T> class TDataStorageFunctor> void InitializeVariables(TContainerType& rContainer, const Variable<double>& rOutputVariable, const double InitializerValue) { if (rContainer.size() > 0) { const int number_of_items = rContainer.size(); #pragma omp parallel for for (int i = 0; i < number_of_items; ++i) { TContainerItemType& r_item = *(rContainer.begin() + i); TDataStorageFunctor<TContainerItemType>()(r_item, rOutputVariable, InitializerValue); } } } 
} // namespace TemporalMethodUtilities } // namespace Kratos #endif // KRATOS_TEMPORAL_METHOD_UTILITIES_H_INCLUDED
ext_sweep.c
#include <stdlib.h> #include "ext_sweep.h" #include "ext_macros.h" #include "ext_kernels.h" #include "ext_problem.h" #include "ext_profiler.h" // Compute the order of the sweep for the first octant void compute_sweep_order(int** num_cells, cell** cells) { unsigned int nplanes = ichunk + ny + nz - 2; *num_cells = (int*)_mm_malloc(nplanes*sizeof(int), VEC_ALIGN); *cells = (cell*)_mm_malloc(nz*ny*ichunk*sizeof(cell), VEC_ALIGN); int* tmp_indices = (int*)_mm_malloc(nplanes*sizeof(int), VEC_ALIGN); for(int ii = 0; ii < nplanes; ++ii) { (*num_cells)[ii] = 0; tmp_indices[ii] = 0; } // Cells on each plane have equal co-ordinate sum for (unsigned int k = 0; k < nz; k++) { for (unsigned int j = 0; j < ny; j++) { for (unsigned int i = 0; i < ichunk; i++) { unsigned int n = i + j + k; (*num_cells)[n]++; } } } // Store the cell indexes in the plane array for (unsigned int k = 0; k < nz; k++) { for (unsigned int j = 0; j < ny; j++) { for (unsigned int i = 0; i < ichunk; i++) { unsigned int n = i + j + k; unsigned int offset = 0; for(int l = 0; l < n; ++l) { offset += (*num_cells)[l]; } unsigned int ind = tmp_indices[n]; (*cells)[offset + ind].i = i; (*cells)[offset + ind].j = j; (*cells)[offset + ind].k = k; tmp_indices[n]++; } } } _mm_free(tmp_indices); } // Sweep over the grid and compute the angular flux void sweep_octant( const unsigned int timestep, const unsigned int oct, const unsigned int ndiag, const cell* cells, const int* num_cells, const unsigned int num_groups_todo) { // Determine the cell step parameters for the given octant // Create the list of octant co-ordinates in order // This first bit string assumes 3 reflective boundaries //int order_3d = 0b000001010100110101011111; // This bit string is lexiographically organised // This is the order to match the original SNAP // However this required all vacuum boundaries int order_3d = 0b000001010011100101110111; int order_2d = 0b11100100; // Use the bit mask to get the right values for starting positions of the sweep 
int xhi = ((order_3d >> (oct * 3)) & 1) ? nx : 0; int yhi = ((order_3d >> (oct * 3 + 1)) & 1) ? ny : 0; int zhi = ((order_3d >> (oct * 3 + 2)) & 1) ? nz : 0; // Set the order you traverse each axis int istep = (xhi == nx) ? -1 : 1; int jstep = (yhi == ny) ? -1 : 1; int kstep = (zhi == nz) ? -1 : 1; size_t offset = oct*nang*nx*ny*nz*ng; double* l_flux_in = (timestep % 2 == 0 ? flux_in : flux_out) + offset; double* l_flux_out = (timestep % 2 == 0 ? flux_out : flux_in) + offset; int cells_processed = 0; #pragma omp target data if(OFFLOAD) device(MIC_DEVICE) \ map(to: cells[:ichunk*ny*nz]) for (unsigned int d = 0; d < ndiag; d++) { int ncells = num_cells[d]; sweep_cell(istep, jstep, kstep, oct, l_flux_in, l_flux_out, &(cells[cells_processed]), groups_todo, num_groups_todo, ncells); cells_processed += ncells; } } // Perform a sweep over the grid for all the octants void perform_sweep( unsigned int num_groups_todo) { // Number of planes in this octant unsigned int ndiag = ichunk + ny + nz - 2; START_PROFILING; // Get the order of cells to enqueue cell* cells; int* num_cells; compute_sweep_order(&num_cells, &cells); for (int o = 0; o < noct; o++) { sweep_octant(global_timestep, o, ndiag, cells, num_cells, num_groups_todo); zero_edge_flux_buffers(); } _mm_free(cells); _mm_free(num_cells); STOP_PROFILING(__func__, true); } // Solve the transport equations for a single angle in a single cell for a single group void sweep_cell( const int istep, const int jstep, const int kstep, const unsigned int oct, const double* restrict l_flux_in, double* restrict l_flux_out, const cell* restrict cell_index, const unsigned int * restrict groups_todo, const unsigned int num_groups_todo, const unsigned int num_cells) { #pragma omp target if(OFFLOAD) device(MIC_DEVICE) #pragma omp parallel for collapse(2) for(int nc = 0; nc < num_cells; ++nc) { for(int tg = 0; tg < num_groups_todo; ++tg) { #pragma omp simd lastprivate(nc,tg) aligned(dd_j,dd_k,mu:VEC_ALIGN) for(int a = 0; a < nang; ++a) { // 
Get indexes for angle and group const unsigned int i = (istep > 0) ? cell_index[nc].i : nx - cell_index[nc].i - 1; const unsigned int j = (jstep > 0) ? cell_index[nc].j : ny - cell_index[nc].j - 1; const unsigned int k = (kstep > 0) ? cell_index[nc].k : nz - cell_index[nc].k - 1; const unsigned int g = groups_todo[tg]; // Assume transmissive (vacuum boundaries) and that we // are sweeping the whole grid so have access to all neighbours // This means that we only consider the case for one MPI task // at present. // Compute angular source // Begin with first scattering moment double source_term = source(0,i,j,k,g); // Add in the anisotropic scattering source moments for (unsigned int l = 1; l < cmom; l++) { source_term += scat_coeff(l,a,oct) * source(l,i,j,k,g); } double psi = source_term + flux_i(a,g,j,k)*mu(a)*dd_i + flux_j(a,g,i,k)*dd_j(a) + flux_k(a,g,i,j)*dd_k(a); // Add contribution from last timestep flux if time-dependant if (time_delta(g) != 0.0) { psi += time_delta(g) * l_flux_in(a,g,i,j,k); } psi *= denom(a,g,i,j,k); // Compute upwind fluxes double tmp_flux_i = 2.0*psi - flux_i(a,g,j,k); double tmp_flux_j = 2.0*psi - flux_j(a,g,i,k); double tmp_flux_k = 2.0*psi - flux_k(a,g,i,j); // Time differencing on final flux value if (time_delta(g) != 0.0) { psi = 2.0 * psi - l_flux_in(a,g,i,j,k); } // Perform the fixup loop int num_to_fix = 4; // Fixup is a bounded loop as we will worst case fix up each face and centre value one after each other double zeros[4]; #pragma unroll(4) for (int fix = 0; fix < 4; fix++) { // Record which ones are zero zeros[0] = (tmp_flux_i < 0.0) ? 0.0 : 1.0; zeros[1] = (tmp_flux_j < 0.0) ? 0.0 : 1.0; zeros[2] = (tmp_flux_k < 0.0) ? 0.0 : 1.0; zeros[3] = (psi < 0.0) ? 
0.0 : 1.0; if (num_to_fix == zeros[0] + zeros[1] + zeros[2] + zeros[3]) { break; } num_to_fix = zeros[0] + zeros[1] + zeros[2] + zeros[3]; // Recompute cell centre value psi = flux_i(a,g,j,k)*mu(a)*dd_i*(1.0+zeros[0]) + flux_j(a,g,j,k)*dd_j(a)*(1.0+zeros[1]) + flux_k(a,g,i,j)*dd_k(a)*(1.0+zeros[2]); if (time_delta(g) != 0.0) { psi += time_delta(g) * l_flux_in(a,g,i,j,k) * (1.0+zeros[3]); } psi = 0.5*psi + source_term; double recalc_denom = total_cross_section(g,i,j,k); recalc_denom += mu(a) * dd_i * zeros[0]; recalc_denom += dd_j(a) * zeros[1]; recalc_denom += dd_k(a) * zeros[2]; recalc_denom += time_delta(g) * zeros[3]; if (recalc_denom > 1.0E-12) { psi /= recalc_denom; } else { psi = 0.0; } // Recompute the edge fluxes with the new centre value tmp_flux_i = 2.0 * psi - flux_i(a,g,j,k); tmp_flux_j = 2.0 * psi - flux_j(a,g,i,k); tmp_flux_k = 2.0 * psi - flux_k(a,g,i,j); if (time_delta(g) != 0.0) { psi = 2.0*psi - l_flux_in(a,g,i,j,k); } } // Fix up loop is done, just need to set the final values tmp_flux_i = tmp_flux_i * zeros[0]; tmp_flux_j = tmp_flux_j * zeros[1]; tmp_flux_k = tmp_flux_k * zeros[2]; psi = psi * zeros[3]; // Write values to global memory flux_i(a,g,j,k) = tmp_flux_i; flux_j(a,g,i,k) = tmp_flux_j; flux_k(a,g,i,j) = tmp_flux_k; l_flux_out(a,g,i,j,k) = psi; } } } }
HelloWorldOMP.c
/*
  OpenMP example program: Hello World.
  The master thread forks a parallel region; every thread in the team
  prints its own thread number, and the master thread additionally
  reports the size of the team.
  Compile with: gcc -O3 -fopenmp omp_hello.c -o omp_hello
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
    int team_size;
    int thread_id;

    /* Fork a team of threads; each gets private copies of the variables */
    #pragma omp parallel private(team_size, thread_id)
    {
        /* Every thread reports its own id */
        thread_id = omp_get_thread_num();
        printf("Hello World from thread = %d\n", thread_id);

        /* Only the master thread (id 0) reports the team size */
        if (thread_id == 0)
        {
            team_size = omp_get_num_threads();
            printf("Number of threads = %d\n", team_size);
        }
    } /* All threads join master thread and disband */

    exit(0);
}
llinear_openmp.c
/* mex llinear_openmp.c CFLAGS='\$CFLAGS -fopenmp -O3' LDFLAGS='\$LDFLAGS -fopenmp' */ #include <math.h> #include <stdio.h> #include <stdint.h> typedef uint16_t char16_t; #include "mex.h" #define max(a,b) (a > b) ? a : b void forward_elim(int, double **, int *); void solve(int, double **, int *, double * , double *); /*******************************************************/ void forward_elim (int matrix_size, double **A, int *L) { int i,j,k, tempi, tempk; double *S; double xmult, smax, rmax, ratio; S = calloc((matrix_size+1), sizeof(double)); for (i = 1; i <= matrix_size; i++) { L[i] = i; smax = 0.0; for (j = 1; j <= matrix_size; j++) smax = max (smax, fabs(A[i][j])); S[i] = smax; } for (k = 1; k < matrix_size; k++) { rmax = 0.0; for (i = k ; i <= matrix_size; i++) { tempi = L[i]; ratio = fabs(A[tempi][k]/ S[tempi]); if (ratio > rmax) { rmax = ratio; j = i; } } tempk = L[j]; L[j] = L[k]; L[k] = tempk; for (i = k+1; i <= matrix_size; i++) { tempi = L[i]; xmult = A[tempi][k] /A[tempk][k]; A[tempi][k] = xmult; for (j = k+1; j<= matrix_size; j++) A[tempi][j] -= xmult * A[tempk][j]; } } }/* forward_elim */ /**********************************************************/ void solve (int matrix_size, double **A, int *L, double *B, double *X) { int i,j, k, tempi, tempk, tempn; double sum; for (k = 1; k < matrix_size ; k++) { tempk = L[k]; for (i = k+1; i <= matrix_size; i++) { tempi = L[i]; B[tempi] -= A[tempi][k] * B[tempk]; } } tempn = L[matrix_size]; X[matrix_size] = B[tempn] / A[tempn][matrix_size]; for (i = matrix_size -1; i>= 1; i--) { tempi = L[i]; sum = B[tempi]; for (j = i+1; j <= matrix_size ; j++) sum -= A[tempi][j] * X[j]; X[i] = sum / A[tempi][i]; } } double lfitlinear(double *xin, double *yin, double *x, const double *bw, const int n, const int p) { int i, j, k, matrix_size = p+1; double K, Z, yout; double **A, *t, *b; int *L; A = calloc((matrix_size+1),sizeof(double *)); for (j=0;j<=matrix_size;j++){ A[j] = calloc((matrix_size+1),sizeof(double)); } t = 
calloc((matrix_size+1),sizeof(double)); b = calloc((matrix_size+1),sizeof(double)); L = calloc((matrix_size+1),sizeof(double)); for (i=0;i<n;i++){ /* check if i-th xin falls between (x-h,x+h) */ K = 1; for (j=1;j<=p;j++){ Z = xin[j-1+i*p]-x[j-1]; if (fabs(Z)<bw[j-1]){ Z = Z/bw[j-1]; K *= 0.75*(1-Z*Z); } else{ K = 0; break; } } if (K==0) continue; A[1][1] += K; t[1] += K*yin[i]; for (k=2;k<=matrix_size;k++){ A[1][k] += K*(xin[k-2+i*p]-x[k-2]); t[k] += K*(xin[k-2+i*p]-x[k-2])*yin[i]; } for (j=2;j<=matrix_size;j++){ for (k=j;k<=matrix_size;k++){ A[j][k] += K*(xin[j-2+i*p]-x[j-2])*(xin[k-2+i*p]-x[k-2]); } } } for (j=1;j<=matrix_size;j++){ for (k=j;k<=matrix_size;k++) A[k][j] = A[j][k]; } for (j=2;j<=matrix_size;j++) A[j][j] += 1e-8; forward_elim(matrix_size,A,L); solve(matrix_size,A,L,t,b); yout = b[1]; for (j=0;j<=matrix_size;j++) free(A[j]); free(A); free(t); free(b); free(L); return yout; } void llinear(double *xin, double *yin, double *xout, double *yout, const double *bw, const int m, const int n, const int p) { int i, j; double *x; #pragma omp parallel private(i,x) { x = calloc(p,sizeof(double *)); #pragma omp for for (i=0;i<m;i++) { for (j=0;j<p;j++){ x[j] = xout[j+i*p]; } yout[i] = (double)lfitlinear(xin,yin,x,bw,n,p); } free(x); } } /* -------------------------------------------------------------------------- Gateway function for MATLAB Usage in MATLAB: yhat = llineard(xin,yin,xout,bw); xin: input designs; p*n matrix (in fact, an p*n by 1 array) yin: input data points; n*1 array xout: grid points to estimate; p*m matrix (in fact, an p*m by 1 array) bw: bandwidth; p*1 array -------------------------------------------------------------------------- */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { double *xin, *yin, *xout, *yout, *bw; int m,n,p; /* create a pointer to the real data in the input matrix */ xin = mxGetPr(prhs[0]); /* get the value of the scalar input */ yin = mxGetPr(prhs[1]); xout = mxGetPr(prhs[2]); bw = 
mxGetPr(prhs[3]); /* get dimensions of the input matrix */ p = mxGetM(prhs[0]); /* dimension of xin */ n = mxGetN(prhs[0]); /* length of xin */ m = mxGetN(prhs[2]); /* length of xout */ /* create the output matrix */ plhs[0] = mxCreateDoubleMatrix(m,1,mxREAL); /* get a pointer to the real data in the output matrix */ yout = mxGetPr(plhs[0]); /* call the computational routine */ llinear(xin,yin,xout,yout,bw,m,n,p); }
pr32362-2.c
/* PR middle-end/32362 */ /* { dg-do run } */ /* { dg-options "-O2" } */ #include <omp.h> #include <stdlib.h> int a = 2, b = 4; int main () { int n[4] = { -1, -1, -1, -1 }; omp_set_num_threads (4); omp_set_dynamic (0); omp_set_nested (1); #pragma omp parallel private(b) { b = omp_get_thread_num (); #pragma omp parallel firstprivate(a) { a = (omp_get_thread_num () + a) + 1; if (b == omp_get_thread_num ()) n[omp_get_thread_num ()] = a + (b << 4); } } if (n[0] != 3) abort (); if (n[3] != -1 && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36)) abort (); return 0; }
core_zsyssq.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
/* Accumulate the scaled sum of squares of a symmetric matrix tile into
 * (*scale, *sumsq), such that the Frobenius-norm contribution is
 * scale*sqrt(sumsq). Off-diagonal entries are accumulated once via zlassq
 * and doubled (symmetry); diagonal entries are then folded in with the
 * standard LAPACK overflow-safe update. Only the triangle selected by
 * uplo is read. */
void core_zsyssq(plasma_enum_t uplo,
                 int n,
                 const plasma_complex64_t *A, int lda,
                 double *scale, double *sumsq)
{
    int ione = 1;
    if (uplo == PlasmaUpper) {
        /* Column j holds j strictly-upper entries A[0..j-1][j]. */
        for (int j = 1; j < n; j++)
            // TODO: Inline this operation.
            LAPACK_zlassq(&j, &A[lda*j], &ione, scale, sumsq);
    }
    else { // PlasmaLower
        /* Column j holds n-j-1 strictly-lower entries A[j+1..n-1][j]. */
        for (int j = 0; j < n-1; j++) {
            int len = n-j-1;
            // TODO: Inline this operation.
            LAPACK_zlassq(&len, &A[lda*j+j+1], &ione, scale, sumsq);
        }
    }
    /* Each off-diagonal entry appears twice in the full symmetric matrix. */
    *sumsq *= 2.0;
    for (int i = 0; i < n; i++) {
        // diagonal is complex, don't ignore complex part
        double absa = cabs(A[lda*i+i]);
        if (absa != 0.0) { // != propagates nan
            if (*scale < absa) {
                /* Rescale the running sum to the larger scale. */
                *sumsq = 1.0 + *sumsq*((*scale/absa)*(*scale/absa));
                *scale = absa;
            }
            else {
                *sumsq = *sumsq + ((absa/(*scale))*(absa/(*scale)));
            }
        }
    }
}

/******************************************************************************/
/* OpenMP task wrapper: initializes (*scale, *sumsq) to the neutral pair
 * (0, 1) and runs core_zsyssq on tile A, with task dependences on A (in)
 * and scale/sumsq (out). Skipped if the sequence is already in error. */
void core_omp_zsyssq(plasma_enum_t uplo,
                     int n,
                     const plasma_complex64_t *A, int lda,
                     double *scale, double *sumsq,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            core_zsyssq(uplo, n, A, lda, scale, sumsq);
        }
    }
}

/******************************************************************************/
/* OpenMP task that reduces an m-by-n array of per-tile (scale, sumsq)
 * pairs (column-major, leading dimension m) into a single norm value
 * *value = scl*sqrt(sum). Strictly-lower entries (i > j) are combined
 * first and doubled (symmetric tile layout), then the diagonal entries
 * are folded in.
 * NOTE(review): entries with scale[idx] == 0 trigger a 0/0 in the first
 * branch comparison path when scl is also 0 - presumably every populated
 * tile yields scale > 0; confirm against the caller. */
void core_omp_zsyssq_aux(int m, int n,
                         const double *scale, const double *sumsq,
                         double *value,
                         plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:scale[0:n]) \
                     depend(in:sumsq[0:n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess) {
            double scl = 0.0;
            double sum = 1.0;
            /* Off-diagonal tiles: combine with the usual rescaling trick. */
            for (int j = 0; j < n; j++) {
                for (int i = j+1; i < n; i++) {
                    int idx = m*j+i;
                    if (scl < scale[idx]) {
                        sum = sumsq[idx] + sum*((scl/scale[idx])*(scl/scale[idx]));
                        scl = scale[idx];
                    }
                    else {
                        sum = sum + sumsq[idx]*((scale[idx]/scl)*(scale[idx]/scl));
                    }
                }
            }
            /* Each off-diagonal tile represents two symmetric tiles. */
            sum = 2.0*sum;
            /* Diagonal tiles: combined once. */
            for (int j = 0; j < n; j++) {
                int idx = m*j+j;
                if (scl < scale[idx]) {
                    sum = sumsq[idx] + sum*((scl/scale[idx])*(scl/scale[idx]));
                    scl = scale[idx];
                }
                else {
                    sum = sum + sumsq[idx]*((scale[idx]/scl)*(scale[idx]/scl));
                }
            }
            *value = scl*sqrt(sum);
        }
    }
}
detector.c
#include "darknet.h" #include <stdio.h> #ifdef WIN32 #include "unistd\dirent.h" #else #include <dirent.h> #endif #ifdef WIN32 #include "unistd\unistd.h" #else #include <unistd.h> #endif #include <sys/stat.h> #define class temp struct stat st; static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90}; void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear) { list *options = read_data_cfg(datacfg); char *train_images = option_find_str(options, "train", "data/train.list"); char *backup_directory = option_find_str(options, "backup", "/backup/"); srand(time(0)); char *base = basecfg(cfgfile); printf("%s\n", base); float avg_loss = -1; network **nets = (network**)calloc(ngpus, sizeof(network*)); srand(time(0)); int seed = rand(); int i; for(i = 0; i < ngpus; ++i){ srand(seed); #ifdef GPU if(gpu_index >= 0){ opencl_set_device(i); } #endif nets[i] = load_network(cfgfile, weightfile, clear); nets[i]->learning_rate *= ngpus; } srand(time(0)); network *net = nets[0]; int imgs = net->batch * net->subdivisions * ngpus; #ifndef BENCHMARK printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); #endif data train, buffer; layer l = net->layers[net->n - 1]; int classes = l.classes; float jitter = l.jitter; list *plist = get_paths(train_images); //int N = plist->size; char **paths = (char **)list_to_array(plist); load_args args = get_base_args(net); args.coords = l.coords; args.paths = paths; args.n = imgs; args.m = plist->size; args.classes = classes; args.jitter = jitter; args.num_boxes = l.max_boxes; args.d = &buffer; args.type = DETECTION_DATA; //args.type = INSTANCE_DATA; args.threads = 64; pthread_t load_thread = load_data(args); #ifdef LOSS_ONLY double time=what_time_is_it_now(); #else 
double time; #endif int count = 0; if(count == 0) { #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s.start.conv.weights", backup_directory, base); save_weights(net, buff); } int max_size = ((net->w + net->h)/2); //while(i*imgs < N*120){ while(get_current_batch(net) < net->max_batches){ if(l.random && count++%10 == 0){ #if !defined(BENCHMARK) && !defined(LOSS_ONLY) printf("Resizing\n"); #endif int dim = max_size - ((rand() % 8) * 32); #ifdef BENCHMARK dim = 608; #endif if (get_current_batch(net)+200 > net->max_batches) dim = max_size; if (net->w < dim || net->h < dim) dim = max_size; #if !defined(BENCHMARK) && !defined(LOSS_ONLY) printf("%d\n", dim); #endif args.w = dim; args.h = dim; pthread_join(load_thread, 0); train = buffer; free_data(train); load_thread = load_data(args); #pragma omp parallel for for(i = 0; i < ngpus; ++i){ resize_network(nets[i], dim, dim); } net = nets[0]; } #ifndef LOSS_ONLY time=what_time_is_it_now(); #endif pthread_join(load_thread, 0); train = buffer; load_thread = load_data(args); /* int k; for(k = 0; k < l.max_boxes; ++k){ box b = float_to_box(train.y.vals[10] + 1 + k*5); if(!b.x) break; printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h); } */ /* int zz; for(zz = 0; zz < train.X.cols; ++zz){ image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]); int k; for(k = 0; k < l.max_boxes; ++k){ box b = float_to_box(train.y.vals[zz] + k*5, 1); printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); draw_bbox(im, b, 1, 1,0,0); } show_image(im, "truth11"); cvWaitKey(0); save_image(im, "truth11"); } */ #ifndef LOSS_ONLY printf("Loaded: %lf seconds\n", what_time_is_it_now()-time); #endif #ifndef LOSS_ONLY time=what_time_is_it_now(); #endif float loss = 0; #ifdef GPU if (gpu_index >= 0) { if (ngpus == 1) { loss = train_network(net, train); } else { loss = train_networks(nets, ngpus, train, 4); } } else { loss = train_network(net, train); } #else loss = train_network(net, train); 
#endif if (avg_loss < 0) avg_loss = loss; avg_loss = avg_loss*.9 + loss*.1; i = get_current_batch(net); #ifdef LOSS_ONLY printf("%lf\t%f\n", what_time_is_it_now()-time, loss); #else printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs); #endif #ifdef GPU if (loss != loss && gpu_index >= 0) { opencl_deinit(gpusg, ngpusg); } #endif if(loss != loss) { printf("NaN LOSS detected! No possible to continue!\n"); exit(-7); } if(i%100==0){ #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s.backup", backup_directory, base); save_weights(net, buff); } if(i%10000==0 || (i < 1000 && i%100 == 0)){ #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i); save_weights(net, buff); } free_data(train); #ifdef GPU_STATS opencl_dump_mem_stat(); #endif #ifdef BENCHMARK break; #endif } #ifdef GPU if (gpu_index >= 0) { if (ngpus != 1) sync_nets(nets, ngpus, 0); } #endif char buff[256]; sprintf(buff, "%s/%s_final.weights", backup_directory, base); save_weights(net, buff); free(paths); free(plist); free(base); free(nets); free(options); } static int get_coco_image_id(char *filename) { char *p = strrchr(filename, '/'); char *c = strrchr(filename, '_'); if(c) p = c; return atoi(p+1); } static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h) { int i, j; int image_id = get_coco_image_id(image_path); for(i = 0; i < num_boxes; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2.; float xmax = dets[i].bbox.x + dets[i].bbox.w/2.; float ymin = dets[i].bbox.y - dets[i].bbox.h/2.; float ymax = dets[i].bbox.y + dets[i].bbox.h/2.; if (xmin < 0) xmin = 0; if (ymin < 0) ymin = 0; if (xmax > w) xmax = w; if (ymax > h) ymax = h; float bx = xmin; float by = ymin; float bw = 
xmax - xmin; float bh = ymax - ymin; for(j = 0; j < classes; ++j){ if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]); } } } void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h) { int i, j; for(i = 0; i < total; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1; float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1; float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1; float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1; if (xmin < 1) xmin = 1; if (ymin < 1) ymin = 1; if (xmax > w) xmax = w; if (ymax > h) ymax = h; for(j = 0; j < classes; ++j){ if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j], xmin, ymin, xmax, ymax); } } } void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h) { int i, j; for(i = 0; i < total; ++i){ float xmin = dets[i].bbox.x - dets[i].bbox.w/2.; float xmax = dets[i].bbox.x + dets[i].bbox.w/2.; float ymin = dets[i].bbox.y - dets[i].bbox.h/2.; float ymax = dets[i].bbox.y + dets[i].bbox.h/2.; if (xmin < 0) xmin = 0; if (ymin < 0) ymin = 0; if (xmax > w) xmax = w; if (ymax > h) ymax = h; for(j = 0; j < classes; ++j){ int class = j; if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class], xmin, ymin, xmax, ymax); } } } void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile) { int j; list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "data/train.list"); char *name_list = option_find_str(options, "names", "data/names.list"); char *prefix = option_find_str(options, "results", "results"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network *net = load_network(cfgfile, weightfile, 0); 
set_batch_network(net, 2); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = get_paths(valid_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int classes = l.classes; char buff[1024]; char *type = option_find_str(options, "eval", "voc"); FILE *fp = 0; FILE **fps = 0; int coco = 0; int imagenet = 0; if(0==strcmp(type, "coco")){ if(!outfile) outfile = "coco_results"; snprintf(buff, 1024, "%s/%s.json", prefix, outfile); fp = fopen(buff, "w"); fprintf(fp, "[\n"); coco = 1; } else if(0==strcmp(type, "imagenet")){ if(!outfile) outfile = "imagenet-detection"; snprintf(buff, 1024, "%s/%s.txt", prefix, outfile); fp = fopen(buff, "w"); imagenet = 1; classes = 200; } else { if(!outfile) outfile = "comp4_det_test_"; fps = (FILE**)calloc(classes, sizeof(FILE *)); for(j = 0; j < classes; ++j){ snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]); fps[j] = fopen(buff, "w"); } } int m = plist->size; int i=0; int t; float thresh = .005; float nms = .45; int nthreads = 4; image *val = (image*)calloc(nthreads, sizeof(image)); image *val_resized = (image*)calloc(nthreads, sizeof(image)); image *buf = (image*)calloc(nthreads, sizeof(image)); image *buf_resized = (image*)calloc(nthreads, sizeof(image)); pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t)); image input = make_image(net->w, net->h, net->c*2); load_args args = {0}; args.w = net->w; args.h = net->h; //args.type = IMAGE_DATA; args.type = LETTERBOX_DATA; for(t = 0; t < nthreads; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } double start = what_time_is_it_now(); for(i = nthreads; i < m+nthreads; i += nthreads){ fprintf(stderr, "%d\n", i); for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for(t = 0; t < nthreads && i+t < m; ++t){ 
args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ char *path = paths[i+t-nthreads]; char *id = basecfg(path); copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1); flip_image(val_resized[t]); copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1); network_predict(net, input.data); int w = val[t].w; int h = val[t].h; int num = 0; int nboxes = 0; detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } if (coco){ print_cocos(fp, path, dets, num, classes, w, h); } else if (imagenet){ print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h); } else { print_detector_detections(fps, id, dets, num, classes, w, h); } free_detections(dets, num); free(id); free_image(val[t]); free_image(val_resized[t]); } } for(j = 0; j < classes; ++j){ if(fps) fclose(fps[j]); } if(coco){ fseek(fp, -2, SEEK_CUR); fprintf(fp, "\n]\n"); fclose(fp); } fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start); } void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile) { int j; list *options = read_data_cfg(datacfg); char *valid_images = option_find_str(options, "valid", "data/train.list"); char *name_list = option_find_str(options, "names", "data/names.list"); char *prefix = option_find_str(options, "results", "results"); char **names = get_labels(name_list); char *mapf = option_find_str(options, "map", 0); int *map = 0; if (mapf) map = read_map(mapf); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); list *plist = 
get_paths(valid_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int classes = l.classes; char buff[1024]; char *type = option_find_str(options, "eval", "voc"); FILE *fp = 0; FILE **fps = 0; int coco = 0; int imagenet = 0; if(0==strcmp(type, "coco")){ if(!outfile) outfile = "coco_results"; snprintf(buff, 1024, "%s/%s.json", prefix, outfile); fp = fopen(buff, "w"); fprintf(fp, "[\n"); coco = 1; } else if(0==strcmp(type, "imagenet")){ if(!outfile) outfile = "imagenet-detection"; snprintf(buff, 1024, "%s/%s.txt", prefix, outfile); fp = fopen(buff, "w"); imagenet = 1; classes = 200; } else { if(!outfile) outfile = "comp4_det_test_"; fps = (FILE**)calloc(classes, sizeof(FILE *)); for(j = 0; j < classes; ++j){ snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]); fps[j] = fopen(buff, "w"); } } int m = plist->size; int i=0; int t; float thresh = .005; float nms = .45; int nthreads = 4; image *val = (image*)calloc(nthreads, sizeof(image)); image *val_resized = (image*)calloc(nthreads, sizeof(image)); image *buf = (image*)calloc(nthreads, sizeof(image)); image *buf_resized = (image*)calloc(nthreads, sizeof(image)); pthread_t *thr = (pthread_t*)calloc(nthreads, sizeof(pthread_t)); load_args args = {0}; args.w = net->w; args.h = net->h; //args.type = IMAGE_DATA; args.type = LETTERBOX_DATA; for(t = 0; t < nthreads; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } double start = what_time_is_it_now(); for(i = nthreads; i < m+nthreads; i += nthreads){ fprintf(stderr, "%d\n", i); for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ pthread_join(thr[t], 0); val[t] = buf[t]; val_resized[t] = buf_resized[t]; } for(t = 0; t < nthreads && i+t < m; ++t){ args.path = paths[i+t]; args.im = &buf[t]; args.resized = &buf_resized[t]; thr[t] = load_data_in_thread(args); } for(t = 0; t < nthreads && i+t-nthreads < m; ++t){ char *path = paths[i+t-nthreads]; char *id = 
basecfg(path); float *X = val_resized[t].data; network_predict(net, X); int w = val[t].w; int h = val[t].h; int nboxes = 0; detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } if (coco){ print_cocos(fp, path, dets, nboxes, classes, w, h); } else if (imagenet){ print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h); } else { print_detector_detections(fps, id, dets, nboxes, classes, w, h); } free_detections(dets, nboxes); free(id); free_image(val[t]); free_image(val_resized[t]); } } for(j = 0; j < classes; ++j){ if(fps) fclose(fps[j]); } if(coco){ fseek(fp, -2, SEEK_CUR); fprintf(fp, "\n]\n"); fclose(fp); } fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start); } void validate_detector_recall(char *datacfg, char *cfgfile, char *weightfile) { network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay); srand(time(0)); //list *plist = get_paths("data/coco_val_5k.list"); list *options = read_data_cfg(datacfg); char *test_images = option_find_str(options, "test", "data/test.list"); list *plist = get_paths(test_images); char **paths = (char **)list_to_array(plist); layer l = net->layers[net->n-1]; int j, k; int m = plist->size; int i=0; float thresh = .001; float iou_thresh = .5; float nms = .4; int total = 0; int correct = 0; int proposals = 0; float avg_iou = 0; for(i = 0; i < m; ++i){ char *path = paths[i]; image orig = load_image_color(path, 0, 0); image sized = resize_image(orig, net->w, net->h); char *id = basecfg(path); network_predict(net, sized.data); int nboxes = 0; detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes); if (nms) { if (l.nms_kind == DEFAULT_NMS) 
do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int num_labels = 0; box_label *truth = read_boxes(labelpath, &num_labels); for(k = 0; k < nboxes; ++k){ if(dets[k].objectness > thresh){ ++proposals; } } for (j = 0; j < num_labels; ++j) { ++total; box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h}; float best_iou = 0; for(k = 0; k < l.w*l.h*l.n; ++k){ float iou = box_iou(dets[k].bbox, t); if(dets[k].objectness > thresh && iou > best_iou){ best_iou = iou; } } avg_iou += best_iou; if(best_iou > iou_thresh){ ++correct; } } fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total); free(id); free_image(orig); free_image(sized); } } void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen) { list *options = read_data_cfg(datacfg); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); image **alphabet = load_alphabet(); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); double time; char buff[256]; char *input = buff; float nms=.45; while(1){ if(filename){ strncpy(input, filename, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if(!input) return; strtok(input, "\n"); } image im = load_image_color(input,0,0); int resize = im.w != net->w || im.h != net->h; image sized = resize ? 
letterbox_image(im, net->w, net->h) : im; //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n-1]; float *X = sized.data; time=what_time_is_it_now(); if (l.type == DETECTION || l.type == REGION || l.type == YOLO) { network_predict(net, X); } if (l.type == YOLO4) { network_predict_y4(net, X); } printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time); int nboxes = 0; detection *dets = 0; if (l.type == DETECTION || l.type == REGION || l.type == YOLO) { dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); } if (l.type == YOLO4) { dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); } //printf("%d\n", nboxes); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } if (l.type == DETECTION || l.type == REGION || l.type == YOLO) { draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0); } if (l.type == YOLO4) { draw_detections_v3(im, dets, nboxes, thresh, names, alphabet, l.classes, 0); } free_detections(dets, nboxes); if(outfile){ save_image(im, outfile); } else{ save_image(im, "predictions"); #ifdef OPENCV show_image(im, "predictions", 0); #endif } free_image(im); if (resize) free_image(sized); if (filename) break; } } int exists(const char *fname, const char* ext) { FILE *file; if (strstr(fname, ext) && (file = fopen(fname, "r"))) { fclose(file); return 1; } return 0; } int empty(char *dirname) { int n = 0; struct dirent *d; DIR *dir = opendir(dirname); if (dir == NULL) // not a dir or doesn't exist return 1; while ((d = readdir(dir)) != NULL) { if(++n > 2) break; } closedir(dir); if (n <= 2) //dir empty return 1; else return 0; } void test_ddetector(char *datacfg, 
char *cfgfile, char *weightfile, char *in_dir, float thresh, float hier_thresh, char *out_dir) { list *options = read_data_cfg(datacfg); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); image **alphabet = load_alphabet(); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); double time; char buff[256]; char *input = buff; float nms=.45; char fname[256]; char ffname[1024]; char ffoname[1024]; struct dirent *de = NULL; while(1) { while (empty(in_dir)) { usleep(100); } DIR *dr = opendir(in_dir); while ((de = readdir(dr)) != NULL) { printf("%s\n", de->d_name); strcpy(fname, de->d_name); strcpy(ffname, in_dir); strcat(ffname, "/"); strcat(ffname, fname); if (!exists(ffname, ".jpg")) continue; if (1) { strcpy(ffoname, out_dir); strcat(ffoname, "/"); strcat(ffoname, fname); int len = strlen(ffoname) - 4; ffoname[len] = '\0'; strncpy(input, ffname, 256); } else { printf("Enter Image Path: "); fflush(stdout); input = fgets(input, 256, stdin); if (!input) continue; strtok(input, "\n"); } off_t size = 0; off_t offs = 0; do { offs = size; stat(input, &st); size = st.st_size; if (offs != size) usleep(10); else break; } while (1); image im = load_image_color(input, 0, 0); int resize = im.w != net->w || im.h != net->h; image sized = resize ? 
letterbox_image(im, net->w, net->h) : im; //image sized = resize_image(im, net->w, net->h); //image sized2 = resize_max(im, net->w); //image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h); //resize_network(net, sized.w, sized.h); layer l = net->layers[net->n - 1]; float *X = sized.data; time = what_time_is_it_now(); network_predict(net, X); printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now() - time); int nboxes = 0; detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes); //printf("%d\n", nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes, 0); free_detections(dets, nboxes); free_image(im); if (resize) free_image(sized); // if (filename) break; remove(input); } closedir(dr); } } /* void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; float nms = .45; while(1){ image in = get_image_from_stream_cv(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; float *X = in_s.data; network_predict(net, X); int nboxes = 
0; detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int left = b.x-b.w/2.; int top = b.y-b.h/2.; censor_image(in, left, top, b.w, b.h); } } show_image(in, base); cvWaitKey(10); free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream_cv(cap); free_image(in); } } #endif } void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip) { #ifdef OPENCV char *base = basecfg(cfgfile); network *net = load_network(cfgfile, weightfile, 0); set_batch_network(net, 1); srand(2222222); CvCapture * cap; int w = 1280; int h = 720; if(filename){ cap = cvCaptureFromFile(filename); }else{ cap = cvCaptureFromCAM(cam_index); } if(w){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w); } if(h){ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h); } if(!cap) error("Couldn't connect to webcam.\n"); cvNamedWindow(base, CV_WINDOW_NORMAL); cvResizeWindow(base, 512, 512); float fps = 0; int i; int count = 0; float nms = .45; while(1){ image in = get_image_from_stream_cv(cap); //image in_s = resize_image(in, net->w, net->h); image in_s = letterbox_image(in, net->w, net->h); layer l = net->layers[net->n-1]; show_image(in, base); int nboxes = 0; float *X = in_s.data; network_predict(net, X); detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes); //if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, 
nms, l.nms_kind, l.beta_nms); } for(i = 0; i < nboxes; ++i){ if(dets[i].prob[class] > thresh){ box b = dets[i].bbox; int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h; int dx = b.x*in.w-size/2.; int dy = b.y*in.h-size/2.; image bim = crop_image(in, dx, dy, size, size); char buff[2048]; sprintf(buff, "results/extract/%07d", count); ++count; save_image(bim, buff); free_image(bim); } } free_detections(dets, nboxes); free_image(in_s); free_image(in); float curr = 0; fps = .9*fps + .1*curr; for(i = 0; i < skip; ++i){ image in = get_image_from_stream_cv(cap); free_image(in); } } #endif } */ /* void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets) { network_predict_image(net, im); layer l = net->layers[net->n-1]; int nboxes = num_boxes(net); fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets); if (nms) { if (l.nms_kind == DEFAULT_NMS) do_nms_sort(dets, nboxes, l.classes, nms); else diounms_sort_y4(dets, nboxes, l.classes, nms, l.nms_kind, l.beta_nms); } } */ void run_detector(int argc, char **argv) { char *prefix = find_char_arg(argc, argv, "-prefix", 0); float thresh = find_float_arg(argc, argv, "-thresh", .5); float hier_thresh = find_float_arg(argc, argv, "-hier", .5); int cam_index = find_int_arg(argc, argv, "-c", 0); int frame_skip = find_int_arg(argc, argv, "-s", 0); int avg = find_int_arg(argc, argv, "-avg", 3); if(argc < 4){ fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]); return; } char *gpu_list = find_char_arg(argc, argv, "-gpus", 0); char *outfile = find_char_arg(argc, argv, "-out", 0); int *gpus = 0; int gpu = 0; int ngpus = 0; if(gpu_list){ printf("%s\n", gpu_list); int len = strlen(gpu_list); ngpus = 1; int i; for(i = 0; i < len; ++i){ if (gpu_list[i] == ',') ++ngpus; } gpus = (int*)calloc(ngpus, sizeof(int)); for(i = 0; i < ngpus; ++i){ gpus[i] = atoi(gpu_list); gpu_list = strchr(gpu_list, ',')+1; } } else { gpu = gpu_index; gpus = &gpu; 
ngpus = 1; } int clear = find_arg(argc, argv, "-clear"); int fullscreen = find_arg(argc, argv, "-fullscreen"); int width = find_int_arg(argc, argv, "-w", 0); int height = find_int_arg(argc, argv, "-h", 0); int fps = find_int_arg(argc, argv, "-fps", 0); //int class = find_int_arg(argc, argv, "-class", 0); char *datacfg = argv[3]; char *cfg = argv[4]; char *weights = (argc > 5) ? argv[5] : 0; char *filename = (argc > 6) ? argv[6]: 0; if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen); else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear); else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile); else if(0==strcmp(argv[2], "recall")) validate_detector_recall(datacfg, cfg, weights); else if(0==strcmp(argv[2], "demo")) { list *options = read_data_cfg(datacfg); int classes = option_find_int(options, "classes", 20); char *name_list = option_find_str(options, "names", "data/names.list"); char **names = get_labels(name_list); demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen); } //else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); //else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip); } #undef class
omp_thread_attach_test_1.c
// execute in sequence // input the number of num_user_threadsation #include <stdlib.h> #include <pthread.h> #include <omp.h> #include <sys/timeb.h> #include <omp_interop.h> #include <unistd.h> /* read timer in second */ double read_timer() { struct timeb tm; ftime(&tm); return (double) tm.time + (double) tm.millitm / 1000.0; } // calling thread_attach() with NULL parameters void *test_fun(void *arg){ printf("thread: %d\n", *((int*)arg)); omp_thread_attach(NULL, NULL); return ((void*)0); } // calling thread_attach() with a specified stack void *test_fun_new_stack(void *arg){ printf("thread: %d\n", *((int*)arg)); void * stack = malloc(4096); omp_thread_attach(stack, NULL); free(stack); return ((void*)0); } int main(int argc, char * argv[]) { if (argc >= 2){ omp_set_num_threads(atoi(argv[1])); num_user_threads = atoi(argv[1]); } int num_user_threads = 100; pthread_t pthreads[num_user_threads]; // pthread_create, and attach to OMP runtime for(i=0; i<num_user_threads; i++){ pthread_create(&pthreads[i], NULL, test_fun, i); } // omp_set_nested(); // create 50 threads and put them into threadpool #pragma omp parallel shared(user_thread_id, counter) private(tid) num_threads(50) { tid = omp_get_thread_num(); } omp_quiesce(); /* terminate OpenMP runtime so user threads return to pthread */ for(i=0; i<num_user_threads; i++) { pthread_join(pthreads[i], NULL); } // while(1); }
GB_binop__lt_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lt_fp64 // A.*B function (eWiseMult): GB_AemultB__lt_fp64 // A*D function (colscale): GB_AxD__lt_fp64 // D*A function (rowscale): GB_DxB__lt_fp64 // C+=B function (dense accum): GB_Cdense_accumB__lt_fp64 // C+=b function (dense accum): GB_Cdense_accumb__lt_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_fp64 // C=scalar+B GB_bind1st__lt_fp64 // C=scalar+B' GB_bind1st_tran__lt_fp64 // C=A+scalar GB_bind2nd__lt_fp64 // C=A'+scalar GB_bind2nd_tran__lt_fp64 // C type: bool // A type: double // B,b type: double // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // 
cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_FP64 || GxB_NO_LT_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lt_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lt_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lt_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lt_fp64 ( GrB_Matrix C, const 
GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lt_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lt_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lt_fp64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lt_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; double bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
fac_zero_stencilcoef.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.17 $ ***********************************************************************EHEADER*/ #include "_hypre_sstruct_ls.h" #include "fac.h" #define AbsStencilShape(stencil, abs_shape) \ { \ HYPRE_Int ii,jj,kk; \ ii = hypre_IndexX(stencil); \ jj = hypre_IndexY(stencil); \ kk = hypre_IndexZ(stencil); \ abs_shape= abs(ii) + abs(jj) + abs(kk); \ } /*-------------------------------------------------------------------------- * hypre_FacZeroCFSten: Zeroes the coarse stencil coefficients that reach * into an underlying coarsened refinement box. * Algo: For each cbox * { * 1) refine cbox and expand by one in each direction * 2) boxman_intersect with the fboxman * 3) loop over intersection boxes to see if stencil * reaches over. 
* } *--------------------------------------------------------------------------*/ HYPRE_Int hypre_FacZeroCFSten( hypre_SStructPMatrix *Af, hypre_SStructPMatrix *Ac, hypre_SStructGrid *grid, HYPRE_Int fine_part, hypre_Index rfactors ) { hypre_BoxManager *fboxman; hypre_BoxManEntry **boxman_entries; HYPRE_Int nboxman_entries; hypre_SStructPGrid *p_cgrid; hypre_Box fgrid_box; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_Box scaled_box; hypre_Box *shift_ibox; hypre_StructMatrix *smatrix; hypre_StructStencil *stencils; HYPRE_Int stencil_size; hypre_Index refine_factors, upper_shift; hypre_Index stride; hypre_Index stencil_shape; hypre_Index zero_index, ilower, iupper; HYPRE_Int nvars, var1, var2; HYPRE_Int ndim; hypre_Box *ac_dbox; double *ac_ptr; hypre_Index loop_size; HYPRE_Int iac; HYPRE_Int ci, i, j; HYPRE_Int abs_shape; HYPRE_Int ierr = 0; p_cgrid = hypre_SStructPMatrixPGrid(Ac); nvars = hypre_SStructPMatrixNVars(Ac); ndim = hypre_SStructPGridNDim(p_cgrid); hypre_ClearIndex(zero_index); hypre_ClearIndex(stride); hypre_ClearIndex(upper_shift); for (i= 0; i< ndim; i++) { stride[i]= 1; upper_shift[i]= rfactors[i]-1; } hypre_CopyIndex(rfactors, refine_factors); if (ndim < 3) { for (i= ndim; i< 3; i++) { refine_factors[i]= 1; } } for (var1= 0; var1< nvars; var1++) { cgrid= hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(Ac), var1); cgrid_boxes= hypre_StructGridBoxes(cgrid); fboxman= hypre_SStructGridBoxManager(grid, fine_part, var1); /*------------------------------------------------------------------ * For each parent coarse box find all fboxes that may be connected * through a stencil entry- refine this box, expand it by one * in each direction, and boxman_intersect with fboxman *------------------------------------------------------------------*/ hypre_ForBoxI(ci, cgrid_boxes) { cgrid_box= hypre_BoxArrayBox(cgrid_boxes, ci); hypre_StructMapCoarseToFine(hypre_BoxIMin(cgrid_box), zero_index, refine_factors, 
hypre_BoxIMin(&scaled_box)); hypre_StructMapCoarseToFine(hypre_BoxIMax(cgrid_box), upper_shift, refine_factors, hypre_BoxIMax(&scaled_box)); hypre_SubtractIndex(hypre_BoxIMin(&scaled_box), stride, hypre_BoxIMin(&scaled_box)); hypre_AddIndex(hypre_BoxIMax(&scaled_box), stride, hypre_BoxIMax(&scaled_box)); hypre_BoxManIntersect(fboxman, hypre_BoxIMin(&scaled_box), hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries); for (var2= 0; var2< nvars; var2++) { stencils= hypre_SStructPMatrixSStencil(Ac, var1, var2); if (stencils != NULL) { stencil_size= hypre_StructStencilSize(stencils); smatrix = hypre_SStructPMatrixSMatrix(Ac, var1, var2); ac_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix), ci); /*--------------------------------------------------------- * Find the stencil coefficients that must be zeroed off. * Loop over all possible boxes. *---------------------------------------------------------*/ for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape); AbsStencilShape(stencil_shape, abs_shape); if (abs_shape) /* non-centre stencils are zeroed */ { /* look for connecting fboxes that must be zeroed. 
*/ for (j= 0; j< nboxman_entries; j++) { hypre_BoxManEntryGetExtents(boxman_entries[j], ilower, iupper); hypre_BoxSetExtents(&fgrid_box, ilower, iupper); shift_ibox= hypre_CF_StenBox(&fgrid_box, cgrid_box, stencil_shape, refine_factors, ndim); if ( hypre_BoxVolume(shift_ibox) ) { ac_ptr= hypre_StructMatrixExtractPointerByIndex(smatrix, ci, stencil_shape); hypre_BoxGetSize(shift_ibox, loop_size); hypre_BoxLoop1Begin(ndim, loop_size, ac_dbox, hypre_BoxIMin(shift_ibox), stride, iac); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iac) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop1For(iac) { ac_ptr[iac] = 0.0; } hypre_BoxLoop1End(iac); } /* if ( hypre_BoxVolume(shift_ibox) ) */ hypre_BoxDestroy(shift_ibox); } /* for (j= 0; j< nboxman_entries; j++) */ } /* if (abs_shape) */ } /* for (i= 0; i< stencil_size; i++) */ } /* if (stencils != NULL) */ } /* for (var2= 0; var2< nvars; var2++) */ hypre_TFree(boxman_entries); } /* hypre_ForBoxI ci */ } /* for (var1= 0; var1< nvars; var1++) */ return ierr; } /*-------------------------------------------------------------------------- * hypre_FacZeroFCSten: Zeroes the fine stencil coefficients that reach * into a coarse box. * Idea: zero off any stencil connection of a fine box that does not * connect to a sibling box * Algo: For each fbox * { * 1) expand by one in each direction so that sibling boxes can be * reached * 2) boxman_intersect with the fboxman to get all fboxes including * itself and the siblings * 3) loop over intersection boxes, shift them in the stencil * direction (now we are off the fbox), and subtract any sibling * extents. The remaining chunks (boxes of a box_array) are * the desired but shifted extents. * 4) shift these shifted extents in the negative stencil direction * to get back into fbox. Zero-off the matrix over these latter * extents. 
* } *--------------------------------------------------------------------------*/ HYPRE_Int hypre_FacZeroFCSten( hypre_SStructPMatrix *A, hypre_SStructGrid *grid, HYPRE_Int fine_part) { MPI_Comm comm= hypre_SStructGridComm(grid); hypre_BoxManager *fboxman; hypre_BoxManEntry **boxman_entries; HYPRE_Int nboxman_entries; hypre_SStructPGrid *p_fgrid; hypre_StructGrid *fgrid; hypre_BoxArray *fgrid_boxes; hypre_Box *fgrid_box; hypre_Box scaled_box; hypre_BoxArray *intersect_boxes, *tmp_box_array1, *tmp_box_array2; hypre_StructMatrix *smatrix; hypre_StructStencil *stencils; HYPRE_Int stencil_size; hypre_Index stride, ilower, iupper; hypre_Index stencil_shape, shift_index; hypre_Box shift_ibox; hypre_Box intersect_box; hypre_Index size_ibox; HYPRE_Int nvars, var1, var2; HYPRE_Int ndim; hypre_Box *a_dbox; double *a_ptr; hypre_Index loop_size; HYPRE_Int ia; HYPRE_Int fi, fj, i, j; HYPRE_Int abs_shape; HYPRE_Int myid, proc; HYPRE_Int ierr = 0; hypre_MPI_Comm_rank(comm, &myid); p_fgrid = hypre_SStructPMatrixPGrid(A); nvars = hypre_SStructPMatrixNVars(A); ndim = hypre_SStructPGridNDim(p_fgrid); hypre_ClearIndex(stride); for (i= 0; i< ndim; i++) { stride[i]= 1; } tmp_box_array1= hypre_BoxArrayCreate(1); for (var1= 0; var1< nvars; var1++) { fgrid = hypre_SStructPGridSGrid(hypre_SStructPMatrixPGrid(A), var1); fgrid_boxes= hypre_StructGridBoxes(fgrid); fboxman = hypre_SStructGridBoxManager(grid, fine_part, var1); hypre_ForBoxI(fi, fgrid_boxes) { fgrid_box= hypre_BoxArrayBox(fgrid_boxes, fi); hypre_ClearIndex(size_ibox); for (i= 0; i< ndim; i++) { size_ibox[i] = hypre_BoxSizeD(fgrid_box, i) - 1; } /* expand fgrid_box & boxman_intersect with fboxman. 
*/ hypre_SubtractIndex(hypre_BoxIMin(fgrid_box), stride, hypre_BoxIMin(&scaled_box)); hypre_AddIndex(hypre_BoxIMax(fgrid_box), stride, hypre_BoxIMax(&scaled_box)); hypre_BoxManIntersect(fboxman, hypre_BoxIMin(&scaled_box), hypre_BoxIMax(&scaled_box), &boxman_entries, &nboxman_entries); for (var2= 0; var2< nvars; var2++) { stencils= hypre_SStructPMatrixSStencil(A, var1, var2); if (stencils != NULL) { stencil_size= hypre_StructStencilSize(stencils); smatrix = hypre_SStructPMatrixSMatrix(A, var1, var2); a_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(smatrix), fi); for (i= 0; i< stencil_size; i++) { hypre_CopyIndex(hypre_StructStencilElement(stencils, i), stencil_shape); AbsStencilShape(stencil_shape, abs_shape); if (abs_shape) /* non-centre stencils are zeroed */ { hypre_SetIndex(shift_index, size_ibox[0]*stencil_shape[0], size_ibox[1]*stencil_shape[1], size_ibox[2]*stencil_shape[2]); hypre_AddIndex(shift_index, hypre_BoxIMin(fgrid_box), hypre_BoxIMin(&shift_ibox)); hypre_AddIndex(shift_index, hypre_BoxIMax(fgrid_box), hypre_BoxIMax(&shift_ibox)); hypre_IntersectBoxes(&shift_ibox, fgrid_box, &shift_ibox); hypre_SetIndex(shift_index, -stencil_shape[0], -stencil_shape[1], -stencil_shape[2]); /*----------------------------------------------------------- * Check to see if the stencil does not couple to a sibling * box. These boxes should be in boxman_entries. But do not * subtract fgrid_box itself, which is also in boxman_entries. 
*-----------------------------------------------------------*/ hypre_AddIndex(stencil_shape, hypre_BoxIMin(&shift_ibox), hypre_BoxIMin(&shift_ibox)); hypre_AddIndex(stencil_shape, hypre_BoxIMax(&shift_ibox), hypre_BoxIMax(&shift_ibox)); intersect_boxes= hypre_BoxArrayCreate(1); hypre_CopyBox(&shift_ibox, hypre_BoxArrayBox(intersect_boxes,0)); for (j= 0; j< nboxman_entries; j++) { hypre_SStructBoxManEntryGetProcess(boxman_entries[j], &proc); hypre_SStructBoxManEntryGetBoxnum(boxman_entries[j], &fj); if ((proc != myid) || (fj != fi)) { hypre_BoxManEntryGetExtents(boxman_entries[j], ilower, iupper); hypre_BoxSetExtents(&scaled_box, ilower, iupper); hypre_IntersectBoxes(&shift_ibox, &scaled_box, &intersect_box); if ( hypre_BoxVolume(&intersect_box) ) { hypre_CopyBox(&intersect_box, hypre_BoxArrayBox(tmp_box_array1, 0)); tmp_box_array2= hypre_BoxArrayCreate(0); hypre_SubtractBoxArrays(intersect_boxes, tmp_box_array1, tmp_box_array2); hypre_BoxArrayDestroy(tmp_box_array2); } } } /* for (j= 0; j< nboxman_entries; j++) */ /*----------------------------------------------------------- * intersect_boxes now has the shifted extents for the * coefficients to be zeroed. 
*-----------------------------------------------------------*/ a_ptr= hypre_StructMatrixExtractPointerByIndex(smatrix, fi, stencil_shape); hypre_ForBoxI(fj, intersect_boxes) { hypre_CopyBox(hypre_BoxArrayBox(intersect_boxes, fj), &intersect_box); hypre_AddIndex(shift_index, hypre_BoxIMin(&intersect_box), hypre_BoxIMin(&intersect_box)); hypre_AddIndex(shift_index, hypre_BoxIMax(&intersect_box), hypre_BoxIMax(&intersect_box)); hypre_BoxGetSize(&intersect_box, loop_size); hypre_BoxLoop1Begin(ndim, loop_size, a_dbox, hypre_BoxIMin(&intersect_box), stride, ia); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,ia) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop1For(ia) { a_ptr[ia] = 0.0; } hypre_BoxLoop1End(ia); } /* hypre_ForBoxI(fj, intersect_boxes) */ hypre_BoxArrayDestroy(intersect_boxes); } /* if (abs_shape) */ } /* for (i= 0; i< stencil_size; i++) */ } /* if (stencils != NULL) */ } /* for (var2= 0; var2< nvars; var2++) */ hypre_TFree(boxman_entries); } /* hypre_ForBoxI(fi, fgrid_boxes) */ } /* for (var1= 0; var1< nvars; var1++) */ hypre_BoxArrayDestroy(tmp_box_array1); return ierr; }
lambda.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// Rebuild lambda[] = 1/Aii (the inverse of the operator diagonal) for every subdomain at this
// level of the multigrid hierarchy, and bound the dominant eigenvalue of D^{-1}A using
// Gershgorin discs.  The diagonal Aii couples the cell-centered alpha[] with the six
// face-centered beta_{i,j,k}[] values surrounding each cell (Helmholtz form a*alpha - b*div(beta grad)).
//   domain - distributed domain holding per-box grids and timers (updated in place)
//   level  - multigrid level whose lambda grid and eigenvalue estimate are rebuilt
//   a, b   - scalar coefficients of the operator
// Threading is either across boxes or within a single box (collapsed k,j loops); the two
// 'if(...)' clauses are mutually exclusive, so only one of the nested parallel regions is active.
void rebuild_lambda(domain_type * domain, int level, double a, double b){
  uint64_t _timeStart = CycleTime();  // cycle counter; charged to the blas1 timing bucket below

  int CollaborativeThreadingBoxSize = 100000; // i.e. never
  #ifdef __COLLABORATIVE_THREADING
  CollaborativeThreadingBoxSize = 1 << __COLLABORATIVE_THREADING;
  #endif
  // Pick the threading strategy from the box dimension at this level.
  int omp_across_boxes = (domain->subdomains[0].levels[level].dim.i <  CollaborativeThreadingBoxSize);
  int omp_within_a_box = (domain->subdomains[0].levels[level].dim.i >= CollaborativeThreadingBoxSize);
  int box;

  double dominant_eigenvalue = -1.0;  // running max over boxes of the per-box Gershgorin bound
  #pragma omp parallel for private(box) if(omp_across_boxes) reduction(max:dominant_eigenvalue)
  for(box=0;box<domain->subdomains_per_rank;box++){
    int i,j,k;
    int pencil = domain->subdomains[box].levels[level].pencil;  // stride between adjacent j rows
    int  plane = domain->subdomains[box].levels[level].plane;   // stride between adjacent k planes
    int ghosts = domain->subdomains[box].levels[level].ghosts;
    int  dim_k = domain->subdomains[box].levels[level].dim.k;
    int  dim_j = domain->subdomains[box].levels[level].dim.j;
    int  dim_i = domain->subdomains[box].levels[level].dim.i;
    double h2inv = 1.0/(domain->h[level]*domain->h[level]);
    // Offset each grid pointer past the ghost-zone halo so that ijk = i + j*pencil + k*plane
    // indexes interior cells directly.
    double * __restrict__ alpha  = domain->subdomains[box].levels[level].grids[__alpha ] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_i = domain->subdomains[box].levels[level].grids[__beta_i] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_j = domain->subdomains[box].levels[level].grids[__beta_j] + ghosts*(1+pencil+plane);
    double * __restrict__ beta_k = domain->subdomains[box].levels[level].grids[__beta_k] + ghosts*(1+pencil+plane);
    double * __restrict__ lambda = domain->subdomains[box].levels[level].grids[__lambda] + ghosts*(1+pencil+plane);
    double box_eigenvalue = -1.0;
    #pragma omp parallel for private(k,j,i) if(omp_within_a_box) collapse(2) reduction(max:box_eigenvalue)
    for(k=0;k<dim_k;k++){
    for(j=0;j<dim_j;j++){
    for(i=0;i<dim_i;i++){
      int ijk = i + j*pencil + k*plane;
      // radius of Gershgorin disc is the sum of the absolute values of the off-diagonal elements...
      double sumAij = fabs(b*h2inv*beta_i[ijk]) + fabs(b*h2inv*beta_i[ijk+     1])
                    + fabs(b*h2inv*beta_j[ijk]) + fabs(b*h2inv*beta_j[ijk+pencil])
                    + fabs(b*h2inv*beta_k[ijk]) + fabs(b*h2inv*beta_k[ijk+ plane]);
      // center of Gershgorin disc is the diagonal element...
      double    Aii = a*alpha[ijk] - b*h2inv*( -beta_i[ijk]-beta_i[ijk+     1]
                                               -beta_j[ijk]-beta_j[ijk+pencil]
                                               -beta_k[ijk]-beta_k[ijk+ plane] );
      lambda[ijk] = 1.0/Aii; // inverse of the diagonal Aii
      double Di = (Aii + sumAij)/Aii;if(Di>box_eigenvalue)box_eigenvalue=Di; // upper limit to Gershgorin disc == bound on dominant eigenvalue
    }}}
    if(box_eigenvalue>dominant_eigenvalue){dominant_eigenvalue = box_eigenvalue;}
  }
  domain->cycles.blas1[level] += (uint64_t)(CycleTime()-_timeStart);

  #ifdef __MPI
  // Reduce the per-rank bound to the global maximum; time is charged to both the
  // collectives and communication buckets.
  uint64_t _timeStartAllReduce = CycleTime();
  double send = dominant_eigenvalue;
  MPI_Allreduce(&send,&dominant_eigenvalue,1,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD);
  uint64_t _timeEndAllReduce = CycleTime();
  domain->cycles.collectives[level]   += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  domain->cycles.communication[level] += (uint64_t)(_timeEndAllReduce-_timeStartAllReduce);
  #endif

  if(domain->rank==0){if(level==0)printf("\n");printf(" level=%2d, eigenvalue_max ~= %e\n",level,dominant_eigenvalue);fflush(stdout);}
  domain->dominant_eigenvalue_of_DinvA[level] = dominant_eigenvalue;
}
zipmonster_fmt_plug.c
/* This format is reverse engineered from InsidePro Hash Manager!
 *
 * This software is Copyright (c) 2016, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted. */

/* ZipMonster: hash = 50000 nested iterations of MD5, where each round rehashes the
 * UPPERCASE hex encoding of the previous digest (1 initial MD5 of the key + 49999
 * rounds in the loop below).  Scalar (OpenSSL MD5) and SIMD paths are provided. */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_zipmonster;
#elif FMT_REGISTERS_H
john_register_one(&fmt_zipmonster);
#else

#include "arch.h"
#include "sha.h"
#include "md5.h"
#include <string.h>
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "simd-intrinsics.h"
//#undef SIMD_COEF_32
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE               1
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL            "ZipMonster"
#define FORMAT_NAME             "MD5(ZipMonster)"
#define ALGORITHM_NAME          "MD5-" MD5_ALGORITHM_NAME " x 50000"
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             16
#define SALT_SIZE               0
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_ALIGN              sizeof(int)
#define MIN_KEYS_PER_CRYPT      1
#ifdef SIMD_COEF_32
#define MAX_KEYS_PER_CRYPT      (SIMD_PARA_MD5*SIMD_COEF_32)
#else
#define MAX_KEYS_PER_CRYPT      1
#endif
#define FORMAT_TAG              "$zipmonster$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)

static struct fmt_tests zipmonster_tests[] = {
	{"$zipmonster$e0f68d6f40c5f157c169e9ca0a6f09fe", "!"},
	{"4dac447f100ee85327db2b47e295e50d", "1"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *saved_len;                 /* set by set_key; crypt_all recomputes via strlen */
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
/* itoa16u_w[b] holds the two uppercase hex ASCII chars of byte b, packed into one 16-bit word */
static unsigned short itoa16u_w[256];
#ifdef SIMD_COEF_32
/* Byte offset of message byte i of lane 'index' inside the interleaved SIMD MD5 input layout */
#define GETPOS(i,index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*64*SIMD_COEF_32 )
#endif

/* Scale key buffers for OpenMP and build the byte->uppercase-hex lookup table. */
static void init(struct fmt_main *self)
{
	int i;
	char buf[3];
#ifdef _OPENMP
	static int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
	for (i = 0; i < 256; ++i) {
		sprintf(buf, "%X%X", i>>4, i&0xF);
		memcpy(&(itoa16u_w[i]), buf, 2);
	}
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
}

/* Accept "[$zipmonster$]<32 lowercase hex chars>". */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = ciphertext + TAG_LENGTH;
	if (!p)
		return 0;
	if (!ishexlc(p))
		return 0;
	if (strlen(p) != BINARY_SIZE * 2)
		return 0;
	return 1;
}

/* Decode the hex digest (with or without tag) into a static 16-byte buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p = ciphertext;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = ciphertext + TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE && *p; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors over the first word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

#ifndef SIMD_COEF_32
/* Expand 16 digest bytes to 32 uppercase hex chars using the packed lookup table. */
inline static void hex_encode_uppercase(unsigned char *str, unsigned char *_out)
{
	int i;
	unsigned short *out = (unsigned short*)_out;

	for (i = 0; i < BINARY_SIZE; ++i) {
		out[i] = itoa16u_w[str[i]];
	}
}
#endif

/* Compute MD5^50000 of each queued key (hex re-encoding between rounds). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	int inc = 1;

#ifdef SIMD_COEF_32
	inc = SIMD_COEF_32*SIMD_PARA_MD5;
#endif

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += inc) {
		unsigned char buffer[BINARY_SIZE];
		MD5_CTX ctx;
		int n = 49999;	/* plus the initial MD5 of the key = 50000 total */
#ifdef SIMD_COEF_32
		int j, k;
		uint32_t *p, t;
		/* Over-sized stack buffers, manually aligned for the SIMD MD5 body. */
		uint8_t ib[64 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];
		uint8_t ob[16 * SIMD_COEF_32 * SIMD_PARA_MD5 + MEM_ALIGN_SIMD];
		uint8_t *md5 = mem_align(ib, MEM_ALIGN_SIMD);
		uint32_t *crypt_buf = mem_align(ob, MEM_ALIGN_SIMD);

		memset(md5, 0, 64 * SIMD_COEF_32 * SIMD_PARA_MD5);
		/* Seed each SIMD lane with the uppercase hex of MD5(key). */
		for (j = 0; j < SIMD_COEF_32*SIMD_PARA_MD5; ++j) {
			uint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];
			MD5_Init(&ctx);
			MD5_Update(&ctx, saved_key[index+j], strlen(saved_key[index+j]));
			MD5_Final(buffer, &ctx);
			/* k advances by 2 per iteration (once in the [k++], once in the loop header) */
			for (k = 0; k < 16; ++k) {
				op[0] = itoa16u_w[buffer[k++]];
				op[1] = itoa16u_w[buffer[k]];
				op += ((SIMD_COEF_32) << 1);
			}
			md5[GETPOS(32,j)] = 0x80;	/* MD5 padding byte after the 32-char message */
			md5[GETPOS(57,j)] = 1;		/* length field: 32 bytes = 256 bits = 0x100 (little-endian) */
		}
#else
		unsigned char hex_buffer[BINARY_SIZE * 2];
		MD5_Init(&ctx);
		MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		MD5_Final(buffer, &ctx);
		hex_encode_uppercase(buffer, hex_buffer);
#endif
		do {
#ifdef SIMD_COEF_32
			SIMDmd5body(md5, crypt_buf, NULL, SSEi_MIXED_IN);
			// upper case hex encode into the next input buffer.
			for (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; ++j) {
				int i;
				uint16_t *op = (uint16_t*)&md5[GETPOS(0, j)];
				p = &crypt_buf[(j&(SIMD_COEF_32-1))+(4*SIMD_COEF_32*(j/SIMD_COEF_32))];
				/* each of the 4 output words yields 8 hex chars, two bytes at a time */
				for (i = 0; i < 4; ++i) {
					t = *p;
					p += SIMD_COEF_32;
					op[0] = itoa16u_w[t&0xFF];
					op[1] = itoa16u_w[(t>>8)&0xFF];
					t >>= 16;
					op += ((SIMD_COEF_32) << 1);
					op[0] = itoa16u_w[t&0xFF];
					op[1] = itoa16u_w[(t>>8)&0xFF];
					op += ((SIMD_COEF_32) << 1);
				}
			}
#else
			MD5_Init(&ctx);
			MD5_Update(&ctx, hex_buffer, BINARY_SIZE * 2);
			MD5_Final(buffer, &ctx);
			hex_encode_uppercase(buffer, hex_buffer);
#endif
			--n;
		} while (n);
#ifdef SIMD_COEF_32
		/* De-interleave the final SIMD digests into crypt_out. */
		p = crypt_buf;
		for (j = 0; j < SIMD_PARA_MD5*SIMD_COEF_32; j+=SIMD_COEF_32) {
			for (k = 0; k < SIMD_COEF_32*4; ++k) {
				uint32_t J = j+(k&(SIMD_COEF_32-1)), K = (k/SIMD_COEF_32);
				crypt_out[index+J][K] = *p++;
			}
		}
#else
		memcpy((unsigned char*)crypt_out[index], buffer, BINARY_SIZE);
#endif
	}
	return count;
}

/* Quick scan: compare only the first machine word of each digest. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void zipmonster_set_key(char *key, int index)
{
	saved_len[index] = strnzcpyn(saved_key[index], key, sizeof(saved_key[index]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_zipmonster = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
		{ FORMAT_TAG },
#endif
		zipmonster_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		zipmonster_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
conv3x3s1_winograd64_transform_kernel_neon5_GgG.h
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void conv3x3s1_winograd64_transform_kernel_neon5_GgG(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 0.0f, 0.0f}, {-2.0f/9, -2.0f/9, -2.0f/9}, {-2.0f/9, 2.0f/9, -2.0f/9}, {1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j=0; j<8; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<8; i++) { kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } } }
SpatialGridSamplerBilinear.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialGridSamplerBilinear.c"
#else

#undef MIN
#define MIN(a,b) ( ((a)<(b)) ? (a) : (b) )
#undef MAX
#define MAX(a,b) ( ((a)>(b)) ? (a) : (b) )

#undef MODE_BORDER
#define MODE_BORDER 1

/* Validate tensor ranks/sizes for the bilinear grid sampler:
 * input is (N,C,IH,IW), grid is (N,H,W,2); gradOutput (if given) must be (N,C,H,W). */
static inline void THNN_(SpatialGridSamplerBilinear_shapeCheck)
     (THTensor *input, THTensor *grid, THTensor *gradOutput) {
  THNN_ARGCHECK(input->nDimension == 4, 2, input,
		"4D input tensor expected but got: %s");
  THNN_ARGCHECK(grid->nDimension == 4, 2, grid,
		"4D grid tensor expected but got: %s");

  int nbatch   = THTensor_(size)(input, 0);
  int channels = THTensor_(size)(input, 1);
  int oheight  = THTensor_(size)(grid, 1);
  int owidth   = THTensor_(size)(grid, 2);

  THNN_CHECK_DIM_SIZE(grid, 4, 0, nbatch);
  THNN_CHECK_DIM_SIZE(grid, 4, 3, 2);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, 4, 0, nbatch);
    THNN_CHECK_DIM_SIZE(gradOutput, 4, 1, channels);
    THNN_CHECK_DIM_SIZE(gradOutput, 4, 2, oheight);
    THNN_CHECK_DIM_SIZE(gradOutput, 4, 3, owidth);
  }
}

/* Read input[n][c][y][x], or 0 when (x,y) is outside the image (zero padding). */
#define SAFE_GET(input, x, y, n, c, H, W) x >= 0 && x < W && y >=0 \
  && y < H ? THTensor_fastGet4d(input, n, c, y, x) : 0

/* Clamp a coordinate into [0, clip_limit-1] (border padding). */
#define CLIP_COORDINATES(in, out, clip_limit) out = MIN((clip_limit-1), MAX(in, 0))

/* Bilinear sampling: for each output pixel, read normalized (x,y) from grid,
 * map it to input pixel space, and blend the 4 surrounding input pixels.
 * padding_mode selects zero padding (default) or border clamping (MODE_BORDER). */
TH_API void THNN_(SpatialGridSamplerBilinear_updateOutput)(
	  THNNState *state,
	  THTensor *input,
	  THTensor *grid,
	  THTensor *output,
	  int padding_mode) {

  THNN_(SpatialGridSamplerBilinear_shapeCheck)(input, grid, NULL);
  int N = THTensor_(size)(input, 0);
  int C = THTensor_(size)(input, 1);
  int IH = THTensor_(size)(input, 2);
  int IW = THTensor_(size)(input, 3);
  int H = THTensor_(size)(grid, 1);
  int W = THTensor_(size)(grid, 2);

  // resize output to the same shape as input
  THTensor_(resize4d)(output, N, C, H, W);

  // loop over each output pixel
  int n, h, w, c;
#pragma omp parallel for private(n, h, w, c)
  for (n = 0; n < N; ++n) {
    for (h = 0; h < H; ++h) {
      for (w = 0; w < W; ++w) {
	// get the corresponding input x, y co-ordinates from grid
	real ix = THTensor_fastGet4d(grid, n, h, w, 0);
	real iy = THTensor_fastGet4d(grid, n, h, w, 1);

	// normalize ix, iy from [-1, 1] to [0, IH-1] & [0, IW-1]
	ix = ((ix + 1) / 2) * (IW-1);
	iy = ((iy + 1) / 2) * (IH-1);

	// get NE, NW, SE, SW pixel values from (x, y)
	int ix_nw = floor(ix);
	int iy_nw = floor(iy);
	int ix_ne = ix_nw + 1;
	int iy_ne = iy_nw;
	int ix_sw = ix_nw;
	int iy_sw = iy_nw + 1;
	int ix_se = ix_nw + 1;
	int iy_se = iy_nw + 1;

	// get surfaces to each neighbor (bilinear weights sum to 1):
	real nw = (ix_se - ix) * (iy_se - iy);
	real ne = (ix - ix_sw) * (iy_sw - iy);
	real sw = (ix_ne - ix) * (iy - iy_ne);
	real se = (ix - ix_nw) * (iy - iy_nw);

	if (padding_mode==MODE_BORDER){
	  // clip coordinates to image borders
	  CLIP_COORDINATES(ix_nw, ix_nw, IW);
	  CLIP_COORDINATES(iy_nw, iy_nw, IH);
	  CLIP_COORDINATES(ix_ne, ix_ne, IW);
	  CLIP_COORDINATES(iy_ne, iy_ne, IH);
	  CLIP_COORDINATES(ix_sw, ix_sw, IW);
	  CLIP_COORDINATES(iy_sw, iy_sw, IH);
	  CLIP_COORDINATES(ix_se, ix_se, IW);
	  CLIP_COORDINATES(iy_se, iy_se, IH);
	}

	// calculate bilinear weighted pixel value and set output pixel
	for (c = 0; c < C; ++c) {
	  // (c, iy_nw, ix_nw) * nw + (c, iy_ne, ix_ne) * ne
	  // + (c, iy_sw, ix_sw) * sw + (c, iy_se, ix_se) * se
	  real nw_val = SAFE_GET(input, ix_nw, iy_nw, n, c, IH, IW);
	  real ne_val = SAFE_GET(input, ix_ne, iy_ne, n, c, IH, IW);
	  real sw_val = SAFE_GET(input, ix_sw, iy_sw, n, c, IH, IW);
	  real se_val = SAFE_GET(input, ix_se, iy_se, n, c, IH, IW);
	  real out_val = nw_val * nw + ne_val * ne + sw_val * sw + se_val * se;
	  THTensor_fastSet4d(output, n, c, h, w, out_val);
	}
      }
    }
  }
}

/* Accumulate 'value' into input[n][c][y][x] iff (x,y) is inside the image. */
#define SAFE_ADD(input, x, y, n, c, H, W, value)		\
  do {								\
    if (x >= 0 && x < W && y >=0 && y < H) {			\
      real old_value = THTensor_fastGet4d(input, n, c, y, x);	\
      THTensor_fastSet4d(input, n, c, y, x, value + old_value);	\
    }								\
  } while(0)

/* Backward pass: scatter gradOutput into gradInput through the bilinear weights,
 * and accumulate the gradient w.r.t. the (normalized) grid coordinates into gradGrid. */
TH_API void THNN_(SpatialGridSamplerBilinear_updateGradInput)(
	  THNNState *state,
	  THTensor *input, THTensor *gradInput,
	  THTensor *grid, THTensor *gradGrid,
	  THTensor *gradOutput,
	  int padding_mode) {

  THNN_(SpatialGridSamplerBilinear_shapeCheck)(input, grid, gradOutput);
  int N = THTensor_(size)(input, 0);
  int C = THTensor_(size)(input, 1);
  int IH = THTensor_(size)(input, 2);
  int IW = THTensor_(size)(input, 3);
  int H = THTensor_(size)(grid, 1);
  int W = THTensor_(size)(grid, 2);

  THTensor_(resize4d)(gradInput, N, C, IH, IW);
  THTensor_(resize4d)(gradGrid, N, H, W, 2);
  THTensor_(zero)(gradInput);
  THTensor_(zero)(gradGrid);

  // loop over each output pixel
  int n, h, w;
#pragma omp parallel for private(n, h, w)
  for (n = 0; n < N; ++n) {
    for (h = 0; h < H; ++h) {
      for (w = 0; w < W; ++w) {
	// get the corresponding input x, y co-ordinates from grid
	real ix = THTensor_fastGet4d(grid, n, h, w, 0);
	real iy = THTensor_fastGet4d(grid, n, h, w, 1);

	real gix = 0;
	real giy = 0;

	// normalize ix, iy from [-1, 1] to [0, H-1] & [0, W-1]
	ix = ((ix + 1) / 2) * (IW-1);
	iy = ((iy + 1) / 2) * (IH-1);

	// get NE, NW, SE, SW pixel values from (x, y)
	int ix_nw = floor(ix);
	int iy_nw = floor(iy);
	int ix_ne = ix_nw + 1;
	int iy_ne = iy_nw;
	int ix_sw = ix_nw;
	int iy_sw = iy_nw + 1;
	int ix_se = ix_nw + 1;
	int iy_se = iy_nw + 1;

	// get surfaces to each neighbor:
	real nw = (ix_se - ix) * (iy_se - iy);
	real ne = (ix - ix_sw) * (iy_sw - iy);
	real sw = (ix_ne - ix) * (iy - iy_ne);
	real se = (ix - ix_nw) * (iy - iy_nw);

	// Keep both raw and clipped coordinates: the clipped ones address memory,
	// the raw ones keep the gradient weights consistent with the forward pass.
	int ix_nw_cl, iy_nw_cl, ix_ne_cl, iy_ne_cl, ix_sw_cl, iy_sw_cl, ix_se_cl, iy_se_cl;

	if (padding_mode==MODE_BORDER){
	  // get clipped NE, NW, SE, SW pixel values from (x, y)
	  CLIP_COORDINATES(ix_nw, ix_nw_cl, IW);
	  CLIP_COORDINATES(iy_nw, iy_nw_cl, IH);
	  CLIP_COORDINATES(ix_ne, ix_ne_cl, IW);
	  CLIP_COORDINATES(iy_ne, iy_ne_cl, IH);
	  CLIP_COORDINATES(ix_sw, ix_sw_cl, IW);
	  CLIP_COORDINATES(iy_sw, iy_sw_cl, IH);
	  CLIP_COORDINATES(ix_se, ix_se_cl, IW);
	  CLIP_COORDINATES(iy_se, iy_se_cl, IH);
	}
	else {
	  ix_nw_cl = ix_nw;
	  iy_nw_cl = iy_nw;
	  ix_ne_cl = ix_ne;
	  iy_ne_cl = iy_ne;
	  ix_sw_cl = ix_sw;
	  iy_sw_cl = iy_sw;
	  ix_se_cl = ix_se;
	  iy_se_cl = iy_se;
	}

	for (int c = 0; c < C; ++c) {
	  real gradout = THTensor_fastGet4d(gradOutput, n, c, h, w);

	  // calculate and set gradInput
	  SAFE_ADD(gradInput, ix_nw_cl, iy_nw_cl, n, c, IH, IW, nw * gradout);
	  SAFE_ADD(gradInput, ix_ne_cl, iy_ne_cl, n, c, IH, IW, ne * gradout);
	  SAFE_ADD(gradInput, ix_sw_cl, iy_sw_cl, n, c, IH, IW, sw * gradout);
	  SAFE_ADD(gradInput, ix_se_cl, iy_se_cl, n, c, IH, IW, se * gradout);

	  // calculate gradGrid (derivative of the bilinear blend w.r.t. ix, iy)
	  real nw_val = SAFE_GET(input, ix_nw_cl, iy_nw_cl, n, c, IH, IW);
	  real ne_val = SAFE_GET(input, ix_ne_cl, iy_ne_cl, n, c, IH, IW);
	  real sw_val = SAFE_GET(input, ix_sw_cl, iy_sw_cl, n, c, IH, IW);
	  real se_val = SAFE_GET(input, ix_se_cl, iy_se_cl, n, c, IH, IW);

	  gix -= nw_val * (iy_se - iy) * gradout;
	  gix += ne_val * (iy_sw - iy) * gradout;
	  gix -= sw_val * (iy - iy_ne) * gradout;
	  gix += se_val * (iy - iy_nw) * gradout;

	  giy -= nw_val * (ix_se - ix) * gradout;
	  giy -= ne_val * (ix - ix_sw) * gradout;
	  giy += sw_val * (ix_ne - ix) * gradout;
	  giy += se_val * (ix - ix_nw) * gradout;
	}

	// un-normalize gradGrid values back to [-1, 1] constraints
	gix = gix * (IW - 1) / 2;
	giy = giy * (IH - 1) / 2;

	real gix_old = THTensor_fastGet4d(gradGrid, n, h, w, 0);
	real giy_old = THTensor_fastGet4d(gradGrid, n, h, w, 1);

	THTensor_fastSet4d(gradGrid, n, h, w, 0, gix_old + gix);
	THTensor_fastSet4d(gradGrid, n, h, w, 1, giy_old + giy);
      }
    }
  }
}

#undef MIN
#undef MAX
#undef SAFE_GET
#undef CLIP_COORDINATES
#undef SAFE_ADD
#undef MODE_BORDER

#endif
sigmoid.c
#include <cdnn/activations.h>
#include <cdnn/model.h>

extern __Model__ * m;

/* Forward sigmoid activation, applied elementwise to the cached pre-activation
 * matrix of the current DENSE layer (m->current_layer->DENSE->cache).
 * Returns a newly allocated dARRAY with the same shape; the caller owns it.
 *
 * Numerically stable form: only exp of a non-positive argument is ever taken.
 * BUGFIX: the original used strict '>0'/'<0' branches, so inputs exactly equal
 * to 0.0f fell through and the output stayed at the calloc'd 0.0 instead of
 * the correct sigmoid(0) = 0.5. The first branch now covers z >= 0. */
dARRAY * forward_pass_sigmoid(){
  dARRAY * sigmoid_outf = (dARRAY*)malloc(sizeof(dARRAY));
  int total = m->current_layer->DENSE->cache->shape[0]*m->current_layer->DENSE->cache->shape[1];
  sigmoid_outf->matrix = (float*)calloc(total,sizeof(float));
  omp_set_num_threads(8);
  #pragma omp parallel for num_threads(8) shared(sigmoid_outf) schedule(static)
  for(int i=0;i<total;i++){
    float z = m->current_layer->DENSE->cache->matrix[i];
    float exp_res = 0.0f;
    // For numerical stability: always exponentiate -|z| so exp() cannot overflow.
    if(z>=0.0f){
      exp_res = (float)exp(-1 * z);
      sigmoid_outf->matrix[i] = 1.0f/(1.0f + exp_res);
    }
    else{
      exp_res = (float)exp(z);
      sigmoid_outf->matrix[i] = exp_res/(1.0f + exp_res);
    }
  }
  sigmoid_outf->shape[0] = m->current_layer->DENSE->cache->shape[0];
  sigmoid_outf->shape[1] = m->current_layer->DENSE->cache->shape[1];
  return sigmoid_outf;
}

/* Backward sigmoid: returns sigma'(z) = A * (1 - A) elementwise, where A is the
 * layer's stored activation (m->current_layer->DENSE->A).
 * Returns a newly allocated dARRAY; the caller owns it. */
dARRAY * backward_pass_sigmoid(){
  dARRAY * sigmoid_outb = NULL;
  dARRAY * temp = NULL;
  dARRAY * one = NULL;
  int dims[] = {m->current_layer->DENSE->A->shape[0],m->current_layer->DENSE->A->shape[1]};
  one = ones(dims);
  temp = subtract(one,m->current_layer->DENSE->A);  // (1 - A)
  free2d(one);
  one = NULL;
  sigmoid_outb = multiply(m->current_layer->DENSE->A,temp);  // A * (1 - A)
  free2d(temp);
  temp = NULL;
  return sigmoid_outb;
}

/* Construct a Sigmoid descriptor whose in/out dims mirror layer_matrix.
 * The caller is responsible for free()ing the returned struct. */
Sigmoid * Sigmoid__init__(dARRAY * layer_matrix){
  Sigmoid * sigmoid = (Sigmoid*)malloc(sizeof(Sigmoid));
  sigmoid->forward = forward_pass_sigmoid;
  sigmoid->backward = backward_pass_sigmoid;
  sigmoid->in_dims[0] = sigmoid->out_dims[0] = layer_matrix->shape[0];
  sigmoid->in_dims[1] = sigmoid->out_dims[1] = layer_matrix->shape[1];
  return sigmoid;
}

/* Dispatcher: runs the forward pass when args.status is 0, else the backward pass.
 * BUGFIX: the original placed free(s) after the return statements, so the
 * temporary Sigmoid struct leaked on every call; the result is now captured
 * and the struct freed before returning. */
dARRAY * (sigmoid)(Sigmoid_args args){
  Sigmoid * s = Sigmoid__init__(args.input);
  dARRAY * result = NULL;
  if(!args.status)
    result = s->forward(args.input);
  else
    result = s->backward(args.input);
  free(s);
  s = NULL;
  return result;
}
exercise1.c
/* Lab 3 Exercise 1 Program

We are going to start with the matrix multiplication code from the previous lab to see what
effect OpenMP has on improving the performance.

Set 'OpenMP Support' to 'Yes' (for both Debug and Release builds) in
Project->Properties->C/C++->Language
Add `_CRT_SECURE_NO_WARNINGS` to 'Preprocessor Definitions' in
Project->Properties->C/C++->Preprocessor
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
/* To enable OpenMP support in your project you will need to include the OpenMP header file
`omp.h` and enable the compiler to use the OpenMP runtime. */
#include <omp.h>

#define N 1024 // Number of rows/columns in our randomly generated matrices

typedef double element_type;         // The data type of the matrix elements
typedef element_type** matrixNN;     // Use 2-dimensional pointer to represent an N by N matrix

// Function declarations
void init_random_matrix(matrixNN m);
void init_zero_matrix(matrixNN m);
void write_matrix_to_file(const char *filename, const matrixNN r);
void transpose(matrixNN t);
void multiply(matrixNN r, const matrixNN a, const matrixNN b);

/*
Execute the program with and without parallelisation and compare the outputs using the Windows
`FC` file comparison command (similar to `unix diff`) in a terminal to ensure our results are
consistent and we haven't made any mistakes in parallelisation. This will print any file
differences (you will need to name the output files differently/give different paths).

After verifying that the correct output is produced after each modification of the code, we
record performance results below

| Machine | Optimisation | Execution time(s) | Timing method |
| :-----: | :----------: | :---------------: | :-----------: |
| Laptop | Serial | 1.93s, 1.96s | `clock()` |
| Laptop | Serial | 1.92s, 1.91s | `omp_get_wtime()` |
| Laptop | Parallel | 0.59s, 0.57s | `omp_get_wtime()` | 4 threads
| Library Desktop | Serial | 1.04s | `omp_get_wtime()` |
| Library Desktop | Parallel | 0.11s | `omp_get_wtime()` |
*/

// Allocate-or-die helper: the program cannot proceed without its matrices.
static void *checked_malloc(size_t bytes) {
	void *p = malloc(bytes);
	if (p == NULL) {
		fprintf(stderr, "Fatal: out of memory allocating %zu bytes\n", bytes);
		exit(EXIT_FAILURE);
	}
	return p;
}

// Allocate an N x N matrix: an array of N row POINTERS, then N rows of N elements.
// BUGFIX: the original allocated the row-pointer array with sizeof(element_type);
// that only works by accident when sizeof(double) >= sizeof(void*). Use the
// pointer size so the allocation is correct for any element_type.
static matrixNN alloc_matrix(void) {
	int i;
	matrixNN mat = (matrixNN)checked_malloc(sizeof(element_type*) * N);
	for (i = 0; i < N; i++)
		mat[i] = (element_type*)checked_malloc(sizeof(element_type) * N);
	return mat;
}

// Free the rows, then the row-pointer array.
static void free_matrix(matrixNN mat) {
	int i;
	for (i = 0; i < N; i++)
		free(mat[i]);
	free(mat);
}

// Entry point: allocate a, b, c; time c = a*b; write c to file; clean up.
// (Standard C requires main to return int, not void.)
int main(void) {
	double begin, end;
	double seconds;
	matrixNN a;
	matrixNN b;
	matrixNN c;

	a = alloc_matrix();
	b = alloc_matrix();
	c = alloc_matrix();

	init_random_matrix(a);
	init_random_matrix(b);
	init_zero_matrix(c);

	int max_threads = omp_get_max_threads();
	printf("OpenMP using %d threads\n", max_threads);

	begin = omp_get_wtime();
	// Calculate the matrix product of `a` and `b` and write the result to `c`
	multiply(c, a, b);
	end = omp_get_wtime();
	seconds = end - begin;
	printf("Matrix multiply complete in %.2f seconds\n", seconds);

	// Write the results matrix `c` to the file specified below
	printf("Writing results...\n");
	write_matrix_to_file("matrix_mul.txt", c);
	printf("Done writing results\n");

	free_matrix(a);
	free_matrix(b);
	free_matrix(c);
	return 0;
}

// Fill m with pseudo-random values in [0, 1].
void init_random_matrix(matrixNN m) {
	int i, j;
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			//m[i][j] = rand() % 100; // For randomly generated integers between 0 and 99
			m[i][j] = rand() / (element_type)RAND_MAX; // Normalize for `float` or `double` numbers between 0 and 1
		}
	}
}

// Zero every element of m.
void init_zero_matrix(matrixNN m) {
	int i, j;
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			m[i][j] = 0;
		}
	}
}

// Transpose t in place by swapping elements across the diagonal (saves memory).
void transpose(matrixNN t) {
	int i, j;
	element_type temp;
	// Iterate over the upper triangle only so each pair is swapped exactly once
	for (i = 0; i < N; i++) {
		for (j = i + 1; j < N; j++) {
			temp = t[i][j];
			t[i][j] = t[j][i];
			t[j][i] = temp;
		}
	}
}

/*
1.1 We will parallelise the outer loop (within the `multiply` function). Create a directive to
parallelise over the outer loop. Run your parallelised code and compare the text file output to
the original (serial version) using the file compare command `FC` in a Windows terminal.

1.2 Set the OpenMP clause `default(none)`. This will give a compiler error for any variables
which you have not explicitly defined the scope. Now try defining the scope for all variables of
the parallel block. This should achieve both a speedup and return the correct result
The variable `i` is the parallel loop counter so is implicitly defined as `private`. The
variables `a` and `b` are `const` so are implicitly `shared`.
*/
// r = a * b. Internally transposes b so the inner loop reads it by rows (cache friendly),
// then transposes it back so the caller's matrix is unchanged.
// BUGFIX: the original left b permanently transposed after the call — a surprising
// side effect on a parameter declared const.
void multiply(matrixNN r, const matrixNN a, const matrixNN b) {
	int i, j, k;
	element_type temp; // Holds the running sum for one entry of the product

	transpose(b); // Transpose in place so we can access entries of b by row during multiplication

	// Define the scope for all variables of the parallel block: `private` to each thread
	// vs. `shared` between threads
#pragma omp parallel for default(none) private(i, j, k, temp) shared(r, a, b)
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			temp = 0;
			for (k = 0; k < N; k++) {
				// Note that we access the transposed matrix `b` by rows
				temp += a[i][k] * b[j][k];
			}
			r[i][j] = temp;
		}
	}

	transpose(b); // Restore b so the caller sees it unmodified
}

// Write r to a tab-separated text file, two decimal places per element.
void write_matrix_to_file(const char* filename, const matrixNN r) {
	FILE* f;
	int i, j;
	f = fopen(filename, "w");
	if (f == NULL) {
		fprintf(stderr, "Error opening file '%s' for write\n", filename);
		return;
	}
	for (i = 0; i < N; i++) {
		for (j = 0; j < N; j++) {
			fprintf(f, "%0.2f\t", r[i][j]);
		}
		fprintf(f, "\n");
	}
	fclose(f);
}
panama_fmt_plug.c
/* Panama cracker patch for JtR. Hacked together during May of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

/* John the Ripper format plugin for raw Panama hashes: one sph_panama()
 * digest per candidate key, optionally tagged "$panama$". */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_panama_;
#elif FMT_REGISTERS_H
john_register_one(&fmt_panama_);
#else

#include <string.h>
#include "arch.h"
#include "sph_panama.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// OMP_SCALE tuned on core i7 quad core HT
//        1 - 217k
//       64 - 1930k
//      128 - 2099k
//      256 - 2204k  *** set to this level
//      512 - 2203k
//       1k - 2124k
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 8
#else
#define OMP_SCALE 256
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"

#define FORMAT_LABEL            "Panama"
#define FORMAT_NAME             ""
#define FORMAT_TAG              "$panama$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME          "Panama 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             32
#define SALT_SIZE               0
#define BINARY_ALIGN            4
#define SALT_ALIGN              1
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1

static struct fmt_tests panama__tests[] = {
	{"049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"$panama$049d698307d8541f22870dfa0a551099d3d02bc6d57c610a06a4585ed8d35ff8", "T"},
	{"a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"$panama$a2a70386b81fb918be17f00ff3e3b376a0462c4dc2eec7f2c63202c8874c037d", "abc"},
	{"017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"$panama$017686a23c4af3b9c074888ec76f893945d541cd17ee8011b2bd0ee2d581db34", "john"},
	{"3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{"$panama$3919248ab4c8dea4843663c532db9823169a71d03b0f918082c9f53748dea1e8", "passweird"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

/* Scale key buffers by thread count (times OMP_SCALE) when built with OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Accept "[$panama$]<64 hex chars>". */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p;
	int extra;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (hexlenl(p, &extra) != BINARY_SIZE*2 || extra)
		return 0;
	return 1;
}

/* Canonicalize: always prefix the hash with the "$panama$" tag. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + BINARY_SIZE * 2 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	strnzcpy(out + TAG_LENGTH, ciphertext, BINARY_SIZE * 2 + 1);
	return out;
}

/* Decode the 64 hex chars (after the last '$', if tagged) into a static 32-byte buffer. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors over the first word of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Hash every queued key with Panama. Without OpenMP the for-line is compiled
 * out and only index 0 is processed (MAX_KEYS_PER_CRYPT is 1 in that build). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_panama_context ctx;

		sph_panama_init(&ctx);
		sph_panama(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_panama_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Quick scan: compare only the first machine word of each digest. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Copy the candidate, truncated to PLAINTEXT_LENGTH and NUL-terminated. */
static void panama_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_panama_ = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		panama__tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		panama_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
CMS_simint.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include <sys/time.h> #include "simint/simint.h" #include "CMS_config.h" #include "CMS_basis.h" #include "CMS_simint.h" #define NCART(am) (((am)+1)*((am)+2)/2) typedef struct simint_shell shell_s; typedef struct simint_shell* shell_t; typedef struct simint_multi_shellpair multi_sp_s; typedef struct simint_multi_shellpair* multi_sp_t; static double CMS_get_walltime_sec() { double sec; struct timeval tv; gettimeofday(&tv, NULL); sec = tv.tv_sec + (double) tv.tv_usec / 1000000.0; return sec; } void CMS_Simint_init(BasisSet_p basis, Simint_p *simint, int nthread, double prim_scrval) { CMS_ASSERT(nthread > 0); Simint_p s = (Simint_p) calloc(1, sizeof(struct Simint)); CMS_ASSERT(s != NULL); simint_init(); s->nthread = nthread; s->max_am = basis->max_momentum; int max_ncart = NCART(s->max_am), buff_size; // Allocate workbuf for all threads on this node buff_size = simint_ostei_workmem(0, s->max_am); if (buff_size < max_ncart * max_ncart) buff_size = max_ncart * max_ncart; buff_size = (buff_size + 7) / 8 * 8; // Align to 8 double (64 bytes) s->workmem_per_thread = buff_size; s->workbuf = (double *) CMS_malloc_aligned(s->workmem_per_thread * nthread * sizeof(double), 64); CMS_ASSERT(s->workbuf != NULL); // Allocate outbuf for all threads on this node // Output buffer should holds Simint_NSHELL_SIMD ERI results // +8 for Simint primitive screening statistic info buff_size = max_ncart * max_ncart * max_ncart * max_ncart; buff_size = (buff_size + 7) / 8 * 8; // Align to 8 double (64 bytes) s->outmem_per_thread = buff_size * _SIMINT_NSHELL_SIMD + 8; s->outbuf = (double *) CMS_malloc_aligned(s->outmem_per_thread * nthread * sizeof(double), 64); CMS_ASSERT(s->outbuf != NULL); // Form and store Simint shells for all shells of this molecule int nshell = basis->nshells; size_t shells_msize = sizeof(shell_s) * nshell; s->nshell = nshell; s->shells = (shell_t) malloc(shells_msize); 
CMS_ASSERT(s->shells != NULL); s->shell_memsize = (double) shells_msize; shell_t shell_ptr = s->shells; for (int i=0; i < nshell; i++) { // Initialize variables in structure simint_initialize_shell(shell_ptr); // Allocate space for alpha and coef for the shell simint_allocate_shell(basis->nexp[i], shell_ptr); s->shell_memsize += (double) shell_ptr->memsize; shell_ptr->am = basis->momentum[i]; shell_ptr->nprim = basis->nexp[i]; shell_ptr->x = basis->xyz0[i*4+0]; shell_ptr->y = basis->xyz0[i*4+1]; shell_ptr->z = basis->xyz0[i*4+2]; for (int j=0; j<basis->nexp[i]; j++) { shell_ptr->alpha[j] = basis->exp[i][j]; shell_ptr->coef[j] = basis->cc[i][j]; } shell_ptr++; } // Here we assume there are no unit shells (shells with zero orbital exponent) simint_normalize_shells(nshell, s->shells); // For primitive screening, fast Schwarz might have issue with aug-cc-pVDZ, // try to use SIMINT_SCREEN_SCHWARZ if necessary if (prim_scrval < 0.0 || prim_scrval > 1) prim_scrval = 1e-14; s->screen_method = SIMINT_SCREEN_SCHWARZ; s->screen_tol = prim_scrval; printf("Simint screen method = SIMINT_SCREEN_SCHWARZ \n"); printf("Simint prim screen tol = %.2e\n", s->screen_tol); // Precompute all shell pairs // Will be used by CMS_Simint_fill_multi_sp_list(), DO NOT SKIP it!!! double sp_msize = sizeof(multi_sp_s) * nshell * nshell; s->shellpairs = (multi_sp_t) malloc(sp_msize); CMS_ASSERT(s->shellpairs != NULL); s->shellpair_memsize = (double) sp_msize; // Do not initialize all shell pairs now to reduce the memory usage, // unique screened shell pairs will be created after screening. 
// Reset timer s->ostei_setup = 0.0; s->ostei_actual = 0.0; s->fock_update_F = 0.0; // Allocate space for statistic info int stat_info_size = sizeof(double) * nthread; s->num_multi_shellpairs = (double*) malloc(stat_info_size); s->sum_nprim = (double*) malloc(stat_info_size); s->num_screened_prim = (double*) malloc(stat_info_size); s->num_unscreened_prim = (double*) malloc(stat_info_size); s->num_screened_vec = (double*) malloc(stat_info_size); s->num_unscreened_vec = (double*) malloc(stat_info_size); CMS_ASSERT(s->num_multi_shellpairs != NULL && s->sum_nprim != NULL); CMS_ASSERT(s->num_screened_prim != NULL && s->num_unscreened_prim != NULL); CMS_ASSERT(s->num_screened_vec != NULL && s->num_unscreened_vec != NULL); memset(s->num_multi_shellpairs, 0, stat_info_size); memset(s->sum_nprim, 0, stat_info_size); memset(s->num_screened_prim, 0, stat_info_size); memset(s->num_unscreened_prim, 0, stat_info_size); memset(s->num_screened_vec, 0, stat_info_size); memset(s->num_unscreened_vec, 0, stat_info_size); s->df_am_shell_id = NULL; s->df_am_shell_spos = NULL; s->df_am_shell_num = NULL; s->df_shells = NULL; s->df_shellpairs = NULL; *simint = s; } void CMS_Simint_create_uniq_scr_sp(Simint_p simint, const int nsp, const int *M_list, const int *N_list) { int nshell = simint->nshell; #pragma omp parallel for schedule(dynamic, 16) for (int i = 0; i < nsp; i++) { int M = M_list[i]; int N = N_list[i]; multi_sp_t MN_pair = &simint->shellpairs[M * nshell + N]; simint_initialize_multi_shellpair(MN_pair); simint_create_multi_shellpair(1, simint->shells + M, 1, simint->shells + N, MN_pair, simint->screen_method); simint->shellpair_memsize += (double) MN_pair->memsize; if (M != N) { multi_sp_t NM_pair = &simint->shellpairs[N * nshell + M]; simint_initialize_multi_shellpair(NM_pair); simint_create_multi_shellpair(1, simint->shells + N, 1, simint->shells + M, NM_pair, simint->screen_method); simint->shellpair_memsize += (double) NM_pair->memsize; } } double workmem_MB = 
simint->workmem_per_thread * 64 * sizeof(double) / 1048576.0; double outmem_MB = simint->outmem_per_thread * 64 * sizeof(double) / 1048576.0; double shellpair_mem_MB = simint->shellpair_memsize / 1048576.0; double stat_info_mem_MB = sizeof(double) * simint->nthread * 6 / 1048576.0; double Simint_mem_MB = workmem_MB + outmem_MB + outmem_MB + shellpair_mem_MB + stat_info_mem_MB; printf("CMS Simint memory usage = %.2lf MB \n", Simint_mem_MB); } void CMS_Simint_setup_DF(Simint_p simint, BasisSet_p df_basis) { Simint_p s = simint; // Reallocate workbuf for density fitting s->df_max_am = df_basis->max_momentum; if (s->df_max_am > s->max_am) s->max_am = s->df_max_am; int max_ncart = NCART(s->max_am); int buff_size = simint_ostei_workmem(0, s->max_am); if (buff_size < max_ncart * max_ncart) buff_size = max_ncart * max_ncart; buff_size = (buff_size + 7) / 8 * 8; // Align to 8 double (64 bytes) s->workmem_per_thread = buff_size; CMS_free_aligned(s->workbuf); s->workbuf = (double *) CMS_malloc_aligned(s->workmem_per_thread * s->nthread * sizeof(double), 64); CMS_ASSERT(s->workbuf != NULL); // Form and store Simint shells for all density fitting shells // The last shell is the unit shell int df_nshell = df_basis->nshells; size_t df_shells_msize = sizeof(shell_s) * (df_nshell + 1); s->df_nshell = df_nshell; s->df_shells = (shell_t) malloc(df_shells_msize); CMS_ASSERT(s->shells != NULL); s->shell_memsize = (double) df_shells_msize; // Copy all density fitting shells shell_t df_shell_ptr = s->df_shells; for (int i = 0; i < df_nshell; i++) { // Initialize variables in structure simint_initialize_shell(df_shell_ptr); // Allocate space for alpha and coef for the shell simint_allocate_shell(df_basis->nexp[i], df_shell_ptr); s->shell_memsize += (double) df_shell_ptr->memsize; df_shell_ptr->am = df_basis->momentum[i]; df_shell_ptr->nprim = df_basis->nexp[i]; df_shell_ptr->x = df_basis->xyz0[i*4+0]; df_shell_ptr->y = df_basis->xyz0[i*4+1]; df_shell_ptr->z = df_basis->xyz0[i*4+2]; for 
(int j = 0; j < df_basis->nexp[i]; j++) { df_shell_ptr->alpha[j] = df_basis->exp[i][j]; df_shell_ptr->coef[j] = df_basis->cc[i][j]; } df_shell_ptr++; } // The unit shell simint_initialize_shell(df_shell_ptr); simint_allocate_shell(1, df_shell_ptr); s->shell_memsize += (double) df_shell_ptr->memsize; df_shell_ptr->am = 0; df_shell_ptr->nprim = 1; df_shell_ptr->x = 0; df_shell_ptr->y = 0; df_shell_ptr->z = 0; df_shell_ptr->alpha[0] = 0; df_shell_ptr->coef[0] = 1; // Normalize shells except the unit shells simint_normalize_shells(df_nshell, s->df_shells); // Precompute all shell pairs for density fitting, DO NOT SKIP IT size_t df_sp_msize = sizeof(multi_sp_s) * df_nshell; s->df_shellpairs = (multi_sp_t) malloc(df_sp_msize); CMS_ASSERT(s->df_shellpairs != NULL); s->shellpair_memsize += (double) df_sp_msize; int unit_shell_id = df_nshell; for (int i = 0; i < df_nshell; i++) { multi_sp_t pair; pair = &s->df_shellpairs[i]; simint_initialize_multi_shellpair(pair); simint_create_multi_shellpair(1, s->df_shells+i, 1, s->df_shells+unit_shell_id, pair, s->screen_method); s->shellpair_memsize += (double) pair->memsize; } // Group density fitting shells by AM s->df_am_shell_id = (int*) malloc(sizeof(int) * df_nshell); s->df_am_shell_spos = (int*) malloc(sizeof(int) * (s->df_max_am + 2)); s->df_am_shell_num = (int*) malloc(sizeof(int) * (s->df_max_am + 1)); memset(s->df_am_shell_num, 0, sizeof(int) * (s->df_max_am + 1)); for (int i = 0; i < df_nshell; i++) { int am = s->df_shells[i].am; s->df_am_shell_num[am]++; } memset(s->df_am_shell_spos, 0, sizeof(int) * (s->df_max_am + 2)); for (int i = 1; i <= s->df_max_am + 1; i++) s->df_am_shell_spos[i] = s->df_am_shell_spos[i - 1] + s->df_am_shell_num[i - 1]; memset(s->df_am_shell_num, 0, sizeof(int) * (s->df_max_am + 1)); for (int i = 0; i < df_nshell; i++) { int am = s->df_shells[i].am; int group_pos = s->df_am_shell_spos[am] + s->df_am_shell_num[am]; s->df_am_shell_id[group_pos] = i; s->df_am_shell_num[am]++; } } void 
CMS_Simint_free_DF_shellpairs(Simint_p simint) { int df_nshell = simint->df_nshell; simint_free_shells(df_nshell + 1, simint->df_shells); simint_free_multi_shellpairs(df_nshell, simint->df_shellpairs); free(simint->df_shells); free(simint->df_shellpairs); free(simint->df_am_shell_id); free(simint->df_am_shell_spos); free(simint->df_am_shell_num); simint->df_shells = NULL; simint->df_shellpairs = NULL; simint->df_am_shell_id = NULL; simint->df_am_shell_spos = NULL; simint->df_am_shell_num = NULL; } void CMS_Simint_destroy(Simint_p simint, int show_stat) { // Generate final statistic info double sum_msp = 0, sum_nprim = 0; double total_prim = 0, unscreened_prim = 0; double total_vec = 0, unscreened_vec = 0; for (int i = 0; i < simint->nthread; i++) { sum_msp += (double) simint->num_multi_shellpairs[i]; sum_nprim += (double) simint->sum_nprim[i]; total_prim += simint->num_screened_prim[i] + simint->num_unscreened_prim[i]; unscreened_prim += simint->num_unscreened_prim[i]; total_vec += simint->num_screened_vec[i] + simint->num_unscreened_vec[i]; unscreened_vec += simint->num_unscreened_vec[i]; } double avg_nprim = sum_nprim / sum_msp; double prim_unscreen_ratio = unscreened_prim / total_prim; double vec_unscreen_ratio = unscreened_vec / total_vec; // Print timer and statistic info if (show_stat) { printf( "Timer: Simint setup, Simint ERI actual, Fock mat accum. = %lf, %lf, %lf sec\n", simint->ostei_setup, simint->ostei_actual, simint->fock_update_F ); printf( "Simint statistic: avg. 
ket-side nprim, prim unscreened ratio, SIMD unscreened ratio = %.1lf, %.1lf %%, %.1lf %%\n", avg_nprim, prim_unscreen_ratio * 100.0, vec_unscreen_ratio * 100.0 ); } // Free shell pair info int nshell = simint->nshell; int df_nshell = simint->df_nshell; if (simint->df_shells != NULL) { simint_free_shells(df_nshell + 1, simint->df_shells); simint_free_multi_shellpairs(df_nshell, simint->df_shellpairs); } simint_free_shells(nshell, simint->shells); simint_free_multi_shellpairs(nshell * nshell, simint->shellpairs); // Free memory free(simint->shellpairs); free(simint->shells); free(simint->df_shellpairs); free(simint->df_shells); free(simint->df_am_shell_id); free(simint->df_am_shell_spos); free(simint->df_am_shell_num); CMS_free_aligned(simint->workbuf); CMS_free_aligned(simint->outbuf); free(simint->num_multi_shellpairs); free(simint->sum_nprim); free(simint->num_screened_prim); free(simint->num_unscreened_prim); free(simint->num_screened_vec); free(simint->num_unscreened_vec); free(simint); simint_finalize(); } int CMS_Simint_get_sp_AM_idx(Simint_p simint, int P, int Q) { shell_t shells = simint->shells; return shells[P].am * ((_SIMINT_OSTEI_MAXAM) + 1) + shells[Q].am; } double CMS_Simint_get_DF_sp_scrval(Simint_p simint, int i) { multi_sp_t pair; pair = &simint->df_shellpairs[i]; return pair->screen_max; } void CMS_Simint_create_multi_sp(void **multi_sp_) { multi_sp_t multi_sp; multi_sp = (multi_sp_t) malloc(sizeof(multi_sp_s)); CMS_ASSERT(multi_sp != NULL); // Need not to worry about memory allocation, it will be handled later simint_initialize_multi_shellpair(multi_sp); *multi_sp_ = multi_sp; } void CMS_Simint_free_multi_sp(void *multi_sp) { CMS_ASSERT(multi_sp != NULL); simint_free_multi_shellpair(multi_sp); free(multi_sp); } static void CMS_Simint_fill_multi_sp_list( Simint_p simint, int npair, int *P_list, int *Q_list, multi_sp_t multi_sp ) { // Put the original multi_shellpairs corresponding to the shell // pairs (P_list[i], Q_list[i]) into the list 
multi_sp_t Pin[_SIMINT_NSHELL_SIMD]; for (int ipair = 0; ipair < npair; ipair++) { int P = P_list[ipair]; int Q = Q_list[ipair]; Pin[ipair] = &simint->shellpairs[P * simint->nshell + Q]; } // Reset output multi_sp and copy from existing multi_shellpairs. // simint_cat_multi_shellpair() will check and allocate memory for output multi_sp->nprim = 0; simint_cat_shellpairs( npair, (const struct simint_multi_shellpair **) Pin, multi_sp, simint->screen_method ); } void CMS_Simint_calc_pair_Hcore( BasisSet_p basis, Simint_p simint, int tid, int A, int B, double **integrals, int *nint ) { int size, ret; struct simint_shell *shells = simint->shells; size = NCART(shells[A].am) * NCART(shells[B].am); double *workbuf = &simint->workbuf[tid * simint->workmem_per_thread]; ret = simint_compute_ke(&shells[A], &shells[B], workbuf); CMS_ASSERT(ret == 1); double *output_buff = &simint->outbuf[tid * simint->outmem_per_thread]; ret = simint_compute_potential( basis->natoms, basis->charge, basis->xn, basis->yn, basis->zn, &shells[A], &shells[B], output_buff ); CMS_ASSERT(ret == 1); for (int i = 0; i < size; i++) output_buff[i] += workbuf[i]; *integrals = output_buff; *nint = size; } void CMS_Simint_calc_pair_ovlp( Simint_p simint, int tid, int A, int B, double **integrals, int *nint ) { int size, ret; struct simint_shell *shells = simint->shells; double *output_buff = &simint->outbuf[tid*simint->outmem_per_thread]; ret = simint_compute_overlap(&shells[A], &shells[B], output_buff); CMS_ASSERT(ret == 1); size = NCART(shells[A].am) * NCART(shells[B].am); *integrals = output_buff; *nint = size; } void CMS_Simint_calc_shellquartet( Simint_p simint, int tid, int M, int N, int P, int Q, double **ERI, int *nint ) { double setup_start, setup_end, ostei_start, ostei_end; if (tid == 0) setup_start = CMS_get_walltime_sec(); int nshell = simint->nshell; multi_sp_t bra_pair = &simint->shellpairs[M * nshell + N]; multi_sp_t ket_pair = &simint->shellpairs[P * nshell + Q]; 
simint->num_multi_shellpairs[tid] += 1.0; simint->sum_nprim[tid] += (double) ket_pair->nprim; if (tid == 0) { setup_end = CMS_get_walltime_sec(); ostei_start = CMS_get_walltime_sec(); } double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread]; double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread]; int ret = simint_compute_eri( bra_pair, ket_pair, simint->screen_tol, work_buff, output_buff ); if (tid == 0) ostei_end = CMS_get_walltime_sec(); int ERI_size; if (ret < 0) { ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized } else { CMS_ASSERT(ret == 1); // Single shell quartet shell_t shells = simint->shells; ERI_size = NCART(shells[M].am) * NCART(shells[N].am) * NCART(shells[P].am) * NCART(shells[Q].am); } *ERI = output_buff; *nint = ERI_size; double *prim_screen_stat_info = *ERI + ERI_size; simint->num_unscreened_prim[tid] += prim_screen_stat_info[0]; simint->num_screened_prim[tid] += prim_screen_stat_info[1]; simint->num_unscreened_vec[tid] += prim_screen_stat_info[2]; simint->num_screened_vec[tid] += prim_screen_stat_info[3]; if (tid == 0) { simint->ostei_setup += setup_end - setup_start; simint->ostei_actual += ostei_end - ostei_start; } } void CMS_Simint_calc_MNMN_shellquartet( Simint_p simint, int tid, int M, int N, void **multi_sp_, double **ERI, int *nint ) { double setup_start, setup_end, ostei_start, ostei_end; if (tid == 0) setup_start = CMS_get_walltime_sec(); // Don't need to call simint_free_multi_shellpair() after use, // Simint can reuse the allocated space next time if possible multi_sp_t MN_pair = (multi_sp_t) *multi_sp_; simint_create_multi_shellpair( 1, simint->shells + M, 1, simint->shells + N, MN_pair, simint->screen_method ); simint->num_multi_shellpairs[tid] += 1.0; simint->sum_nprim[tid] += (double) MN_pair->nprim; if (tid == 0) { setup_end = CMS_get_walltime_sec(); ostei_start = CMS_get_walltime_sec(); } double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread]; 
double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread]; int ret = simint_compute_eri( MN_pair, MN_pair, simint->screen_tol, work_buff, output_buff ); if (tid == 0) ostei_end = CMS_get_walltime_sec(); int ERI_size; if (ret < 0) { ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized } else { CMS_ASSERT(ret == 1); // Single shell quartet shell_t shells = simint->shells; ERI_size = NCART(shells[M].am) * NCART(shells[N].am) * NCART(shells[M].am) * NCART(shells[N].am); } *ERI = output_buff; *nint = ERI_size; double *prim_screen_stat_info = *ERI + ERI_size; simint->num_unscreened_prim[tid] += prim_screen_stat_info[0]; simint->num_screened_prim[tid] += prim_screen_stat_info[1]; simint->num_unscreened_vec[tid] += prim_screen_stat_info[2]; simint->num_screened_vec[tid] += prim_screen_stat_info[3]; if (tid == 0) { simint->ostei_setup += setup_end - setup_start; simint->ostei_actual += ostei_end - ostei_start; } } void CMS_Simint_calc_shellquartet_batch( Simint_p simint, int tid, int M, int N, int npair, int *P_list, int *Q_list, double **batch_ERI, int *batch_nint, void **multi_sp_ ) { double setup_start, setup_end, ostei_start, ostei_end; if (tid == 0) setup_start = CMS_get_walltime_sec(); multi_sp_t bra_pair = &simint->shellpairs[M * simint->nshell + N]; multi_sp_t ket_pairs = (multi_sp_t) *multi_sp_; CMS_Simint_fill_multi_sp_list(simint, npair, P_list, Q_list, ket_pairs); simint->num_multi_shellpairs[tid] += 1.0; simint->sum_nprim[tid] += (double) ket_pairs->nprim; if (tid == 0) { setup_end = CMS_get_walltime_sec(); ostei_start = CMS_get_walltime_sec(); } double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread]; double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread]; int ret = simint_compute_eri( bra_pair, ket_pairs, simint->screen_tol, work_buff, output_buff ); if (tid == 0) ostei_end = CMS_get_walltime_sec(); int ERI_size; if (ret <= 0) { ERI_size = 0; // Return zero ERI_size to caller; output 
buffer is not initialized } else { CMS_ASSERT(ret == npair); shell_t shells = simint->shells; int P = P_list[0], Q = Q_list[0]; ERI_size = NCART(shells[M].am) * NCART(shells[N].am) * NCART(shells[P].am) * NCART(shells[Q].am); } // Shells in P_list[] have same AM, shells in Q_list[] have same AM, // The result sizes for each quartets are the same *batch_ERI = output_buff; *batch_nint = ERI_size; double *prim_screen_stat_info = *batch_ERI + ERI_size * npair; simint->num_unscreened_prim[tid] += prim_screen_stat_info[0]; simint->num_screened_prim[tid] += prim_screen_stat_info[1]; simint->num_unscreened_vec[tid] += prim_screen_stat_info[2]; simint->num_screened_vec[tid] += prim_screen_stat_info[3]; if (tid == 0) { simint->ostei_setup += setup_end - setup_start; simint->ostei_actual += ostei_end - ostei_start; } } static void CMS_Simint_fill_DF_multi_sp_list( Simint_p simint, int npair, int *P_list, struct simint_multi_shellpair *multi_sp ) { // Put the original multi_shellpairs corresponding to the shell // pairs (P_list[i], Q_list[i]) into the list multi_sp_t Pin[_SIMINT_NSHELL_SIMD]; for (int ipair = 0; ipair < npair; ipair++) { int P = P_list[ipair]; Pin[ipair] = &simint->df_shellpairs[P]; } // Reset output multi_sp and copy from existing multi_shellpairs. 
// simint_cat_multi_shellpair() will check and allocate memory for output multi_sp->nprim = 0; simint_cat_shellpairs( npair, (const struct simint_multi_shellpair **) Pin, multi_sp, simint->screen_method ); } void CMS_Simint_calc_DF_shellpair( Simint_p simint, int tid, int M, int N, double **integrals, int *nint ) { double setup_start, setup_end, ostei_start, ostei_end; if (tid == 0) setup_start = CMS_get_walltime_sec(); multi_sp_t bra_pair = &simint->df_shellpairs[M]; multi_sp_t ket_pair = &simint->df_shellpairs[N]; simint->num_multi_shellpairs[tid] += 1.0; simint->sum_nprim[tid] += (double) ket_pair->nprim; if (tid == 0) { setup_end = CMS_get_walltime_sec(); ostei_start = CMS_get_walltime_sec(); } double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread]; double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread]; int ret = simint_compute_eri( bra_pair, ket_pair, simint->screen_tol, work_buff, output_buff ); if (tid == 0) ostei_end = CMS_get_walltime_sec(); int ERI_size; if (ret < 0) { ERI_size = 0; // Return zero ERI_size to caller; shell quartet is screened } else { CMS_ASSERT(ret == 1); shell_t df_shells = simint->df_shells; ERI_size = NCART(df_shells[M].am) * NCART(df_shells[N].am); } *integrals = output_buff; *nint = ERI_size; double *prim_screen_stat_info = *integrals + ERI_size; simint->num_unscreened_prim[tid] += prim_screen_stat_info[0]; simint->num_screened_prim[tid] += prim_screen_stat_info[1]; simint->num_unscreened_vec[tid] += prim_screen_stat_info[2]; simint->num_screened_vec[tid] += prim_screen_stat_info[3]; if (tid == 0) { simint->ostei_setup += setup_end - setup_start; simint->ostei_actual += ostei_end - ostei_start; } } void CMS_Simint_calc_DF_shellquartet_batch( Simint_p simint, int tid, int M, int N, int npair, int *P_list, double **batch_ERI, int *batch_nint, void **multi_sp_ ) { double setup_start, setup_end, ostei_start, ostei_end; if (tid == 0) setup_start = CMS_get_walltime_sec(); multi_sp_t bra_pair = 
&simint->shellpairs[M * simint->nshell + N]; multi_sp_t ket_pairs = (multi_sp_t) *multi_sp_; CMS_Simint_fill_DF_multi_sp_list(simint, npair, P_list, ket_pairs); simint->num_multi_shellpairs[tid] += 1.0; simint->sum_nprim[tid] += (double) ket_pairs->nprim; if (tid == 0) { setup_end = CMS_get_walltime_sec(); ostei_start = CMS_get_walltime_sec(); } double *work_buff = &simint->workbuf[tid * simint->workmem_per_thread]; double *output_buff = &simint->outbuf [tid * simint->outmem_per_thread]; int ret = simint_compute_eri( bra_pair, ket_pairs, simint->screen_tol, work_buff, output_buff ); if (tid == 0) ostei_end = CMS_get_walltime_sec(); int ERI_size; if (ret <= 0) { ERI_size = 0; // Return zero ERI_size to caller; output buffer is not initialized } else { CMS_ASSERT(ret == npair); shell_t shells = simint->shells; shell_t df_shells = simint->df_shells; int P = P_list[0]; ERI_size = NCART(shells[M].am) * NCART(shells[N].am) * NCART(df_shells[P].am); } // Shells in P_list[] have same AM, shells in Q_list[] have same AM, // The result sizes for each quartets are the same *batch_ERI = output_buff; *batch_nint = ERI_size; double *prim_screen_stat_info = *batch_ERI + ERI_size * npair; simint->num_unscreened_prim[tid] += prim_screen_stat_info[0]; simint->num_screened_prim[tid] += prim_screen_stat_info[1]; simint->num_unscreened_vec[tid] += prim_screen_stat_info[2]; simint->num_screened_vec[tid] += prim_screen_stat_info[3]; if (tid == 0) { simint->ostei_setup += setup_end - setup_start; simint->ostei_actual += ostei_end - ostei_start; } } void CMS_Simint_add_accF_timer(Simint_p simint, double sec) { simint->fock_update_F += sec; } void CMS_Simint_reset_stat_info(Simint_p simint) { int stat_info_size = sizeof(double) * simint->nthread; memset(simint->num_multi_shellpairs, 0, stat_info_size); memset(simint->sum_nprim, 0, stat_info_size); memset(simint->num_screened_prim, 0, stat_info_size); memset(simint->num_unscreened_prim, 0, stat_info_size); memset(simint->num_screened_vec, 0, 
stat_info_size); memset(simint->num_unscreened_vec, 0, stat_info_size); }
/* ===== real_to_reciprocal.c ===== */
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */

/* This file is part of phonopy. */

/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */

/* * Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer. */

/* * Redistributions in binary form must reproduce the above copyright */
/*   notice, this list of conditions and the following disclaimer in */
/*   the documentation and/or other materials provided with the */
/*   distribution. */

/* * Neither the name of the phonopy project nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission. */

/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "phonoc_array.h"
#include "phonoc_const.h"
#include "real_to_reciprocal.h"
#include "lapack_wrapper.h"

static void real_to_reciprocal_single_thread(lapack_complex_double *fc3_reciprocal,
                                             const double q_vecs[3][3],
                                             const double *fc3,
                                             const long is_compact_fc3,
                                             const double *shortest_vectors,
                                             const long svecs_dims[3],
                                             const long *multiplicity,
                                             const long *p2s_map,
                                             const long *s2p_map);
static void real_to_reciprocal_openmp(lapack_complex_double *fc3_reciprocal,
                                      const double q_vecs[3][3],
                                      const double *fc3,
                                      const long is_compact_fc3,
                                      const double *shortest_vectors,
                                      const long svecs_dims[3],
                                      const long *multiplicity,
                                      const long *p2s_map,
                                      const long *s2p_map);
static void real_to_reciprocal_elements(lapack_complex_double *fc3_rec_elem,
                                        const double q_vecs[3][3],
                                        const double *fc3,
                                        const long is_compact_fc3,
                                        const double *shortest_vectors,
                                        const long svecs_dims[3],
                                        const long *multiplicity,
                                        const long *p2s,
                                        const long *s2p,
                                        const long pi0,
                                        const long pi1,
                                        const long pi2);
static lapack_complex_double get_phase_factor(const double q[3][3],
                                              const long qi,
                                              const double *shortest_vectors,
                                              const long multi);
static lapack_complex_double get_pre_phase_factor(const long i,
                                                  const double q_vecs[3][3],
                                                  const double *shortest_vectors,
                                                  const long svecs_dims[3],
                                                  const long *multiplicity,
                                                  const long *p2s_map);

/* Fourier-transform the real-space third-order force constants fc3 to
 * reciprocal space at the three q-points in q_vecs.
 * fc3_reciprocal has shape [num_patom, num_patom, num_patom, 3, 3, 3].
 * openmp_at_bands selects the OpenMP-parallelized code path. */
void r2r_real_to_reciprocal(lapack_complex_double *fc3_reciprocal,
                            const double q_vecs[3][3],
                            const double *fc3,
                            const long is_compact_fc3,
                            const double *shortest_vectors,
                            const long svecs_dims[3],
                            const long *multiplicity,
                            const long *p2s_map,
                            const long *s2p_map,
                            const long openmp_at_bands)
{
  if (openmp_at_bands) {
    real_to_reciprocal_openmp(fc3_reciprocal,
                              q_vecs,
                              fc3,
                              is_compact_fc3,
                              shortest_vectors,
                              svecs_dims,
                              multiplicity,
                              p2s_map,
                              s2p_map);
  } else {
    real_to_reciprocal_single_thread(fc3_reciprocal,
                                     q_vecs,
                                     fc3,
                                     is_compact_fc3,
                                     shortest_vectors,
                                     svecs_dims,
                                     multiplicity,
                                     p2s_map,
                                     s2p_map);
  }
}

/* Serial transform: for each primitive-atom triplet (i, j, k) build the
 * 27-element fc3 block, then multiply atom i's slice by its pre-phase
 * factor exp(i (q0+q1+q2) . r_i). */
static void
real_to_reciprocal_single_thread(lapack_complex_double *fc3_reciprocal,
                                 const double q_vecs[3][3],
                                 const double *fc3,
                                 const long is_compact_fc3,
                                 const double *shortest_vectors,
                                 const long svecs_dims[3],
                                 const long *multiplicity,
                                 const long *p2s_map,
                                 const long *s2p_map)
{
  long i, j, k;
  long num_patom, adrs_shift;
  lapack_complex_double pre_phase_factor;

  num_patom = svecs_dims[1];

  for (i = 0; i < num_patom; i++) {
    for (j = 0; j < num_patom; j++) {
      for (k = 0; k < num_patom; k++) {
        real_to_reciprocal_elements(fc3_reciprocal +
                                    i * 27 * num_patom * num_patom +
                                    j * 27 * num_patom +
                                    k * 27,
                                    q_vecs,
                                    fc3,
                                    is_compact_fc3,
                                    shortest_vectors,
                                    svecs_dims,
                                    multiplicity,
                                    p2s_map,
                                    s2p_map,
                                    i, j, k);
      }
    }
    pre_phase_factor = get_pre_phase_factor(
      i, q_vecs, shortest_vectors, svecs_dims, multiplicity, p2s_map);
    /* Scale atom i's whole slice by the pre-phase factor */
    adrs_shift = i * num_patom * num_patom * 27;
    for (j = 0; j < num_patom * num_patom * 27; j++) {
      fc3_reciprocal[adrs_shift + j] =
        phonoc_complex_prod(fc3_reciprocal[adrs_shift + j], pre_phase_factor);
    }
  }
}

/* Same computation as real_to_reciprocal_single_thread(), but the
 * (j, k) pairs and the pre-phase scaling loop are OpenMP-parallel. */
static void real_to_reciprocal_openmp(lapack_complex_double *fc3_reciprocal,
                                      const double q_vecs[3][3],
                                      const double *fc3,
                                      const long is_compact_fc3,
                                      const double *shortest_vectors,
                                      const long svecs_dims[3],
                                      const long *multiplicity,
                                      const long *p2s_map,
                                      const long *s2p_map)
{
  long i, j, k, jk;
  long num_patom, adrs_shift;
  lapack_complex_double pre_phase_factor;

  num_patom = svecs_dims[1];

  for (i = 0; i < num_patom; i++) {
#pragma omp parallel for private(j, k)
    for (jk = 0; jk < num_patom * num_patom; jk++) {
      j = jk / num_patom;
      k = jk % num_patom;
      real_to_reciprocal_elements(fc3_reciprocal +
                                  i * 27 * num_patom * num_patom +
                                  j * 27 * num_patom +
                                  k * 27,
                                  q_vecs,
                                  fc3,
                                  is_compact_fc3,
                                  shortest_vectors,
                                  svecs_dims,
                                  multiplicity,
                                  p2s_map,
                                  s2p_map,
                                  i, j, k);
    }
    pre_phase_factor = get_pre_phase_factor(
      i, q_vecs, shortest_vectors, svecs_dims, multiplicity, p2s_map);
    adrs_shift = i * num_patom * num_patom * 27;
#pragma omp parallel for
    for (j = 0; j < num_patom * num_patom * 27; j++) {
      fc3_reciprocal[adrs_shift + j] =
        phonoc_complex_prod(fc3_reciprocal[adrs_shift + j], pre_phase_factor);
    }
  }
}

/* Accumulate one 27-element (3x3x3) reciprocal fc3 block for the
 * primitive-atom triplet (pi0, pi1, pi2): sum over the supercell
 * images of atoms 1 and 2 weighted by phase factors for q1 and q2. */
static void real_to_reciprocal_elements(lapack_complex_double *fc3_rec_elem,
                                        const double q_vecs[3][3],
                                        const double *fc3,
                                        const long is_compact_fc3,
                                        const double *shortest_vectors,
                                        const long svecs_dims[3],
                                        const long *multiplicity,
                                        const long *p2s,
                                        const long *s2p,
                                        const long pi0,
                                        const long pi1,
                                        const long pi2)
{
  long i, j, k, l;
  long num_satom, adrs_shift, adrs_vec1, adrs_vec2;
  lapack_complex_double phase_factor, phase_factor1, phase_factor2;
  double fc3_rec_real[27], fc3_rec_imag[27];

  for (i = 0; i < 27; i++) {
    fc3_rec_real[i] = 0;
    fc3_rec_imag[i] = 0;
  }

  num_satom = svecs_dims[0];

  /* First index of fc3: primitive index (compact) or supercell index */
  if (is_compact_fc3) {
    i = pi0;
  } else {
    i = p2s[pi0];
  }

  for (j = 0; j < num_satom; j++) {
    if (s2p[j] != p2s[pi1]) {
      continue;  /* j is not an image of primitive atom pi1 */
    }
    adrs_vec1 = j * svecs_dims[1] + pi0;
    phase_factor1 = get_phase_factor(q_vecs,
                                     1,
                                     shortest_vectors +
                                     adrs_vec1 * svecs_dims[2] * 3,
                                     multiplicity[adrs_vec1]);
    for (k = 0; k < num_satom; k++) {
      if (s2p[k] != p2s[pi2]) {
        continue;  /* k is not an image of primitive atom pi2 */
      }
      adrs_vec2 = k * svecs_dims[1] + pi0;
      phase_factor2 = get_phase_factor(q_vecs,
                                       2,
                                       shortest_vectors +
                                       adrs_vec2 * svecs_dims[2] * 3,
                                       multiplicity[adrs_vec2]);
      adrs_shift = i * 27 * num_satom * num_satom + j * 27 * num_satom + k * 27;
      phase_factor = phonoc_complex_prod(phase_factor1, phase_factor2);
      for (l = 0; l < 27; l++) {
        fc3_rec_real[l] +=
          lapack_complex_double_real(phase_factor) * fc3[adrs_shift + l];
        fc3_rec_imag[l] +=
          lapack_complex_double_imag(phase_factor) * fc3[adrs_shift + l];
      }
    }
  }

  for (i = 0; i < 27; i++) {
    fc3_rec_elem[i] =
      lapack_make_complex_double(fc3_rec_real[i], fc3_rec_imag[i]);
  }
}

/* Pre-phase factor exp(i 2pi (q0+q1+q2) . r_i) for primitive atom i;
 * the self shortest vector must be unique (multiplicity == 1). */
static lapack_complex_double get_pre_phase_factor(const long i,
                                                  const double q_vecs[3][3],
                                                  const double *shortest_vectors,
                                                  const long svecs_dims[3],
                                                  const long *multiplicity,
                                                  const long *p2s_map)
{
  long j;
  double pre_phase;
  lapack_complex_double pre_phase_factor;

  pre_phase = 0;
  for (j = 0; j < 3; j++) {
    pre_phase += shortest_vectors[
      p2s_map[i] * svecs_dims[1] * svecs_dims[2] * 3 + j] *
      (q_vecs[0][j] + q_vecs[1][j] + q_vecs[2][j]);
  }
  assert(multiplicity[p2s_map[i] * svecs_dims[1]] == 1);
  pre_phase *= M_2PI;
  pre_phase_factor = lapack_make_complex_double(cos(pre_phase),
                                                sin(pre_phase));
  return pre_phase_factor;
}

/* Phase factor exp(i 2pi q[qi] . r) averaged over the multi equivalent
 * shortest vectors. */
static lapack_complex_double get_phase_factor(const double q[3][3],
                                              const long qi,
                                              const double *shortest_vectors,
                                              const long multi)
{
  long i, j;
  double sum_real, sum_imag, phase;

  sum_real = 0;
  sum_imag = 0;
  for (i = 0; i < multi; i++) {
    phase = 0;
    for (j = 0; j < 3; j++) {
      phase += q[qi][j] * shortest_vectors[i * 3 + j];
    }
    sum_real += cos(M_2PI * phase);
    sum_imag += sin(M_2PI * phase);
  }
  sum_real /= multi;
  sum_imag /= multi;

  return lapack_make_complex_double(sum_real, sum_imag);
}
pbkdf2-hmac-sha1_fmt_plug.c
/*
 * This software is Copyright (c) 2013 magnum and it is hereby released to
 * the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */
/* John the Ripper format plugin for PBKDF2-HMAC-SHA1 hashes. The standard
 * three-stanza plugin layout: extern declaration, registration, then the
 * implementation. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pbkdf2_hmac_sha1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pbkdf2_hmac_sha1);
#else

#include <ctype.h>
#include <string.h>
#include <assert.h>
#include <stdint.h>

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha1.h"
#include "pbkdf2_hmac_common.h"

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL "PBKDF2-HMAC-SHA1"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#ifdef SIMD_COEF_32
/* SIMD build processes SSE_GROUP_SZ_SHA1 keys per crypt_all slot. */
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

#define PAD_SIZE 64
#define PLAINTEXT_LENGTH 125

/* Parsed salt for one hash; cur_salt is the currently active one. */
static struct custom_salt {
    unsigned int length;   /* salt length in bytes */
    unsigned int rounds;   /* PBKDF2 iteration count */
    unsigned int use_utf8; /* set from the ciphertext tag; see get_salt */
    //unsigned int outlen; /* Not used yet */
    unsigned char salt[PBKDF2_32_MAX_SALT_SIZE];
} *cur_salt;

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[PBKDF2_SHA1_BINARY_SIZE / sizeof(uint32_t)];

/* Allocate per-candidate key and output buffers, scaled for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    int omp_t = omp_get_max_threads();

    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc(self->params.max_keys_per_crypt,
                           sizeof(*saved_key));
    crypt_out = mem_calloc(self->params.max_keys_per_crypt,
                           sizeof(*crypt_out));
}

static void done(void)
{
    MEM_FREE(crypt_out);
    MEM_FREE(saved_key);
}

/* Parse "$pbkdf2-hmac-sha1$<rounds>$<hex salt>$..." into a custom_salt.
 * Returns a pointer to a static struct (copied by the caller). */
static void *get_salt(char *ciphertext)
{
    static struct custom_salt cs;
    char *p;
    int saltlen;

    memset(&cs, 0, sizeof(cs));
    ciphertext += PBKDF2_SHA1_TAG_LEN;
    /* NOTE(review): offset 13 into the post-tag string apparently
     * distinguishes an 'S' tag variant -- confirm against
     * pbkdf2_hmac_common.h before relying on this. */
    cs.use_utf8 = ciphertext[13] == 'S';
    cs.rounds = atou(ciphertext);
    ciphertext = strchr(ciphertext, '$') + 1;
    p = strchr(ciphertext, '$');
    saltlen = 0;
    memset(cs.salt, 0, sizeof(cs.salt));
    while (ciphertext < p) { /** extract salt (hex decode) **/
        cs.salt[saltlen++] =
            atoi16[ARCH_INDEX(ciphertext[0])] * 16 +
            atoi16[ARCH_INDEX(ciphertext[1])];
        ciphertext += 2;
    }
    cs.length = saltlen;

    return (void *)&cs;
}

static void set_salt(void *salt)
{
    cur_salt = (struct custom_salt *)salt;
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

/* Derive PBKDF2-HMAC-SHA1 for all queued candidates; SIMD path hashes
 * SSE_GROUP_SZ_SHA1 candidates at once, scalar path one at a time. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    const int count = *pcount;
    int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
/* NOTE(review): empty conditional block below appears to be a leftover. */
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
#endif
    for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) {
#ifdef SSE_GROUP_SZ_SHA1
        int lens[SSE_GROUP_SZ_SHA1], i;
        unsigned char *pin[SSE_GROUP_SZ_SHA1];
        union {
            uint32_t *pout[SSE_GROUP_SZ_SHA1];
            unsigned char *poutc;
        } x;
        for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) {
            lens[i] = strlen(saved_key[index+i]);
            pin[i] = (unsigned char*)saved_key[index+i];
            x.pout[i] = crypt_out[index+i];
        }
        pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt,
                        cur_salt->length, cur_salt->rounds, &(x.poutc),
                        PBKDF2_SHA1_BINARY_SIZE, 0);
#else
        pbkdf2_sha1((const unsigned char*)(saved_key[index]),
                    strlen(saved_key[index]),
                    cur_salt->salt, cur_salt->length,
                    cur_salt->rounds, (unsigned char*)crypt_out[index],
                    PBKDF2_SHA1_BINARY_SIZE, 0);
#endif
    }
    return count;
}

/* Quick scan: compare only the first machine word of each computed hash;
 * cmp_one/cmp_exact do the full comparison. */
static int cmp_all(void *binary, int count)
{
    int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
    for (; index < count; index++)
#endif
        if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
            return 1;
    return 0;
}

static int cmp_one(void *binary, int index)
{
    return !memcmp(binary, crypt_out[index], PBKDF2_SHA1_BINARY_SIZE);
}

static void set_key(char *key, int index)
{
    strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
    return saved_key[index];
}

/* Check the FULL binary, just for good measure. There is no chance we'll
   have a false positive here but this function is not performance
   sensitive. */
static int cmp_exact(char *source, int index)
{
    return pbkdf2_hmac_sha1_cmp_exact(get_key(index), source,
                                      cur_salt->salt, cur_salt->length,
                                      cur_salt->rounds);
}

/* Tunable-cost report hook: expose the iteration count. */
static unsigned int iteration_count(void *salt)
{
    struct custom_salt *my_salt;

    my_salt = salt;
    return (unsigned int) my_salt->rounds;
}

struct fmt_main fmt_pbkdf2_hmac_sha1 = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        0,
        PLAINTEXT_LENGTH,
        PBKDF2_SHA1_BINARY_SIZE,
        PBKDF2_32_BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
        {
            "iteration count",
        },
        { PBKDF2_SHA1_FORMAT_TAG, PKCS5S2_TAG, PK5K2_TAG },
        pbkdf2_hmac_sha1_common_tests
    }, {
        init,
        done,
        fmt_default_reset,
        pbkdf2_hmac_sha1_prepare,
        pbkdf2_hmac_sha1_valid,
        pbkdf2_hmac_sha1_split,
        pbkdf2_hmac_sha1_binary,
        get_salt,
        {
            iteration_count,
        },
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        NULL,
        set_salt,
        set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};

#endif /* plugin stanza */
GB_binop__times_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): this doc pass adds comments only; the generator template is
// the authoritative source for any code change.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__times_uint64
// A.*B function (eWiseMult):       GB_AemultB__times_uint64
// A*D function (colscale):         GB_AxD__times_uint64
// D*A function (rowscale):         GB_DxB__times_uint64
// C+=B function (dense accum):     GB_Cdense_accumB__times_uint64
// C+=b function (dense accum):     GB_Cdense_accumb__times_uint64
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__times_uint64
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__times_uint64
// C=scalar+B                       GB_bind1st__times_uint64
// C=scalar+B'                      GB_bind1st_tran__times_uint64
// C=A+scalar                       GB_bind2nd__times_uint64
// C=A'+scalar                      GB_bind2nd_tran__times_uint64

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij * bij)

// The GB_* macros below configure the generic template files included
// further down for the TIMES operator on uint64_t.

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x * y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT64 || GxB_NO_TIMES_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

// NOTE(review): unlike the other kernels here this one returns void and has
// no GB_DISABLE guard -- this matches the generator's ewise3_accum pattern.
void GB_Cdense_ewise3_accum__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__times_uint64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *GB_RESTRICT Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__times_uint64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t bij = Bx [p] ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__times_uint64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    uint64_t aij = Ax [pA] ;    \
    Cx [pC] = (x * aij) ;       \
}

GrB_Info GB_bind1st_tran__times_uint64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later template use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    uint64_t aij = Ax [pA] ;    \
    Cx [pC] = (aij * y) ;       \
}

GrB_Info GB_bind2nd_tran__times_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
memdbg.h
/* ****** NOTE ******
 * This header file should be the LAST header file included within every
 * .c file within the project.  If there are .h files that have actual
 * code in them, then this header should be the last include within that
 * .h file, and that .h file should be the last one included within the
 * .c file.
 * ****** NOTE *****
 */

#if !defined (__MEM_DBG_H_)
#define __MEM_DBG_H_

// values to use within the MemDbg_Validate() function.
#define MEMDBG_VALIDATE_MIN     0
#define MEMDBG_VALIDATE_DEEP    1
#define MEMDBG_VALIDATE_DEEPER  2
#define MEMDBG_VALIDATE_DEEPEST 3

#include <stdio.h>
#include <stdlib.h>
#include "os.h"
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#include "memory.h"

#if defined (MEMDBG_ON)

/*
 * This software was written by Jim Fougeron jfoug AT cox dot net
 * in 2013. No copyright is claimed, and the software is hereby
 * placed in the public domain. In case this attempt to disclaim
 * copyright and place the software in the public domain is deemed
 * null and void, then the software is Copyright (c) 2013 Jim Fougeron
 * and it is hereby released to the general public under the following
 * terms:
 *
 * This software may be modified, redistributed, and used for any
 * purpose, in source and binary forms, with or without modification.
 */

/*
 * memdbg.h
 * Memory management debugging (at runtime)
 *
 * memdbg contains routines detect, and report memory
 * problems, such as double frees, passing bad pointers to
 * free, most buffer overwrites.  Also, tracking of non-freed
 * data, showing memory leaks, can also be shown.
 *
 * Compilation Options (provided from Makefile CFLAGS)
 *
 *  MEMDBG_ON     If this is NOT defined, then memdbg will
 *      get out of your way, and most normal memory functions
 *      will be called with no overhead at all.
 */

/* these functions can be called by client code.  Normally Memdbg_Used() and
 * MemDbg_Display() would be called at program exit.  That will dump a list
 * of any memory that was not released.  The MemDbg_Validate() can be called
 * pretty much any time.  That function will walk the memory allocation linked
 * lists, and squawk if there are problems, such as overwrites, freed memory
 * that has been written to, etc.  It would likely be good to call
 * MemDbg_Validate() within benchmarking, after every format is tested.
 *
 * TODO: Add a handle that can be passed to the MemDbg_Used() and
 * MemDbg_Display() and a function to get the 'current' state of memory as a
 * handle.  Thus, a format self test could get a handle BEFORE starting, and
 * then check after, and ONLY show leaked memory from the time the handle was
 * obtained, which was at the start of the self test.  Thus it would only show
 * leaks from that format test.
 *
 * These functions are NOT thread safe.  Do not call them within OMP blocks
 * of code.  Normally, these would be called at program exit, or within things
 * like format self test code, etc, and not within OMP.  But this warning is
 * here, so that it is known NOT to call within OMP.
 */
extern size_t  MemDbg_Used(int show_freed);
extern void    MemDbg_Display(FILE *);
extern void    MemDbg_Validate(int level);
extern void    MemDbg_Validate_msg(int level, const char *pMsg);
extern void    MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData);

/* these functions should almost NEVER be called by any client code.  They
 * are listed here, because the macros need to know their names.  Client code
 * should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc()
 * If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this
 * function would not be declared here, AND at link time, the function would
 * not be found.
 * NOTE, these functions should be thread safe in OMP builds (using
 * #pragma omp atomic).  Also note, memory allocation within OMP blocks SHOULD
 * be avoided if possible.  It is very slow, and the thread safety required
 * makes it even slower.  This is not only talking about these functions here,
 * BUT malloc/free in general in OMP blocks.  AVOID doing that at almost all
 * costs, and performance will usually go up.
 */
extern void *MEMDBG_alloc(size_t, char *, int);
extern void *MEMDBG_alloc_align(size_t, int, char *, int);
extern void *MEMDBG_calloc(size_t count, size_t, char *, int);
extern void *MEMDBG_realloc(void *, size_t, char *, int);
extern void MEMDBG_free(const void *, char *, int);
extern char *MEMDBG_strdup(const char *, char *, int);

#if !defined(__MEMDBG__)
/* we get here on every file compiled EXCEPT memdbg.c */
#undef malloc
#undef realloc
#undef free
#undef strdup
#undef libc_free
#undef libc_calloc
#undef libc_malloc
#define libc_free(a)    MEMDBG_libc_free(a)
#define libc_malloc(a)  MEMDBG_libc_alloc(a)
#define libc_calloc(a,b) MEMDBG_libc_calloc(a,b)
#define malloc(a)       MEMDBG_alloc((a),__FILE__,__LINE__)
#define calloc(a,b)     MEMDBG_calloc(a,b,__FILE__,__LINE__)
#define realloc(a,b)    MEMDBG_realloc((a),(b),__FILE__,__LINE__)
#define free(a)         MEMDBG_free((a),__FILE__,__LINE__)
#define strdup(a)       MEMDBG_strdup((a),__FILE__,__LINE__)
#endif /* !defined __MEMDBG__ */

/* pass the file handle to write to (normally stderr) */
#define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \
    if (MemDbg_Used(0) > 0 || getenv("MEMDBG")) MemDbg_Display(a); \
    MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0)

typedef struct MEMDBG_HANDLE_t {
	unsigned id;        /* snapshot sequence number */
	unsigned alloc_cnt; /* number of live allocations at snapshot time */
	size_t mem_size;    /* bytes allocated at snapshot time */
} MEMDBG_HANDLE;

/*
 * these functions give a caller some of the INSIDE information about the
 * allocated object.  We simply return data from inside the memdbg header.
 * NOTE, if fence post is not valid, we still return something, BUT will
 * also return something in the err_msg stating this may not be valid.
 */

/* The count 'id' of an allocated block.  Same as used in leak report */
unsigned    MEMDBG_get_cnt (const void *ptr, const char **err_msg);
/* the size allocated of the contained block */
size_t      MEMDBG_get_size(const void *ptr, const char **err_msg);
/* what file (source) did the allocation */
const char *MEMDBG_get_file(const void *ptr, const char **err_msg);
/* what file (source) line number did the allocation */
unsigned    MEMDBG_get_line(const void *ptr, const char **err_msg);

/*
 * these functions allow taking a memory snapshot, calling some code, then
 * validating that memory is the same after the code.  This will help catch
 * memory leaks and other such problems, within formats and such.  Simply get
 * the snapshot, run self tests (or other), when it exits, check the snapshot
 * to make sure nothing leaked.
 */
/* returning a struct (or passing as params) is not super efficient but this
   is done so infrequently that this is not an issue. */
MEMDBG_HANDLE MEMDBG_getSnapshot(int id);
/* will not exit on leaks.  Does exit, on memory overwrite corruption. */
void MEMDBG_checkSnapshot(MEMDBG_HANDLE);
/* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also
   exit if leaks found. */
void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks);
/*
 * the allocations from mem_alloc_tiny() must call this function to flag the
 * memory they allocate so it is not flagged as a leak, by these HANDLE
 * snapshot functions.  'tiny' memory is expected to leak, until program exit.
 * At that time, any that was not freed, will be shown as leaked.
 * THIS function is also thread safe.  The other checkSnapshot functions are
 * NOT thread safe.
 */
void MEMDBG_tag_mem_from_alloc_tiny(void *);

extern void MEMDBG_libc_free(void *);
extern void *MEMDBG_libc_alloc(size_t size);
extern void *MEMDBG_libc_calloc(size_t count, size_t size);

#else
/* MEMDBG_ON not defined: everything compiles away to the plain libc calls. */

/* NOTE(review): `libc_alloc` maps to `alloc`, which is not a libc function;
 * the debug branch pairs MEMDBG_libc_alloc with the `libc_malloc` macro, so
 * this likely should be `#define libc_malloc malloc` -- confirm against
 * callers before changing. */
#define libc_alloc alloc
#define libc_calloc calloc
#define libc_free free

#define MemDbg_Used(a) 0
#define MemDbg_Display(a)
#define MemDbg_Validate(a)
#define MemDbg_Validate_msg(a,b)
#define MemDbg_Validate_msg2(a,b,c)
#define MEMDBG_PROGRAM_EXIT_CHECKS(a)
#define MEMDBG_tag_mem_from_alloc_tiny(a)
#define MEMDBG_HANDLE int
#define MEMDBG_getSnapshot(a) 0
/* the printf(" \b") keeps the argument 'used' without visible output */
#define MEMDBG_checkSnapshot(a) if(a) printf(" \b")
#define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b")

#endif /* MEMDBG_ON */

#endif /* __MEMDBG_H_ */
main.c
/* ******************************************************************************** * Polytech'Nice Sophia * * * Filename : main.c * Programmer : Loïc ROSE * Description : algorithm to find the maximal subsequence of an array ******************************************************************************** */ /* ******************************************************************************** * INCLUDE FILES ******************************************************************************** */ #include <stdlib.h> #include <stdio.h> #include <omp.h> #include <math.h> #include <limits.h> /* ******************************************************************************** * CONSTANTS & MACRO ******************************************************************************** */ #define SUFFIX_MODE 1 #define PREFIX_MODE 2 #define SUM_OPERATION 3 #define MAX_OPERATION 4 #define MAX(X,Y) (((X) > (Y)) ? (X) : (Y)) #define SUM(X,Y) ((X) + (Y)) /* ******************************************************************************** * GLOBAL VARIABLES ******************************************************************************** */ int m; struct tablo { int * tab; int size; }; /* ******************************************************************************** * getLog2() * * Description : method to get the log base 2 of a number * Arguments : an integer * Returns : the log2 of the given number ******************************************************************************** */ int getLog2(int x) { return log(x) / log(2); } /* ******************************************************************************** * printArray() * * Description : display the array given in parameter * Arguments : a struct tablo (that represents an array) * Returns : void ******************************************************************************** */ void printArray(struct tablo * tmp) { printf("---- Array of size %i ----\n", tmp->size); int size = tmp->size; for (int i = 0; i < size; ++i) { printf("%i ", 
tmp->tab[i]); } printf("\n"); } /* ******************************************************************************** * allocateTablo() * * Description : allocate enough memory for a tablo struct * Arguments : an integer * Returns : struct tablo with the specified size given in parameters ******************************************************************************** */ struct tablo * allocateTablo(int size) { struct tablo * tmp = malloc(sizeof(struct tablo)); tmp->size = size; tmp->tab = malloc(size*sizeof(int)); tmp->tab[0] = 0; return tmp; } /* ******************************************************************************** * generateArray() * * Description : method to generate and fill a sample array with custom values * Arguments : struct tablo * Returns : void ******************************************************************************** */ void generateArray(struct tablo * s) { s->size = 16; s->tab = malloc(s->size*sizeof(int)); s->tab[0] = 3; s->tab[1] = 2; s->tab[2] = -7; s->tab[3] = 11; s->tab[4] = 10; s->tab[5] = -6; s->tab[6] = 4; s->tab[7] = 9; s->tab[8] = -6; s->tab[9] = 1; s->tab[10] = -2; s->tab[11] = -3; s->tab[12] = 4; s->tab[13] = -3; s->tab[14] = 0; s->tab[15] = 2; } /* ******************************************************************************** * up() * * Description : first step of the algorithm presented in class * Arguments : 2 struct tablo (source, destination) and an integer that represents the operation that needs to be done (SUM_OPERATION or MAX_OPERATION) * Returns : void ******************************************************************************** */ void up(struct tablo * src, struct tablo * dst, int operation) { int size_src = src->size; /* size of the source array */ int size_dst = dst->size; /* size of the destination array */ // Copy the source array at the end of the destination array for (int i = size_dst - 1; i > (size_dst/2) - 1; i--) { dst->tab[i] = src->tab[i - size_src]; } for (int l = m - 1; l >= 0; l--) { int 
start = pow(2, l); int end = pow(2, l+1) -1; #pragma omp parallel for for (int j = start; j <= end; j++) { dst->tab[j] = (operation == SUM_OPERATION) ? SUM(dst->tab[2*j], dst->tab[2*j+1]) : MAX(dst->tab[2*j], dst->tab[2*j+1]); } } } /* ******************************************************************************** * down() * * Description : second step of the algorithm presented in class * Arguments : 2 struct tablo (a and b) and 2 integers - the first integer represents the mode (PREFIX_MODE or SUFFIX_MODE) and the second integer represents the operation that needs to be done (SUM_OPERATION or MAX_OPERATION) * Returns : void ******************************************************************************** */ void down(struct tablo * a, struct tablo * b, int mode, int operation) { b->tab[1] = (operation == SUM_OPERATION) ? 0 : INT_MIN; for (int l = 1; l <= m; l++) { int start = pow(2, l); int end = pow(2, l+1) - 1; #pragma omp parallel for for (int j = start; j <= end; j++) { if (j%2 == 0) { if (mode == PREFIX_MODE) b->tab[j] = b->tab[j/2]; if (mode == SUFFIX_MODE) b->tab[j] = (operation == SUM_OPERATION) ? SUM(b->tab[j/2], a->tab[j+1]) : MAX(b->tab[j/2], a->tab[j+1]); } else { if (mode == PREFIX_MODE) b->tab[j] = (operation == SUM_OPERATION) ? SUM(b->tab[j/2], a->tab[j-1]) : MAX(b->tab[j/2], a->tab[j-1]); if (mode == SUFFIX_MODE) b->tab[j] = b->tab[j/2]; } } } } /* ******************************************************************************** * final() * * Description : the last step of the algorithm presented in class * Arguments : 2 struct tablo (a and b) and the operation that needs to be done (SUM_OPERATION or MAX_OPERATION) * Returns : void ******************************************************************************** */ void final(struct tablo * a, struct tablo * b, int operation) { int start = pow(2, m); int end = pow(2, m + 1) - 1; #pragma omp parallel for for (int j = start; j <= end; j++) { b->tab[j] = (operation == SUM_OPERATION) ? 
SUM(b->tab[j], a->tab[j]) : MAX(b->tab[j], a->tab[j]); } } /* ******************************************************************************** * do_up_down_final() * * Description : used to do all the algorithm presented in class from the begining to the end (up, down and final) * Arguments : 2 struct tablo (the source and the destination), an integer that represents the mode (SUFFIX_MODE or PREFIX_MODE) and an integer that represents the operation (SUM_OPERATION or MAX_OPERATION) * Returns : void ******************************************************************************** */ void do_up_down_final(struct tablo * src, struct tablo * dst, int mode, int operation) { int size_src = src->size; /* size of the source array */ int size_dst = dst->size; /* size of the destination array */ struct tablo * a = allocateTablo(size_src*2); struct tablo * b = allocateTablo(size_src*2); up(src, a, operation); down(a, b, mode, operation); final(a, b, operation); int size_of_b = b->size; for (int i = 0; i < size_dst; i++) { dst->tab[i] = b->tab[(size_of_b/2) + i]; } } /* ******************************************************************************** * findMaxTablo() * * Description : 5th step of the algorithm that (cf. 
TD) * Arguments : 5 struct tablo (the source, PSUM, SSUM, PMAX and SMAX) * Returns : the tablo of max M ******************************************************************************** */ struct tablo * findMaxTablo(struct tablo * src, struct tablo * PSUM, struct tablo * SSUM, struct tablo * PMAX, struct tablo * SMAX) { int size_src = src->size; struct tablo * Ms = allocateTablo(size_src); struct tablo * Mp = allocateTablo(size_src); struct tablo * M = allocateTablo(size_src); #pragma omp parallel for for (int i = 0; i < size_src; i++) { Ms->tab[i] = PMAX->tab[i] - SSUM->tab[i] + src->tab[i]; Mp->tab[i] = SMAX->tab[i] - PSUM->tab[i] + src->tab[i]; M->tab[i] = Ms->tab[i] + Mp->tab[i] - src->tab[i]; } return M; } /* ******************************************************************************** * find_max_value() * * Description : find the maximum value of an array * Arguments : an array that represents the final array of the algorithm (M) * Returns : the maximum of the array ******************************************************************************** */ int find_max_value(struct tablo * M) { int size_of_m = M->size; int max = INT_MIN; #pragma omp parallel for for (int i = 0; i < size_of_m; i++) { if (M->tab[i] > max) { max = M->tab[i]; } } return max; } /* ******************************************************************************** * display_result() * * Description : display the final result of the algorithm (the max sum and the subsequence associated) accorded to the format required * Arguments : 2 struct tablo (the source and the max) and an integer that represents the maximum of M array * Returns : void ******************************************************************************** */ void display_result(struct tablo * src, struct tablo * M, int max) { int size_src = src->size; printf("%d ", max); for (int i = 0; i < size_src; i++) { if (M->tab[i] == max) printf("%d ", src->tab[i]); } printf("\n"); } /* 
******************************************************************************** * generateArrayFromFile() * * Description : generate the array by giving a file as parameter * Arguments : a struct tablo (that will be the source) and a char* that represents the filename * Returns : void ******************************************************************************** */ void generateArrayFromFile(struct tablo * s, char * filename) { FILE *file = NULL; file = fopen(filename, "r"); if (file != NULL) { int * numbers = {0}; /* array of integers deduced from the file */ int current_number; /* the current integer in process */ fseek(file, 0, SEEK_END); /* Go to the end of the file */ long length_of_file = ftell(file); /* Get the length of the file */ fseek(file, 0, SEEK_SET); /* Go the the begining of the file */ numbers = malloc(sizeof(int)*length_of_file); int number_of_numbers = 0; // Read the file... while(!feof(file)) { // If we find a number, we add it in the array and modify the number of numbers if (fscanf(file, "%d", &current_number) == 1) { numbers[number_of_numbers++] = current_number; } } /* Update of the struct tablo given in parameter */ s->size = number_of_numbers; s->tab = malloc(number_of_numbers*sizeof(int)); for (int i = 0; i < number_of_numbers; i++) { s->tab[i] = numbers[i]; } m = getLog2(number_of_numbers); /* size of source = n = pow(2, m) */ fclose(file); } else { perror(filename); } } /* ******************************************************************************** * main() * * Description : the main method of the program * Arguments : argc and argv - classic * Returns : integer to say if the execution is done corectly or not ******************************************************************************** */ int main(int argc, char **argv) { struct tablo source; generateArrayFromFile(&source, argv[1]); struct tablo * PSUM = allocateTablo(source.size); do_up_down_final(&source, PSUM, PREFIX_MODE, SUM_OPERATION); struct tablo * SSUM = 
allocateTablo(source.size); do_up_down_final(&source, SSUM, SUFFIX_MODE, SUM_OPERATION); struct tablo * SMAX = allocateTablo(source.size); do_up_down_final(PSUM, SMAX, SUFFIX_MODE, MAX_OPERATION); struct tablo * PMAX = allocateTablo(source.size); do_up_down_final(SSUM, PMAX, PREFIX_MODE, MAX_OPERATION); struct tablo * M = findMaxTablo(&source, PSUM, SSUM, PMAX, SMAX); int max = find_max_value(M); display_result(&source, M, max); return 0; }
DRB001-antidep1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* A loop with loop-carried anti-dependence. Data race pair: a[i+1]@64:10 vs. a[i]@64:5 */ #include <stdio.h> #include <stdlib.h> int main(int argc, char* argv[]) { int i; int len = 1000; int a[1000]; #pragma omp parallel for private(i) for (i=0; i<len; i++) a[i]= i; for (i=0;i< len -1 ;i++) a[i]=a[i+1]+1; printf ("a[500]=%d\n", a[500] ); return 0; }
pi_loop.c
/* This program will numerically compute the integral of 4/(1+x*x) from 0 to 1. The value of this integral is pi -- which is great since it gives us an easy way to check the answer. History: Written by Tim Mattson, 11/99. */ #include <stdio.h> #include <omp.h> static long num_steps = 1000000000; double step; int main () { int i; double x, pi, sum = 0.0; double start_time, run_time; step = 1.0/(double) num_steps; for (i=1;i<=16;i++){ sum = 0.0; omp_set_num_threads(i); start_time = omp_get_wtime(); #pragma omp parallel { double x; #pragma omp single printf(" num_threads = %d",omp_get_num_threads()); #pragma omp for reduction(+:sum) for (i=1;i<= num_steps; i++){ x = (i-0.5)*step; sum = sum + 4.0/(1.0+x*x); } } pi = step * sum; run_time = omp_get_wtime() - start_time; printf("\n pi is %f in %f seconds and %d threads\n",pi,run_time,i); } }
tstile.h
void tstile() { int c0,c1,c2,c3,c5,c6,c7,c9,c11,c10,c4,c12; if(1==1) for( c0 = 0; c0 <= floord(N - 2, 8); c0 += 1) #pragma omp parallel for schedule(dynamic, 1) for( c1 = (c0 + 1) / 2; c1 <= min(c0, (N - 1) / 16); c1 += 1) for( c3 = 16 * c0 - 16 * c1 + 1; c3 <= min(min(N - 1, 16 * c1 + 15), 16 * c0 - 16 * c1 + 16); c3 += 1) { for( c4 = 0; c4 <= c0 - c1; c4 += 1) for( c6 = max(-N + 16 * c1 + 1, -N + c3 + 1); c6 <= min(0, -N + 16 * c1 + 16); c6 += 1) { for( c10 = 16 * c4; c10 <= min(c3 - 1, 16 * c4 + 15); c10 += 1) S[(-c6)][(c3-c6)] = MAX(S[(-c6)][c10+(-c6)] + S[c10+(-c6)+1][(c3-c6)], S[(-c6)][(c3-c6)]); if (c1 + c4 == c0 && 16 * c0 + c6 + 15 >= 16 * c1 + c3) S[(-c6)][(c3-c6)] = MAX(S[(-c6)][(c3-c6)], S[(-c6)+1][(c3-c6)-1] + can_pair(RNA, (-c6), (c3-c6))); } for( c4 = max(c0 - c1 + 1, -c1 + (N + c3) / 16 - 1); c4 <= min((N - 1) / 16, -c1 + (N + c3 - 1) / 16); c4 += 1) for( c6 = max(max(-N + 16 * c1 + 1, -N + c3 + 1), c3 - 16 * c4 - 15); c6 <= min(-N + 16 * c1 + 16, c3 - 16 * c4); c6 += 1) S[(-c6)][(c3-c6)] = MAX(S[(-c6)][(c3-c6)], S[(-c6)+1][(c3-c6)-1] + can_pair(RNA, (-c6), (c3-c6))); } }
cp-tree.h
/* * Copyright (C) 2007. QLogic Corporation. All Rights Reserved. */ /* Definitions for C++ parsing and type checking. Copyright (C) 1987, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING. If not, write to the Free Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef GCC_CP_TREE_H #define GCC_CP_TREE_H #include "ggc.h" #include "function.h" #include "hashtab.h" #include "splay-tree.h" #include "vec.h" #include "varray.h" #include "c-common.h" #include "name-lookup.h" struct diagnostic_context; /* Usage of TREE_LANG_FLAG_?: 0: IDENTIFIER_MARKED (IDENTIFIER_NODEs) NEW_EXPR_USE_GLOBAL (in NEW_EXPR). DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR). COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR). TREE_INDIRECT_USING (in NAMESPACE_DECL). CLEANUP_P (in TRY_BLOCK) AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR) PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF) PAREN_STRING_LITERAL (in STRING_CST) DECL_PRETTY_FUNCTION_P (in VAR_DECL) KOENIG_LOOKUP_P (in CALL_EXPR) STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST). 
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT) STMT_EXPR_NO_SCOPE (in STMT_EXPR) BIND_EXPR_TRY_BLOCK (in BIND_EXPR) TYPENAME_IS_ENUM_P (in TYPENAME_TYPE) REFERENCE_REF_P (in INDIRECT_EXPR) QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF) OMP_ATOMIC_DEPENDENT_P (in OMP_ATOMIC) OMP_FOR_GIMPLIFYING_P (in OMP_FOR) BASELINK_QUALIFIED_P (in BASELINK) TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR) 1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE) TI_PENDING_TEMPLATE_FLAG. TEMPLATE_PARMS_FOR_INLINE. DELETE_EXPR_USE_VEC (in DELETE_EXPR). (TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out). ICS_ELLIPSIS_FLAG (in _CONV) DECL_INITIALIZED_P (in VAR_DECL) TYPENAME_IS_CLASS_P (in TYPENAME_TYPE) STMT_IS_FULL_EXPR_P (in _STMT) 2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE) ICS_THIS_FLAG (in _CONV) DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL) STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST) 3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out). ICS_BAD_FLAG (in _CONV) FN_TRY_BLOCK_P (in TRY_BLOCK) IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE) BIND_EXPR_BODY_BLOCK (in BIND_EXPR) DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL) 4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR, or FIELD_DECL). IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE) DECL_TINFO_P (in VAR_DECL) 5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE) DECL_VTABLE_OR_VTT_P (in VAR_DECL) 6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE) DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL) TYPE_MARKED_P (in _TYPE) Usage of TYPE_LANG_FLAG_?: 0: TYPE_DEPENDENT_P 1: TYPE_HAS_CONSTRUCTOR. 2: Unused 3: TYPE_FOR_JAVA. 4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR 5: IS_AGGR_TYPE. 6: TYPE_DEPENDENT_P_VALID Usage of DECL_LANG_FLAG_?: 0: DECL_ERROR_REPORTED (in VAR_DECL). DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL) DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL) DECL_MUTABLE_P (in FIELD_DECL) DECL_DEPENDENT_P (in USING_DECL) 1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL). 
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL) DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL) 2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL). DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL) 3: DECL_IN_AGGR_P. 4: DECL_C_BIT_FIELD (in a FIELD_DECL) DECL_VAR_MARKED_P (in a VAR_DECL) DECL_SELF_REFERENCE_P (in a TYPE_DECL) DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL) 5: DECL_INTERFACE_KNOWN. 6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL). DECL_FIELD_IS_BASE (in FIELD_DECL) 7: DECL_DEAD_FOR_LOCAL (in VAR_DECL). DECL_THUNK_P (in a member FUNCTION_DECL) Usage of language-independent fields in a language-dependent manner: TYPE_ALIAS_SET This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so forth as a substitute for the mark bits provided in `lang_type'. At present, only the six low-order bits are used. TYPE_LANG_SLOT_1 For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO. For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS BINFO_VIRTUALS For a binfo, this is a TREE_LIST. There is an entry for each virtual function declared either in BINFO or its direct and indirect primary bases. The BV_DELTA of each node gives the amount by which to adjust the `this' pointer when calling the function. If the method is an overridden version of a base class method, then it is assumed that, prior to adjustment, the this pointer points to an object of the base class. The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable index of the vcall offset for this entry. The BV_FN is the declaration for the virtual function itself. BINFO_VTABLE This is an expression with POINTER_TYPE that gives the value to which the vptr should be initialized. Use get_vtbl_decl_for_binfo to extract the VAR_DECL for the complete vtable. DECL_ARGUMENTS For a VAR_DECL this is DECL_ANON_UNION_ELEMS. DECL_VINDEX This field is NULL for a non-virtual function. 
For a virtual function, it is eventually set to an INTEGER_CST indicating the index in the vtable at which this function can be found. When a virtual function is declared, but before it is known what function is overridden, this field is the error_mark_node. Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is the virtual function this one overrides, and whose TREE_CHAIN is the old DECL_VINDEX. */ /* Language-specific tree checkers. */ #define VAR_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL) #define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \ TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL) #define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \ TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL) #define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \ TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM) #if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007) #define NON_THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ const tree __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL && \ TREE_CODE (__t) != TEMPLATE_DECL && __t->decl_common.lang_specific \ && __t->decl_common.lang_specific->decl_flags.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #define THUNK_FUNCTION_CHECK(NODE) __extension__ \ ({ const tree __t = (NODE); \ if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \ || !__t->decl_common.lang_specific->decl_flags.thunk_p) \ tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \ __t; }) #else #define NON_THUNK_FUNCTION_CHECK(NODE) (NODE) #define THUNK_FUNCTION_CHECK(NODE) (NODE) #endif /* Language-dependent contents of an identifier. */ struct lang_identifier GTY(()) { struct c_common_identifier c_common; cxx_binding *namespace_bindings; cxx_binding *bindings; tree class_template_info; tree label_value; }; /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. 
C_RID_CODE (node) is then the RID_* value of the keyword, and C_RID_YYCODE is the token number wanted by Yacc. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID) #define LANG_IDENTIFIER_CAST(NODE) \ ((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE)) struct template_parm_index_s GTY(()) { struct tree_common common; HOST_WIDE_INT index; HOST_WIDE_INT level; HOST_WIDE_INT orig_level; tree decl; }; typedef struct template_parm_index_s template_parm_index; struct tinst_level_s GTY(()) { struct tree_common common; tree decl; location_t locus; int in_system_header_p; }; typedef struct tinst_level_s * tinst_level_t; struct ptrmem_cst GTY(()) { struct tree_common common; /* This isn't used, but the middle-end expects all constants to have this field. */ rtx rtl; tree member; }; typedef struct ptrmem_cst * ptrmem_cst_t; #define IDENTIFIER_GLOBAL_VALUE(NODE) \ namespace_binding ((NODE), global_namespace) #define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), global_namespace, (VAL)) #define IDENTIFIER_NAMESPACE_VALUE(NODE) \ namespace_binding ((NODE), current_namespace) #define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \ set_namespace_binding ((NODE), current_namespace, (VAL)) #define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE)) #define BIND_EXPR_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE)) /* Used to mark the block around the member initializers and cleanups. */ #define BIND_EXPR_BODY_BLOCK(NODE) \ TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE)) #define FUNCTION_NEEDS_BODY_BLOCK(NODE) \ (DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE)) #define STATEMENT_LIST_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE)) #define STATEMENT_LIST_TRY_BLOCK(NODE) \ TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE)) /* Nonzero if this statement should be considered a full-expression, i.e., if temporaries created during this statement should have their destructors run at the end of this statement. 
*/ #define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE)) /* Marks the result of a statement expression. */ #define EXPR_STMT_STMT_EXPR_RESULT(NODE) \ TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE)) /* Nonzero if this statement-expression does not have an associated scope. */ #define STMT_EXPR_NO_SCOPE(NODE) \ TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE)) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual sense of `same'. */ #define same_type_p(TYPE1, TYPE2) \ comptypes ((TYPE1), (TYPE2), COMPARE_STRICT) /* Returns nonzero iff TYPE1 and TYPE2 are the same type, ignoring top-level qualifiers. */ #define same_type_ignoring_top_level_qualifiers_p(TYPE1, TYPE2) \ same_type_p (TYPE_MAIN_VARIANT (TYPE1), TYPE_MAIN_VARIANT (TYPE2)) /* Nonzero if we are presently building a statement tree, rather than expanding each statement as we encounter it. */ #define building_stmt_tree() (cur_stmt_list != NULL_TREE) /* Returns nonzero iff NODE is a declaration for the global function `main'. */ #define DECL_MAIN_P(NODE) \ (DECL_EXTERN_C_FUNCTION_P (NODE) \ && DECL_NAME (NODE) != NULL_TREE \ && MAIN_NAME_P (DECL_NAME (NODE))) /* The overloaded FUNCTION_DECL. */ #define OVL_FUNCTION(NODE) \ (((struct tree_overload*)OVERLOAD_CHECK (NODE))->function) #define OVL_CHAIN(NODE) TREE_CHAIN (NODE) /* Polymorphic access to FUNCTION and CHAIN. */ #define OVL_CURRENT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE)) #define OVL_NEXT(NODE) \ ((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE) /* If set, this was imported in a using declaration. This is not to confuse with being used somewhere, which is not important for this node. */ #define OVL_USED(NODE) TREE_USED (NODE) struct tree_overload GTY(()) { struct tree_common common; tree function; }; /* Returns true iff NODE is a BASELINK. */ #define BASELINK_P(NODE) \ (TREE_CODE (NODE) == BASELINK) /* The BINFO indicating the base from which the BASELINK_FUNCTIONS came. 
*/ #define BASELINK_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo) /* The functions referred to by the BASELINK; either a FUNCTION_DECL, a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */ #define BASELINK_FUNCTIONS(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->functions) /* The BINFO in which the search for the functions indicated by this baselink began. This base is used to determine the accessibility of functions selected by overload resolution. */ #define BASELINK_ACCESS_BINFO(NODE) \ (((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo) /* For a type-conversion operator, the BASELINK_OPTYPE indicates the type to which the conversion should occur. This value is important if the BASELINK_FUNCTIONS include a template conversion operator -- the BASELINK_OPTYPE can be used to determine what type the user requested. */ #define BASELINK_OPTYPE(NODE) \ (TREE_CHAIN (BASELINK_CHECK (NODE))) /* Non-zero if this baselink was from a qualified lookup. */ #define BASELINK_QUALIFIED_P(NODE) \ TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE)) struct tree_baselink GTY(()) { struct tree_common common; tree binfo; tree functions; tree access_binfo; }; /* The different kinds of ids that we encounter. */ typedef enum cp_id_kind { /* Not an id at all. */ CP_ID_KIND_NONE, /* An unqualified-id that is not a template-id. */ CP_ID_KIND_UNQUALIFIED, /* An unqualified-id that is a dependent name. */ CP_ID_KIND_UNQUALIFIED_DEPENDENT, /* An unqualified template-id. */ CP_ID_KIND_TEMPLATE_ID, /* A qualified-id. */ CP_ID_KIND_QUALIFIED } cp_id_kind; /* Macros for access to language-specific slots in an identifier. */ #define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->namespace_bindings) #define IDENTIFIER_TEMPLATE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->class_template_info) /* The IDENTIFIER_BINDING is the innermost cxx_binding for the identifier. It's PREVIOUS is the next outermost binding. 
Each VALUE field is a DECL for the associated declaration. Thus, name lookup consists simply of pulling off the node at the front of the list (modulo oddities for looking up the names of types, and such.) You can use SCOPE field to determine the scope that bound the name. */ #define IDENTIFIER_BINDING(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->bindings) /* TREE_TYPE only indicates on local and class scope the current type. For namespace scope, the presence of a type in any namespace is indicated with global_type_node, and the real type behind must be found through lookup. */ #define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE) #define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE) #define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE)) #define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0) #define IDENTIFIER_LABEL_VALUE(NODE) \ (LANG_IDENTIFIER_CAST (NODE)->label_value) #define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \ IDENTIFIER_LABEL_VALUE (NODE) = (VALUE) /* Nonzero if this identifier is used as a virtual function name somewhere (optimizes searches). */ #define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE) /* Nonzero if this identifier is the prefix for a mangled C++ operator name. */ #define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE) /* Nonzero if this identifier is the name of a type-conversion operator. */ #define IDENTIFIER_TYPENAME_P(NODE) \ TREE_LANG_FLAG_4 (NODE) /* Nonzero if this identifier is the name of a constructor or destructor. */ #define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \ TREE_LANG_FLAG_3 (NODE) /* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague linkage which the prelinker has assigned to this translation unit. */ #define IDENTIFIER_REPO_CHOSEN(NAME) \ (TREE_LANG_FLAG_6 (NAME)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. 
*/ #define C_TYPE_FIELDS_READONLY(TYPE) \ (LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly) /* The tokens stored in the default argument. */ #define DEFARG_TOKENS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens) #define DEFARG_INSTANTIATIONS(NODE) \ (((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations) struct tree_default_arg GTY (()) { struct tree_common common; struct cp_token_cache *tokens; VEC(tree,gc) *instantiations; }; enum cp_tree_node_structure_enum { TS_CP_GENERIC, TS_CP_IDENTIFIER, TS_CP_TPI, TS_CP_TINST_LEVEL, TS_CP_PTRMEM, TS_CP_BINDING, TS_CP_OVERLOAD, TS_CP_BASELINK, TS_CP_WRAPPER, TS_CP_DEFAULT_ARG, LAST_TS_CP_ENUM }; /* The resulting tree type. */ union lang_tree_node GTY((desc ("cp_tree_node_structure (&%h)"), chain_next ("(union lang_tree_node *)TREE_CHAIN (&%h.generic)"))) { union tree_node GTY ((tag ("TS_CP_GENERIC"), desc ("tree_node_structure (&%h)"))) generic; struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi; struct tinst_level_s GTY ((tag ("TS_CP_TINST_LEVEL"))) tinst_level; struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem; struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload; struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink; struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg; struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier; }; enum cp_tree_index { CPTI_JAVA_BYTE_TYPE, CPTI_JAVA_SHORT_TYPE, CPTI_JAVA_INT_TYPE, CPTI_JAVA_LONG_TYPE, CPTI_JAVA_FLOAT_TYPE, CPTI_JAVA_DOUBLE_TYPE, CPTI_JAVA_CHAR_TYPE, CPTI_JAVA_BOOLEAN_TYPE, CPTI_WCHAR_DECL, CPTI_VTABLE_ENTRY_TYPE, CPTI_DELTA_TYPE, CPTI_VTABLE_INDEX_TYPE, CPTI_CLEANUP_TYPE, CPTI_VTT_PARM_TYPE, CPTI_CLASS_TYPE, CPTI_UNKNOWN_TYPE, CPTI_VTBL_TYPE, CPTI_VTBL_PTR_TYPE, CPTI_STD, CPTI_ABI, CPTI_CONST_TYPE_INFO_TYPE, CPTI_TYPE_INFO_PTR_TYPE, CPTI_ABORT_FNDECL, CPTI_GLOBAL_DELETE_FNDECL, CPTI_AGGR_TAG, CPTI_CTOR_IDENTIFIER, CPTI_COMPLETE_CTOR_IDENTIFIER, CPTI_BASE_CTOR_IDENTIFIER, 
CPTI_DTOR_IDENTIFIER, CPTI_COMPLETE_DTOR_IDENTIFIER, CPTI_BASE_DTOR_IDENTIFIER, CPTI_DELETING_DTOR_IDENTIFIER, CPTI_DELTA_IDENTIFIER, CPTI_IN_CHARGE_IDENTIFIER, CPTI_VTT_PARM_IDENTIFIER, CPTI_NELTS_IDENTIFIER, CPTI_THIS_IDENTIFIER, CPTI_PFN_IDENTIFIER, CPTI_VPTR_IDENTIFIER, CPTI_STD_IDENTIFIER, CPTI_LANG_NAME_C, CPTI_LANG_NAME_CPLUSPLUS, CPTI_LANG_NAME_JAVA, CPTI_EMPTY_EXCEPT_SPEC, CPTI_JCLASS, CPTI_TERMINATE, CPTI_CALL_UNEXPECTED, CPTI_ATEXIT, CPTI_DSO_HANDLE, CPTI_DCAST, CPTI_KEYED_CLASSES, CPTI_MAX }; extern GTY(()) tree cp_global_trees[CPTI_MAX]; #define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE] #define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE] #define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE] #define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE] #define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE] #define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE] #define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE] #define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE] #define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL] #define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE] /* The type used to represent an offset by which to adjust the `this' pointer in pointer-to-member types. */ #define delta_type_node cp_global_trees[CPTI_DELTA_TYPE] /* The type used to represent an index into the vtable. 
*/ #define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE] #define class_type_node cp_global_trees[CPTI_CLASS_TYPE] #define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE] #define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE] #define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE] #define std_node cp_global_trees[CPTI_STD] #define abi_node cp_global_trees[CPTI_ABI] #define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE] #define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE] #define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL] #define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL] #define current_aggr cp_global_trees[CPTI_AGGR_TAG] /* We cache these tree nodes so as to call get_identifier less frequently. */ /* The name of a constructor that takes an in-charge parameter to decide whether or not to construct virtual base classes. */ #define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER] /* The name of a constructor that constructs virtual base classes. */ #define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER] /* The name of a constructor that does not construct virtual base classes. */ #define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER] /* The name of a destructor that takes an in-charge parameter to decide whether or not to destroy virtual base classes and whether or not to delete the object. */ #define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes. */ #define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER] /* The name of a destructor that does not destroy virtual base classes. */ #define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER] /* The name of a destructor that destroys virtual base classes, and then deletes the entire object. 
*/ #define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER] #define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER] #define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER] /* The name of the parameter that contains a pointer to the VTT to use for this subobject constructor or destructor. */ #define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER] #define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER] #define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER] #define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER] #define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER] /* The name of the std namespace. */ #define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER] #define lang_name_c cp_global_trees[CPTI_LANG_NAME_C] #define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS] #define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA] /* Exception specifier used for throw(). */ #define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC] /* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */ #define jclass_node cp_global_trees[CPTI_JCLASS] /* The declaration for `std::terminate'. */ #define terminate_node cp_global_trees[CPTI_TERMINATE] /* The declaration for "__cxa_call_unexpected". */ #define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED] /* A pointer to `std::atexit'. */ #define atexit_node cp_global_trees[CPTI_ATEXIT] /* A pointer to `__dso_handle'. */ #define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE] /* The declaration of the dynamic_cast runtime. */ #define dynamic_cast_node cp_global_trees[CPTI_DCAST] /* The type of a destructor. */ #define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE] /* The type of the vtt parameter passed to subobject constructors and destructors. */ #define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE] /* A TREE_LIST of the dynamic classes whose vtables may have to be emitted in this translation unit. 
*/
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* Node to indicate default access.  This must be distinct from the
   access nodes in tree.h.  */
#define access_default_node null_node
/* Global state.  Saved/restored parser and scope state; a stack of
   these (chained through `prev') backs scope_chain.  */
struct saved_scope GTY(())
{
  VEC(cxx_saved_binding,gc) *old_bindings;
  tree old_namespace;
  tree decl_ns_list;
  tree class_name;
  tree class_type;
  tree access_specifier;
  tree function_decl;
  VEC(tree,gc) *lang_base;
  tree lang_name;
  tree template_parms;
  struct cp_binding_level *x_previous_class_level;
  tree x_saved_tree;
  HOST_WIDE_INT x_processing_template_decl;
  int x_processing_specialization;
  bool x_processing_explicit_instantiation;
  int need_pop_function_context;
  bool skip_evaluation;
  struct stmt_tree_s x_stmt_tree;
  struct cp_binding_level *class_bindings;
  struct cp_binding_level *bindings;
  struct saved_scope *prev;
};
/* The current open namespace.  */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations.  */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
   given by the user, or, if no access specifier was given, the
   default value appropriate for the kind of class (i.e., struct,
   class, or union).  */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack.  */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* Parsing a function declarator leaves a list of parameter names or a
   chain of parameter decls here.  */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
/* The cached class binding level, from the most recently exited
   class, or NULL if none.  */
#define previous_class_level scope_chain->x_previous_class_level
/* A list of private types mentioned, for deferred access checking.  */
extern GTY(()) struct saved_scope *scope_chain;
/* Hash-table entry mapping a DECL_UID (unsigned int) to a tree; used
   by the extern_decl_map table below.  */
struct cxx_int_tree_map GTY(())
{
  unsigned int uid;
  tree to;
};
extern unsigned int cxx_int_tree_map_hash (const void *);
extern int cxx_int_tree_map_eq (const void *, const void *);
/* Global state pertinent to the current function.  */
struct language_function GTY(())
{
  struct c_language_function base;
  tree x_cdtor_label;
  tree x_current_class_ptr;
  tree x_current_class_ref;
  tree x_eh_spec_block;
  tree x_in_charge_parm;
  tree x_vtt_parm;
  tree x_return_value;
  int returns_value;
  int returns_null;
  int returns_abnormally;
  int in_function_try_handler;
  int in_base_initializer;
  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;
  htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
  struct cp_binding_level *bindings;
  VEC(tree,gc) *x_local_names;
  htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};
/* The current C++-specific per-function global variables.  */
#define cp_function_chain (cfun->language)
/* In a constructor or destructor, the point at which all derived
   class destroying/construction has been done.  I.e., just before a
   constructor returns, or before any base class destroying will be
   done in a destructor.  */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
   PARM_DECL for the `this' pointer.  The current_class_ref is an
   expression for `*this'.
*/
#define current_class_ptr \
  (cfun ? cp_function_chain->x_current_class_ptr : NULL_TREE)
#define current_class_ref \
  (cfun ? cp_function_chain->x_current_class_ref : NULL_TREE)
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
   function, if any.  */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function.  Only used for
   constructors and destructors.  */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement that specifies a return value is seen.  */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
   a return statement with no argument is seen.  */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
   a call to a noreturn function is seen.  */
#define current_function_returns_abnormally \
  cp_function_chain->returns_abnormally
/* Nonzero if we are processing a base initializer.  Zero elsewhere.  */
#define in_base_initializer cp_function_chain->in_base_initializer
#define in_function_try_handler cp_function_chain->in_function_try_handler
/* Expression always returned from function, or error_mark_node
   otherwise, for use by the automatic named return value
   optimization.  */
#define current_function_return_value \
  (cp_function_chain->x_return_value)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
   new" or "operator delete".  */
#define NEW_DELETE_OPNAME_P(NAME) \
  ((NAME) == ansi_opname (NEW_EXPR) \
   || (NAME) == ansi_opname (VEC_NEW_EXPR) \
   || (NAME) == ansi_opname (DELETE_EXPR) \
   || (NAME) == ansi_opname (VEC_DELETE_EXPR))
/* The IDENTIFIER_NODE naming the overloadable operator with tree
   code CODE (and its assignment form).  */
#define ansi_opname(CODE) \
  (operator_name_info[(int) (CODE)].identifier)
#define ansi_assopname(CODE) \
  (assignment_operator_name_info[(int) (CODE)].identifier)
/* True if NODE is an erroneous expression.  */
#define error_operand_p(NODE) \
  ((NODE) == error_mark_node \
   || ((NODE) && TREE_TYPE ((NODE)) == error_mark_node))
/* C++ language-specific tree codes.  */
#define DEFTREECODE(SYM, NAME, TYPE, LENGTH) SYM,
enum cplus_tree_code {
  CP_DUMMY_TREE_CODE = LAST_C_TREE_CODE,
#include "cp-tree.def"
  LAST_CPLUS_TREE_CODE
};
#undef DEFTREECODE
/* TRUE if a tree code represents a statement.  */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
enum languages { lang_c, lang_cplusplus, lang_java };
/* Macros to make error reporting functions' lives easier.  */
#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
  (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes.  */
#define TYPE_ANONYMOUS_P(NODE) \
  (TAGGED_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE.  */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a class (or struct or union) type.  Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters.  Keep these checks in ascending code
   order.
*/
#define MAYBE_CLASS_TYPE_P(T) \
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
   || TREE_CODE (T) == TYPENAME_TYPE \
   || TREE_CODE (T) == TYPEOF_TYPE \
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
   || TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class (or struct or union) type.  Also nonzero
   for template type parameters, typename types, and instantiated
   template template parameters.  Despite its name, this macro has
   nothing to do with the definition of aggregate given in the
   standard.  Think of this macro as MAYBE_CLASS_TYPE_P.  (Its
   expansion is intentionally identical to MAYBE_CLASS_TYPE_P above.)
   Keep these checks in ascending code order.  */
#define IS_AGGR_TYPE(T) \
  (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
   || TREE_CODE (T) == TYPENAME_TYPE \
   || TREE_CODE (T) == TYPEOF_TYPE \
   || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
   || TYPE_LANG_FLAG_5 (T))
/* Set IS_AGGR_TYPE for T to VAL.  T must be a class, struct, or
   union type.  */
#define SET_IS_AGGR_TYPE(T, VAL) \
  (TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type.  Zero for template type parameters,
   typename types, and so forth.  */
#define CLASS_TYPE_P(T) \
  (IS_AGGR_TYPE_CODE (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Keep these checks in ascending code order.  */
#define IS_AGGR_TYPE_CODE(T) \
  ((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define TAGGED_TYPE_P(T) \
  (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
#define IS_OVERLOAD_TYPE(T) TAGGED_TYPE_P (T)
/* True if this a "Java" type, defined in 'extern "Java"'.  */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)
/* True if this type is dependent.  This predicate is only valid if
   TYPE_DEPENDENT_P_VALID is true.  */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
   result that TYPE_DEPENDENT_P is valid.  */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified.  */
#define CP_TYPE_CONST_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified.  */
#define CP_TYPE_VOLATILE_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified.  */
#define CP_TYPE_RESTRICT_P(NODE) \
  ((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
   volatile-qualified.  Other qualifiers are ignored.  This macro is
   used to test whether or not it is OK to bind an rvalue to a
   reference.  */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
  ((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
   == TYPE_QUAL_CONST)
/* For a FUNCTION_DECL, the argument-type list past the first entry.  */
#define FUNCTION_ARG_CHAIN(NODE) \
  TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of
   TYPE_ARG_TYPES which refers to a user-written parameter.  */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
  skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS.  */
#define FUNCTION_FIRST_USER_PARM(NODE) \
  skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
#define PROMOTES_TO_AGGR_TYPE(NODE, CODE) \
  (((CODE) == TREE_CODE (NODE) \
    && IS_AGGR_TYPE (TREE_TYPE (NODE))) \
   || IS_AGGR_TYPE (NODE))
/* Nonzero iff TYPE is derived from PARENT.  Ignores accessibility and
   ambiguity issues.  */
#define DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_any, NULL) != NULL_TREE)
/* Nonzero iff TYPE is uniquely derived from PARENT.  Ignores
   accessibility.  */
#define UNIQUELY_DERIVED_FROM_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_unique | ba_quiet, NULL) != NULL_TREE)
/* Nonzero iff TYPE is publicly & uniquely derived from PARENT.  */
#define PUBLICLY_UNIQUELY_DERIVED_P(PARENT, TYPE) \
  (lookup_base ((TYPE), (PARENT), ba_ignore_scope | ba_check | ba_quiet, \
                NULL) != NULL_TREE)
/* Gives the visibility specification for a class type.
*/
#define CLASSTYPE_VISIBILITY(TYPE) \
  DECL_VISIBILITY (TYPE_NAME (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
  DECL_VISIBILITY_SPECIFIED (TYPE_NAME (TYPE))
/* A (purpose, value) pair of trees; element type of the
   CLASSTYPE_VCALL_INDICES vector.  */
typedef struct tree_pair_s GTY (())
{
  tree purpose;
  tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
DEF_VEC_O (tree_pair_s);
DEF_VEC_ALLOC_O (tree_pair_s,gc);
/* This is a few header flags for 'struct lang_type'.  Actually,
   all but the first are used only for lang_type_class; they
   are put in this structure to save space.  */
struct lang_type_header GTY(())
{
  BOOL_BITFIELD is_lang_type_class : 1;
  BOOL_BITFIELD has_type_conversion : 1;
  BOOL_BITFIELD has_init_ref : 1;
  BOOL_BITFIELD has_default_ctor : 1;
  BOOL_BITFIELD const_needs_init : 1;
  BOOL_BITFIELD ref_needs_init : 1;
  BOOL_BITFIELD has_const_assign_ref : 1;
  BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
   what is provided in the ordinary tree_type.  In the past, we used
   it for the types of class types, template parameters types,
   typename types, and so forth.  However, there can be many (tens to
   hundreds of thousands) of template parameter types in a
   compilation, and there's no need for this additional information
   in that case.  Therefore, we now use this data structure only for
   class types.

   In the past, it was thought that there would be relatively few
   class types.  However, in the presence of heavy use of templates,
   many (i.e., thousands) of classes can easily be generated.
   Therefore, we should endeavor to keep the size of this structure
   to a minimum.  */
struct lang_type_class GTY(())
{
  struct lang_type_header h;
  unsigned char align;
  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_assign_ref : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;
  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;
  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned java_interface : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;
  unsigned use_template : 2;
  unsigned ptrmemfunc_flag : 1;
  unsigned was_anonymous : 1;
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_assignment_op : 1;
  unsigned lazy_destructor : 1;
  unsigned has_const_init_ref : 1;
  unsigned has_complex_init_ref : 1;
  unsigned has_complex_assign_ref : 1;
  unsigned non_aggregate : 1;
  /* KEY: 1 if the copy constructor is implicitly defined instead of
     user defined.  */
  unsigned has_implicit_copy_constructor : 1;
  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template.  If
     so, make sure to copy it in instantiate_class_template!  */
  /* There are some bits left to fill out a 32-bit word.  Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag.  */
  unsigned dummy : 11; /* KEY: 12 in original GCC */
  tree primary_base;
  VEC(tree_pair_s,gc) *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  VEC(tree,gc) *vbases;
  binding_table nested_udts;
  tree as_base;
  VEC(tree,gc) *pure_virtuals;
  tree friend_classes;
  VEC(tree,gc) * GTY((reorder ("resort_type_method_vec"))) methods;
  tree key_method;
  tree decl_list;
  tree template_info;
  tree befriending_classes;
  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
  /* KEY: The copy constructor to use if the WHIRL translator needs
     to copy objects.  */
  tree copy_constructor;
};
/* The lang_type variant used for pointer-to-member types; `record'
   is the underlying record.  */
struct lang_type_ptrmem GTY(())
{
  struct lang_type_header h;
  tree record;
};
struct lang_type GTY(())
{
  union lang_type_u
  {
    struct lang_type_header GTY((skip (""))) h;
    struct lang_type_class GTY((tag ("1"))) c;
    struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
  } GTY((desc ("%h.h.is_lang_type_class"))) u;
};
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked accessors for the class/ptrmem variants of TYPE_LANG_SPECIFIC;
   abort via lang_check_failed if the wrong variant is stored.  */
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
   if (! lt->u.h.is_lang_type_class) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.c; })
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
   if (lt->u.h.is_lang_type_class) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->u.ptrmem; })
#else
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Fields used for storing information before the class is defined.
   After the class is defined, these fields hold other information.  */
/* VEC(tree) of friends which were defined inline in this class
   definition.
*/
#define CLASSTYPE_INLINE_FRIENDS(NODE) CLASSTYPE_PURE_VIRTUALS (NODE)
/* Nonzero for _CLASSTYPE means that operator delete is defined.  */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
   storage to indicate how many array elements are in use.  */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
  (CLASS_TYPE_P (NODE) \
   && LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
   itself to other types.  */
#define TYPE_HAS_CONVERSION(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
   but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
   -- but that it has not yet been declared.  */
#define CLASSTYPE_LAZY_ASSIGNMENT_OP(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_assignment_op)
/* Nonzero means that NODE (a class type) has a destructor -- but that
   it has not yet been declared.  */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&).  */
#define TYPE_HAS_ASSIGN_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_assign_ref)
/* True iff the class type NODE has an "operator =" whose parameter
   has a parameter of type "const X&".  */
#define TYPE_HAS_CONST_ASSIGN_REF(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_assign_ref)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor.  */
#define TYPE_HAS_INIT_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_init_ref)
#define TYPE_HAS_CONST_INIT_REF(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_const_init_ref)
/* Nonzero if this class defines an overloaded operator new.  (An
   operator new [] doesn't count.)  */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[].  */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined.  I.e., the left brace
   starting the definition of this type has been seen.  */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Mark bits for repeated base checks.  */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
   base object.  */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
   type.  */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
  (LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
   the first noninline non-pure-virtual member function.  NULL_TREE if
   there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector member functions defined in this class.  Each element is
   either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD.  All
   functions with the same name end up in the same slot.  The first
   two elements are for constructors, and destructors, respectively.
   All template conversion operators to innermost template dependent
   types are overloaded on the next slot, if they exist.  Note, the
   names for these functions will not all be the same.  The
   non-template conversion operators & templated conversions to
   non-innermost template types are next, followed by ordinary member
   functions.  There may be empty entries at the end of the vector.
   The conversion operators are unsorted.  The ordinary member
   functions are sorted, once the class is complete.  */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
   functions, types, and friends in the order of declaration.
   The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
   and the RECORD_TYPE for the class template otherwise.  */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go.  */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go.  */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
   operators can appear.  */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE.  These
   are the constructors that take an in-charge parameter.  */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
  (VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_CONSTRUCTOR_SLOT))
/* A FUNCTION_DECL for the destructor for NODE.  These are the
   destructors that take an in-charge parameter.  If
   CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
   until the destructor is created with lazily_declare_fn.  */
#define CLASSTYPE_DESTRUCTORS(NODE) \
  (CLASSTYPE_METHOD_VEC (NODE) \
   ? VEC_index (tree, CLASSTYPE_METHOD_VEC (NODE), CLASSTYPE_DESTRUCTOR_SLOT) \
   : NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or
   enums) found within this class.  This table includes nested member
   class templates.  */
#define CLASSTYPE_NESTED_UTDS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
   which it shares the virtual function table pointer.  */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
  (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
   the base class which contains the virtual function table pointer
   for this class.  */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
   that this type uses in a post-order depth-first left-to-right
   order.  (In other words, these bases appear in the order that they
   should be initialized.)  */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
   i.e., NODE without virtual base classes.  */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type.  */
#define IS_FAKE_BASE_TYPE(NODE) \
  (TREE_CODE (NODE) == RECORD_TYPE \
   && TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
   && CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
   base classes, for when we use this type as a base itself.  */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes.  */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
  (CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* True if this a Java interface type, declared with
   '__attribute__ ((java_interface))'.
*/
#define TYPE_JAVA_INTERFACE(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
/* A VEC(tree) of virtual functions which cannot be inherited by
   derived classes.  When deriving from this type, the derived
   class must provide its own definition for each of these functions.  */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type has an X() constructor.  */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member.  */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is a non-POD class.  */
#define CLASSTYPE_NON_POD_P(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class contains pod types whose default
   initialization is not a zero initialization (namely, pointers to
   data members).  */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI.  */
#define CLASSTYPE_EMPTY_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
   virtual function table pointer.  */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject.  */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend.  The
   TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
   case of a template friend.  */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class.  */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* Say whether this node was declared as a "class" or a "struct".  */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
   which have no specified initialization.  */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
  (TYPE_LANG_SPECIFIC (NODE) \
   ? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
   which have no specified initialization.  */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
  (TYPE_LANG_SPECIFIC (NODE) \
   ? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
   `#pragma interface', and it is not included in its implementation
   file.  */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
   typeinfo, and other similar per-class data should be emitted in
   this translation unit.  This flag does not indicate whether or not
   these items should be emitted; it only indicates that we know one
   way or the other.  */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN.  */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this
   class.  */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information.  */
/* Nonzero means that this class is on a path leading to a new vtable.  */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable.  Any copies will not
   have this flag set.  */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality.  For a binfo,
   this is functionally equivalent to using same_type_p, but
   measurably faster.  At least one of the arguments must be a
   BINFO_TYPE.  The other can be a BINFO_TYPE or a regular type.  If
   BINFO_TYPE(T) ever stops being the main variant of the class the
   binfo is for, this macro must change.  */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
   be a non-virtual primary base (since it would then use the vtable from
   a derived class and never become non-primary.)  */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
  (BINFO_NEW_VTABLE_MARKED (B) = 1, \
   gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
   gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
   be searched.  */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
   is a nearly-empty virtual base that has been taken by some other
   base in the complete hierarchy).  */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class.  */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines.  */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A VEC(tree_pair_s) of the vcall indices associated with the class
   NODE.  The PURPOSE of each element is a FUNCTION_DECL for a virtual
   function.  The VALUE is the index into the virtual table where the
   vcall offset for that function is stored, when NODE is a virtual
   base.  */
#define CLASSTYPE_VCALL_INDICES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE.  The primary vtable will be
   first, followed by the construction vtables and VTT, if any.  */
#define CLASSTYPE_VTABLES(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
   such variable has been created.  This field is only set for the
   TYPE_MAIN_VARIANT of the class.  */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list.  */
/* The number of bytes by which to adjust the `this' pointer when
   calling this virtual function.  Subtract this value from the this
   pointer.  Always non-NULL, might be constant zero though.  */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
   when calling this virtual function.  Add the value at that vtable
   index to the this pointer.  */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call.  */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
   this type can raise.  Each TREE_VALUE is a _TYPE.  The TREE_VALUE
   will be NULL_TREE to indicate a throw specification of `()', or
   no exceptions allowed.  */
#define TYPE_RAISES_EXCEPTIONS(NODE) TYPE_LANG_SLOT_1 (NODE)
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared
   `throw()'.
*/
#define TYPE_NOTHROW_P(NODE) \
  (TYPE_RAISES_EXCEPTIONS (NODE) \
   && TREE_VALUE (TYPE_RAISES_EXCEPTIONS (NODE)) == NULL_TREE)
/* The binding level associated with the namespace.  */
#define NAMESPACE_LEVEL(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->decl_flags.u.level)
/* If a DECL has DECL_LANG_SPECIFIC, it is either a lang_decl_flags or
   a lang_decl (which has lang_decl_flags as its initial prefix).
   This macro is nonzero for tree nodes whose DECL_LANG_SPECIFIC is
   the full lang_decl, and not just lang_decl_flags.  Keep these
   checks in ascending code order.  */
#define CAN_HAVE_FULL_LANG_DECL_P(NODE) \
  (!(TREE_CODE (NODE) == FIELD_DECL \
     || TREE_CODE (NODE) == VAR_DECL \
     || TREE_CODE (NODE) == CONST_DECL \
     || TREE_CODE (NODE) == USING_DECL))
/* C++-specific flags common to every DECL with DECL_LANG_SPECIFIC;
   this is the initial prefix of struct lang_decl below.  */
struct lang_decl_flags GTY(())
{
  ENUM_BITFIELD(languages) language : 4;
  unsigned global_ctor_p : 1;
  unsigned global_dtor_p : 1;
  unsigned anticipated_p : 1;
  unsigned template_conv_p : 1;
  unsigned operator_attr : 1;
  unsigned constructor_attr : 1;
  unsigned destructor_attr : 1;
  unsigned friend_attr : 1;
  unsigned static_function : 1;
  unsigned pure_virtual : 1;
  unsigned has_in_charge_parm_p : 1;
  unsigned has_vtt_parm_p : 1;
  unsigned deferred : 1;
  unsigned use_template : 2;
  unsigned nonconverting : 1;
  unsigned not_really_extern : 1;
  unsigned initialized_in_class : 1;
  unsigned assignment_operator_p : 1;
  unsigned u1sel : 1;
  unsigned u2sel : 1;
  unsigned can_be_full : 1;
  unsigned thunk_p : 1;
  unsigned this_thunk_p : 1;
  unsigned repo_available_p : 1;
  unsigned hidden_friend_p : 1;
  unsigned threadprivate_p : 1;
  /* One unused bit.  */
  union lang_decl_u {
    /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
       THUNK_ALIAS.  In a FUNCTION_DECL for which DECL_THUNK_P does
       not hold, VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
       DECL_TEMPLATE_INFO.  */
    tree GTY ((tag ("0"))) template_info;
    /* In a NAMESPACE_DECL, this is NAMESPACE_LEVEL.  */
    struct cp_binding_level * GTY ((tag ("1"))) level;
  } GTY ((desc ("%1.u1sel"))) u;
  union lang_decl_u2 {
    /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
       THUNK_VIRTUAL_OFFSET.  Otherwise this is DECL_ACCESS.  */
    tree GTY ((tag ("0"))) access;
    /* For VAR_DECL in function, this is DECL_DISCRIMINATOR.  */
    int GTY ((tag ("1"))) discriminator;
  } GTY ((desc ("%1.u2sel"))) u2;
};
/* sorted_fields is sorted based on a pointer, so we need to be able
   to resort it if pointers get rearranged.  */
struct lang_decl GTY(())
{
  struct lang_decl_flags decl_flags;
  union lang_decl_u4
  {
    struct full_lang_decl
    {
      /* In an overloaded operator, this is the value of
         DECL_OVERLOADED_OPERATOR_P.  */
      ENUM_BITFIELD (tree_code) operator_code : 8;
      unsigned u3sel : 1;
      unsigned pending_inline_p : 1;
      unsigned spare : 22;
      /* For a non-thunk function decl, this is a tree list of
         friendly classes.  For a thunk function decl, it is the
         thunked to function decl.  */
      tree befriending_classes;
      /* For a non-virtual FUNCTION_DECL, this is
         DECL_FRIEND_CONTEXT.  For a virtual FUNCTION_DECL for which
         DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS.  Both
         this pointer and result pointer adjusting thunks are chained
         here.  This pointer thunks to return pointer thunks will be
         chained on the return pointer thunk.  */
      tree context;
      union lang_decl_u5
      {
        /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
           DECL_CLONED_FUNCTION.  */
        tree GTY ((tag ("0"))) cloned_function;
        /* In a FUNCTION_DECL for which THUNK_P holds this is the
           THUNK_FIXED_OFFSET.  */
        HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
      } GTY ((desc ("%0.decl_flags.thunk_p"))) u5;
      union lang_decl_u3
      {
        struct sorted_fields_type * GTY ((tag ("0"), reorder ("resort_sorted_fields"))) sorted_fields;
        struct cp_token_cache * GTY ((tag ("2"))) pending_inline_info;
        struct language_function * GTY ((tag ("1"))) saved_language_function;
      } GTY ((desc ("%1.u3sel + %1.pending_inline_p"))) u;
      /* If the named return value optimization is applied to this
         function, this is the VAR_DECL of the named return object.  */
      tree named_return_object; /* KEY */
    } GTY ((tag ("1"))) f;
  } GTY ((desc ("%1.decl_flags.can_be_full"))) u;
};
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked accessor for lang_decl_flags.u2; aborts via
   lang_check_failed if u2sel does not match TF.  */
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
   if (lt->decl_flags.u2sel != TF) \
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
   &lt->decl_flags.u2; })
#else
#define LANG_DECL_U2_CHECK(NODE, TF) \
  (&DECL_LANG_SPECIFIC (NODE)->decl_flags.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
   declaration.  Some entities (like a member function in a local
   class, or a local variable) do not have linkage at all, and this
   macro should not be used in those cases.

   Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
   created by language-independent code, and has C linkage.  Most
   VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
   we do create DECL_LANG_SPECIFIC for variables with non-C++
   linkage.  */
#define DECL_LANGUAGE(NODE) \
  (DECL_LANG_SPECIFIC (NODE) \
   ? DECL_LANG_SPECIFIC (NODE)->decl_flags.language \
   : (TREE_CODE (NODE) == FUNCTION_DECL \
      ? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE.  */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
  (DECL_LANG_SPECIFIC (NODE)->decl_flags.language = (LANGUAGE))
/* For FUNCTION_DECLs: nonzero means that this function is a
   constructor.  */
#define DECL_CONSTRUCTOR_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->decl_flags.constructor_attr)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
   object.  */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) \
   && DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
   object.  */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) \
   && DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either
   the specialized in-charge constructor or the specialized
   not-in-charge constructor.  */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor.  */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
  (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE is a destructor.  */
#define DECL_DESTRUCTOR_P(NODE) \
  (DECL_LANG_SPECIFIC (NODE)->decl_flags.destructor_attr)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
   specialized in-charge constructor, in-charge deleting constructor,
   or the base destructor.  */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
  (DECL_DESTRUCTOR_P (NODE) && !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object.  */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
  (DECL_DESTRUCTOR_P (NODE) \
   && DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
   object.  */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
  (DECL_DESTRUCTOR_P (NODE) \
   && DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
   object that deletes the object after it has been destroyed.
*/ #define DECL_DELETING_DESTRUCTOR_P(NODE) \ (DECL_DESTRUCTOR_P (NODE) \ && DECL_NAME (NODE) == deleting_dtor_identifier) /* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or destructor. */ #define DECL_CLONED_FUNCTION_P(NODE) \ ((TREE_CODE (NODE) == FUNCTION_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL) \ && DECL_LANG_SPECIFIC (NODE) \ && !DECL_LANG_SPECIFIC (NODE)->decl_flags.thunk_p \ && DECL_CLONED_FUNCTION (NODE) != NULL_TREE) /* If DECL_CLONED_FUNCTION_P holds, this is the function that was cloned. */ #define DECL_CLONED_FUNCTION(NODE) \ (DECL_LANG_SPECIFIC (NON_THUNK_FUNCTION_CHECK(NODE))->u.f.u5.cloned_function) /* Perform an action for each clone of FN, if FN is a function with clones. This macro should be used like: FOR_EACH_CLONE (clone, fn) { ... } */ #define FOR_EACH_CLONE(CLONE, FN) \ if (TREE_CODE (FN) == FUNCTION_DECL \ && (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \ for (CLONE = TREE_CHAIN (FN); \ CLONE && DECL_CLONED_FUNCTION_P (CLONE); \ CLONE = TREE_CHAIN (CLONE)) #ifdef KEY #define DECL_NAMED_RETURN_OBJECT(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.named_return_object) #endif /* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */ #define DECL_DISCRIMINATOR_P(NODE) \ (TREE_CODE (NODE) == VAR_DECL \ && DECL_FUNCTION_SCOPE_P (NODE)) /* Discriminator for name mangling. */ #define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator) /* Nonzero if the VTT parm has been added to NODE. */ #define DECL_HAS_VTT_PARM_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.has_vtt_parm_p) /* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is required. */ #define DECL_NEEDS_VTT_PARM_P(NODE) \ (CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \ && (DECL_BASE_CONSTRUCTOR_P (NODE) \ || DECL_BASE_DESTRUCTOR_P (NODE))) /* Nonzero if NODE is a user-defined conversion operator. 
*/ #define DECL_CONV_FN_P(NODE) \ (DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE))) /* If FN is a conversion operator, the type to which it converts. Otherwise, NULL_TREE. */ #define DECL_CONV_FN_TYPE(FN) \ (DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE) /* Nonzero if NODE, which is a TEMPLATE_DECL, is a template conversion operator to a type dependent on the innermost template args. */ #define DECL_TEMPLATE_CONV_FN_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.template_conv_p) /* Set the overloaded operator code for NODE to CODE. */ #define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.operator_code = (CODE)) /* If NODE is an overloaded operator, then this returns the TREE_CODE associated with the overloaded operator. DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine whether or not NODE is an assignment operator. If NODE is not an overloaded operator, ERROR_MARK is returned. Since the numerical value of ERROR_MARK is zero, this macro can be used as a predicate to test whether or not NODE is an overloaded operator. */ #define DECL_OVERLOADED_OPERATOR_P(NODE) \ (IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.f.operator_code : ERROR_MARK) /* Nonzero if NODE is an assignment operator. */ #define DECL_ASSIGNMENT_OPERATOR_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.assignment_operator_p) /* For FUNCTION_DECLs: nonzero means that this function is a constructor or a destructor with an extra in-charge parameter to control whether or not virtual bases are constructed. */ #define DECL_HAS_IN_CHARGE_PARM_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.has_in_charge_parm_p) /* Nonzero if DECL is a declaration of __builtin_constant_p. 
*/ #define DECL_IS_BUILTIN_CONSTANT_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P) /* Nonzero for _DECL means that this decl appears in (or will appear in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for detecting circularity in case members are multiply defined. In the case of a VAR_DECL, it is also used to determine how program storage should be allocated. */ #define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE)) /* Nonzero for a VAR_DECL means that the variable's initialization (if any) has been processed. (In general, DECL_INITIALIZED_P is !DECL_EXTERN, but static data members may be initialized even if not defined.) */ #define DECL_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL iff an explicit initializer was provided. */ #define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \ (TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL that was initialized with a constant-expression. */ #define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE))) /* Nonzero for a VAR_DECL that can be used in an integral constant expression. [expr.const] An integral constant-expression can only involve ... const variables of static or enumeration types initialized with constant expressions ... The standard does not require that the expression be non-volatile. G++ implements the proposed correction in DR 457. */ #define DECL_INTEGRAL_CONSTANT_VAR_P(NODE) \ (TREE_CODE (NODE) == VAR_DECL \ && CP_TYPE_CONST_NON_VOLATILE_P (TREE_TYPE (NODE)) \ && INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (NODE)) \ && DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (NODE)) /* Nonzero if the DECL was initialized in the class definition itself, rather than outside the class. This is used for both static member VAR_DECLS, and FUNTION_DECLS that are defined in the class. 
*/ #define DECL_INITIALIZED_IN_CLASS_P(DECL) \ (DECL_LANG_SPECIFIC (DECL)->decl_flags.initialized_in_class) /* Nonzero for DECL means that this decl is just a friend declaration, and should not be added to the list of members for this class. */ #define DECL_FRIEND_P(NODE) (DECL_LANG_SPECIFIC (NODE)->decl_flags.friend_attr) /* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */ #define DECL_BEFRIENDING_CLASSES(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.befriending_classes) /* Nonzero for FUNCTION_DECL means that this decl is a static member function. */ #define DECL_STATIC_FUNCTION_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.static_function) /* Nonzero for FUNCTION_DECL means that this decl is a non-static member function. */ #define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE) /* Nonzero for FUNCTION_DECL means that this decl is a member function (static or non-static). */ #define DECL_FUNCTION_MEMBER_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE)) /* Nonzero for FUNCTION_DECL means that this member function has `this' as const X *const. */ #define DECL_CONST_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for FUNCTION_DECL means that this member function has `this' as volatile X *const. */ #define DECL_VOLATILE_MEMFUNC_P(NODE) \ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \ && CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \ (TYPE_ARG_TYPES (TREE_TYPE (NODE)))))) /* Nonzero for a DECL means that this member is a non-static member. */ #define DECL_NONSTATIC_MEMBER_P(NODE) \ ((TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE)) \ || TREE_CODE (NODE) == FIELD_DECL) /* Nonzero for _DECL means that this member object type is mutable. 
*/ #define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE)) /* Nonzero for _DECL means that this constructor is a non-converting constructor. */ #define DECL_NONCONVERTING_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.nonconverting) /* Nonzero for FUNCTION_DECL means that this member function is a pure virtual function. */ #define DECL_PURE_VIRTUAL_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.pure_virtual) /* True (in a FUNCTION_DECL) if NODE is a virtual function that is an invalid overrider for a function from a base class. Once we have complained about an invalid overrider we avoid complaining about it again. */ #define DECL_INVALID_OVERRIDER_P(NODE) \ (DECL_LANG_FLAG_4 (NODE)) /* The thunks associated with NODE, a FUNCTION_DECL. */ #define DECL_THUNKS(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.context) /* Nonzero if NODE is a thunk, rather than an ordinary function. */ #define DECL_THUNK_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL \ && DECL_LANG_SPECIFIC (NODE) \ && DECL_LANG_SPECIFIC (NODE)->decl_flags.thunk_p) /* Set DECL_THUNK_P for node. */ #define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.thunk_p = 1, \ DECL_LANG_SPECIFIC (NODE)->u.f.u3sel = 1, \ DECL_LANG_SPECIFIC (NODE)->decl_flags.this_thunk_p = (THIS_ADJUSTING)) /* Nonzero if NODE is a this pointer adjusting thunk. */ #define DECL_THIS_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && DECL_LANG_SPECIFIC (NODE)->decl_flags.this_thunk_p) /* Nonzero if NODE is a result pointer adjusting thunk. */ #define DECL_RESULT_THUNK_P(NODE) \ (DECL_THUNK_P (NODE) && !DECL_LANG_SPECIFIC (NODE)->decl_flags.this_thunk_p) /* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */ #define DECL_NON_THUNK_FUNCTION_P(NODE) \ (TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE)) /* Nonzero if NODE is `extern "C"'. */ #define DECL_EXTERN_C_P(NODE) \ (DECL_LANGUAGE (NODE) == lang_c) /* Nonzero if NODE is an `extern "C"' function. 
*/ #define DECL_EXTERN_C_FUNCTION_P(NODE) \ (DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE)) /* True iff DECL is an entity with vague linkage whose definition is available in this translation unit. */ #define DECL_REPO_AVAILABLE_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.repo_available_p) /* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a template function. */ #define DECL_PRETTY_FUNCTION_P(NODE) \ (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))) /* The _TYPE context in which this _DECL appears. This field holds the class where a virtual function instance is actually defined. */ #define DECL_CLASS_CONTEXT(NODE) \ (DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE) /* For a non-member friend function, the class (if any) in which this friend was defined. For example, given: struct S { friend void f (); }; the DECL_FRIEND_CONTEXT for `f' will be `S'. */ #define DECL_FRIEND_CONTEXT(NODE) \ ((DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \ ? DECL_LANG_SPECIFIC (NODE)->u.f.context \ : NULL_TREE) /* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */ #define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \ (DECL_LANG_SPECIFIC (NODE)->u.f.context = (CONTEXT)) /* NULL_TREE in DECL_CONTEXT represents the global namespace. */ #define CP_DECL_CONTEXT(NODE) \ (DECL_CONTEXT (NODE) ? DECL_CONTEXT (NODE) : global_namespace) #define CP_TYPE_CONTEXT(NODE) \ (TYPE_CONTEXT (NODE) ? TYPE_CONTEXT (NODE) : global_namespace) #define FROB_CONTEXT(NODE) ((NODE) == global_namespace ? NULL_TREE : (NODE)) /* 1 iff NODE has namespace scope, including the global namespace. */ #define DECL_NAMESPACE_SCOPE_P(NODE) \ (!DECL_TEMPLATE_PARM_P (NODE) \ && TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL) #define TYPE_NAMESPACE_SCOPE_P(NODE) \ (TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL) /* 1 iff NODE is a class member. 
*/ #define DECL_CLASS_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE))) #define TYPE_CLASS_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE))) /* 1 iff NODE is function-local. */ #define DECL_FUNCTION_SCOPE_P(NODE) \ (DECL_CONTEXT (NODE) \ && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL) #define TYPE_FUNCTION_SCOPE_P(NODE) \ (TYPE_CONTEXT (NODE) \ && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL) /* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for both the primary typeinfo object and the associated NTBS name. */ #define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)) /* 1 iff VAR_DECL node NODE is virtual table or VTT. */ #define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE)) /* Returns 1 iff VAR_DECL is a construction virtual table. DECL_VTABLE_OR_VTT_P will be true in this case and must be checked before using this macro. */ #define DECL_CONSTRUCTION_VTABLE_P(NODE) \ TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)) /* 1 iff NODE is function-local, but for types. */ #define LOCAL_CLASS_P(NODE) \ (decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE) /* For a NAMESPACE_DECL: the list of using namespace directives The PURPOSE is the used namespace, the value is the namespace that is the common ancestor. */ #define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE)) /* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users of a namespace, to record the transitive closure of using namespace. */ #define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE)) /* In a NAMESPACE_DECL, the list of namespaces which have associated themselves with this one. */ #define DECL_NAMESPACE_ASSOCIATIONS(NODE) \ (NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree) /* In a NAMESPACE_DECL, points to the original namespace if this is a namespace alias. 
*/ #define DECL_NAMESPACE_ALIAS(NODE) \ DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE)) #define ORIGINAL_NAMESPACE(NODE) \ (DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE)) /* Nonzero if NODE is the std namespace. */ #define DECL_NAMESPACE_STD_P(NODE) \ (TREE_CODE (NODE) == NAMESPACE_DECL \ && CP_DECL_CONTEXT (NODE) == global_namespace \ && DECL_NAME (NODE) == std_identifier) /* In a TREE_LIST concatenating using directives, indicate indirect directives */ #define TREE_INDIRECT_USING(NODE) (TREE_LIST_CHECK (NODE)->common.lang_flag_0) extern tree decl_shadowed_for_var_lookup (tree); extern void decl_shadowed_for_var_insert (tree, tree); /* Non zero if this is a using decl for a dependent scope. */ #define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE)) /* The scope named in a using decl. */ #define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE)) /* The decls named by a using decl. */ #define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE)) /* In a VAR_DECL, true if we have a shadowed local variable in the shadowed var table for this VAR_DECL. */ #define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \ (VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p) /* In a VAR_DECL for a variable declared in a for statement, this is the shadowed (local) variable. */ #define DECL_SHADOWED_FOR_VAR(NODE) \ (DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL) #define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \ (decl_shadowed_for_var_insert (NODE, VAL)) /* In a FUNCTION_DECL, this is nonzero if this function was defined in the class definition. We have saved away the text of the function, but have not yet processed it. */ #define DECL_PENDING_INLINE_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.pending_inline_p) /* If DECL_PENDING_INLINE_P holds, this is the saved text of the function. 
*/ #define DECL_PENDING_INLINE_INFO(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.u.pending_inline_info) /* For a TYPE_DECL: if this structure has many fields, we'll sort them and put them into a TREE_VEC. */ #define DECL_SORTED_FIELDS(NODE) \ (DECL_LANG_SPECIFIC (TYPE_DECL_CHECK (NODE))->u.f.u.sorted_fields) /* True if on the deferred_fns (see decl2.c) list. */ #define DECL_DEFERRED_FN(DECL) \ (DECL_LANG_SPECIFIC (DECL)->decl_flags.deferred) /* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or TEMPLATE_DECL, the entity is either a template specialization (if DECL_USE_TEMPLATE is non-zero) or the abstract instance of the template itself. In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a specialization or abstract instance. The TREE_VALUE is the template arguments used to specialize the template. Consider: template <typename T> struct S { friend void f(T) {} }; In this case, S<int>::f is, from the point of view of the compiler, an instantiation of a template -- but, from the point of view of the language, each instantiation of S results in a wholly unrelated global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f will be non-NULL, but DECL_USE_TEMPLATE will be zero. */ #define DECL_TEMPLATE_INFO(NODE) \ (DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK (NODE)) \ ->decl_flags.u.template_info) /* For a VAR_DECL, indicates that the variable has been processed. This flag is set and unset throughout the code; it is always used for a temporary purpose. */ #define DECL_VAR_MARKED_P(NODE) \ (DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))) /* Template information for a RECORD_TYPE or UNION_TYPE. */ #define CLASSTYPE_TEMPLATE_INFO(NODE) \ (LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info) /* Template information for an ENUMERAL_TYPE. 
Although an enumeration may not be a primary template, it may be declared within the scope of a primary template and the enumeration constants may depend on non-type template parameters. */ #define ENUM_TEMPLATE_INFO(NODE) \ (TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE))) /* Template information for a template template parameter. */ #define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \ (LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \ ->template_info) /* Template information for an ENUMERAL_, RECORD_, or UNION_TYPE. */ #define TYPE_TEMPLATE_INFO(NODE) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ ? ENUM_TEMPLATE_INFO (NODE) : \ (TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \ ? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) : \ (TYPE_LANG_SPECIFIC (NODE) \ ? CLASSTYPE_TEMPLATE_INFO (NODE) \ : NULL_TREE))) /* Set the template information for an ENUMERAL_, RECORD_, or UNION_TYPE to VAL. */ #define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \ (TREE_CODE (NODE) == ENUMERAL_TYPE \ ? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \ : (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL))) #define TI_TEMPLATE(NODE) (TREE_PURPOSE (NODE)) #define TI_ARGS(NODE) (TREE_VALUE (NODE)) #define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE) /* We use TREE_VECs to hold template arguments. If there is only one level of template arguments, then the TREE_VEC contains the arguments directly. If there is more than one level of template arguments, then each entry in the TREE_VEC is itself a TREE_VEC, containing the template arguments for a single level. The first entry in the outer TREE_VEC is the outermost level of template parameters; the last is the innermost. It is incorrect to ever form a template argument vector containing only one level of arguments, but which is a TREE_VEC containing as its only entry the TREE_VEC for that level. */ /* Nonzero if the template arguments is actually a vector of vectors, rather than just a vector. 
*/ #define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \ (NODE && TREE_VEC_ELT (NODE, 0) \ && TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC) /* The depth of a template argument vector. When called directly by the parser, we use a TREE_LIST rather than a TREE_VEC to represent template arguments. In fact, we may even see NULL_TREE if there are no template arguments. In both of those cases, there is only one level of template arguments. */ #define TMPL_ARGS_DEPTH(NODE) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1) /* The LEVELth level of the template ARGS. The outermost level of args is level 1, not level 0. */ #define TMPL_ARGS_LEVEL(ARGS, LEVEL) \ (TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \ ? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS)) /* Set the LEVELth level of the template ARGS to VAL. This macro does not work with single-level argument vectors. */ #define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \ (TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL)) /* Accesses the IDXth parameter in the LEVELth level of the ARGS. */ #define TMPL_ARG(ARGS, LEVEL, IDX) \ (TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX)) /* Given a single level of template arguments in NODE, return the number of arguments. */ #define NUM_TMPL_ARGS(NODE) \ (TREE_VEC_LENGTH (NODE)) /* Returns the innermost level of template arguments in ARGS. */ #define INNERMOST_TEMPLATE_ARGS(NODE) \ (get_innermost_template_args ((NODE), 1)) /* The number of levels of template parameters given by NODE. */ #define TMPL_PARMS_DEPTH(NODE) \ ((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE))) /* The TEMPLATE_DECL instantiated or specialized by NODE. This TEMPLATE_DECL will be the immediate parent, not the most general template. For example, in: template <class T> struct S { template <class U> void f(U); } the FUNCTION_DECL for S<int>::f<double> will have, as its DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'. 
As a special case, for a member friend template of a template
   class, this value will not be a TEMPLATE_DECL, but rather an
   IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
   any explicit template arguments provided.  For example, in:

     template <class T> struct S { friend void f<int>(int, double); }

   the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
   DECL_TI_ARGS will be {int}.  */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))

/* The template arguments used to obtain this decl from the most
   general form of DECL_TI_TEMPLATE.  For the example given for
   DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}.  These
   are always the full set of arguments required to instantiate this
   declaration from the most general template specialized here.  */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))

/* The TEMPLATE_DECL associated with NODE, a class type.  Even if NODE
   will be generated from a partial specialization, the TEMPLATE_DECL
   referred to here will be the original template.  For example,
   given:

     template <typename T> struct S {};
     template <typename T> struct S<T*> {};

   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>.  */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))

/* For a template instantiation TYPE, returns the TYPE corresponding
   to the primary template.  Otherwise returns TYPE itself.  */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
  ((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
    && !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
   ? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
				      (CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
   : (TYPE))

/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs.  */
#define TYPE_TI_TEMPLATE(NODE) \
  (TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))

/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE.
*/ #define TYPE_TI_ARGS(NODE) \ (TI_ARGS (TYPE_TEMPLATE_INFO (NODE))) #define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE) /* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the sense of [temp.mem]. */ #define DECL_MEMBER_TEMPLATE_P(NODE) \ (DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE))) /* Nonzero if the NODE corresponds to the template parameters for a member template, whose inline definition is being processed after the class definition is complete. */ #define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE) /* In a FUNCTION_DECL, the saved language-specific per-function data. */ #define DECL_SAVED_FUNCTION_DATA(NODE) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE)) \ ->u.f.u.saved_language_function) /* Indicates an indirect_expr is for converting a reference. */ #define REFERENCE_REF_P(NODE) \ TREE_LANG_FLAG_0 (INDIRECT_REF_CHECK (NODE)) #define NEW_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_GLOBAL(NODE) \ TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE)) #define DELETE_EXPR_USE_VEC(NODE) \ TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE)) /* Indicates that this is a non-dependent COMPOUND_EXPR which will resolve to a function call. */ #define COMPOUND_EXPR_OVERLOADED(NODE) \ TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE)) /* In a CALL_EXPR appearing in a template, true if Koenig lookup should be performed at instantiation time. */ #define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE)) /* Indicates whether a string literal has been parenthesized. Such usages are disallowed in certain circumstances. */ #define PAREN_STRING_LITERAL_P(NODE) \ TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE)) /* Nonzero if this AGGR_INIT_EXPR provides for initialization via a constructor call, rather than an ordinary function call. */ #define AGGR_INIT_VIA_CTOR_P(NODE) \ TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE)) /* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a TEMPLATE_DECL. 
This macro determines whether or not a given class type is really a template type, as opposed to an instantiation or specialization of one. */ #define CLASSTYPE_IS_TEMPLATE(NODE) \ (CLASSTYPE_TEMPLATE_INFO (NODE) \ && !CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE))) /* The name used by the user to name the typename type. Typically, this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the corresponding TYPE_DECL. However, this may also be a TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */ #define TYPENAME_TYPE_FULLNAME(NODE) (TYPENAME_TYPE_CHECK (NODE))->type.values /* True if a TYPENAME_TYPE was declared as an "enum". */ #define TYPENAME_IS_ENUM_P(NODE) \ (TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE))) /* True if a TYPENAME_TYPE was declared as a "class", "struct", or "union". */ #define TYPENAME_IS_CLASS_P(NODE) \ (TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE))) /* Nonzero in INTEGER_CST means that this int is negative by dint of using a twos-complement negated operand. */ #define TREE_NEGATED_INT(NODE) TREE_LANG_FLAG_0 (INTEGER_CST_CHECK (NODE)) /* [class.virtual] A class that declares or inherits a virtual function is called a polymorphic class. */ #define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE)) /* Nonzero if this class has a virtual function table pointer. */ #define TYPE_CONTAINS_VPTR_P(NODE) \ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE)) /* This flag is true of a local VAR_DECL if it was declared in a for statement, but we are no longer in the scope of the for. */ #define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE)) /* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL if we already emitted a warning about using it. */ #define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)) /* Nonzero if NODE is a FUNCTION_DECL (for a function with global scope) declared in a local scope. 
*/ #define DECL_LOCAL_FUNCTION_P(NODE) \ DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE)) /* Nonzero if NODE is a DECL which we know about but which has not been explicitly declared, such as a built-in function or a friend declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P will be set. */ #define DECL_ANTICIPATED(NODE) \ (DECL_LANG_SPECIFIC (DECL_COMMON_CHECK (NODE))->decl_flags.anticipated_p) /* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend within a class but has not been declared in the surrounding scope. The function is invisible except via argument dependent lookup. */ #define DECL_HIDDEN_FRIEND_P(NODE) \ (DECL_LANG_SPECIFIC (DECL_COMMON_CHECK (NODE))->decl_flags.hidden_friend_p) /* Nonzero if DECL has been declared threadprivate by #pragma omp threadprivate. */ #define CP_DECL_THREADPRIVATE_P(DECL) \ (DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->decl_flags.threadprivate_p) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP) /* Returns nonzero if DECL has external linkage, as specified by the language standard. (This predicate may hold even when the corresponding entity is not actually given external linkage in the object file; see decl_linkage for details.) */ #define DECL_EXTERNAL_LINKAGE_P(DECL) \ (decl_linkage (DECL) == lk_external) /* Keep these codes in ascending code order. */ #define INTEGRAL_CODE_P(CODE) \ ((CODE) == ENUMERAL_TYPE \ || (CODE) == BOOLEAN_TYPE \ || (CODE) == INTEGER_TYPE) /* [basic.fundamental] Types bool, char, wchar_t, and the signed and unsigned integer types are collectively called integral types. Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration types as well, which is incorrect in C++. Keep these checks in ascending code order. */ #define CP_INTEGRAL_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || TREE_CODE (TYPE) == INTEGER_TYPE) /* Returns true if TYPE is an integral or enumeration name. 
Keep these checks in ascending code order. */ #define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE)) /* [basic.fundamental] Integral and floating types are collectively called arithmetic types. Keep these checks in ascending code order. */ #define ARITHMETIC_TYPE_P(TYPE) \ (CP_INTEGRAL_TYPE_P (TYPE) || TREE_CODE (TYPE) == REAL_TYPE) /* [basic.types] Arithmetic types, enumeration types, pointer types, and pointer-to-member types, are collectively called scalar types. Keep these checks in ascending code order. */ #define SCALAR_TYPE_P(TYPE) \ (TYPE_PTRMEM_P (TYPE) \ || TREE_CODE (TYPE) == ENUMERAL_TYPE \ || ARITHMETIC_TYPE_P (TYPE) \ || TYPE_PTR_P (TYPE) \ || TYPE_PTRMEMFUNC_P (TYPE)) /* [dcl.init.aggr] An aggregate is an array or a class with no user-declared constructors, no private or protected non-static data members, no base classes, and no virtual functions. As an extension, we also treat vectors as aggregates. Keep these checks in ascending code order. */ #define CP_AGGREGATE_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == VECTOR_TYPE \ ||TREE_CODE (TYPE) == ARRAY_TYPE \ || (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE))) /* Nonzero for a class type means that the class type has a user-declared constructor. */ #define TYPE_HAS_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE)) /* When appearing in an INDIRECT_REF, it means that the tree structure underneath is actually a call to a constructor. This is needed when the constructor must initialize local storage (which can be automatically destroyed), rather than allowing it to allocate space from the heap. When appearing in a SAVE_EXPR, it means that underneath is a call to a constructor. When appearing in a CONSTRUCTOR, the expression is a compound literal. When appearing in a FIELD_DECL, it means that this field has been duly initialized in its constructor. 
*/
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))

/* True if NODE is a brace-enclosed initializer.  */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
  (TREE_CODE (NODE) == CONSTRUCTOR && !TREE_TYPE (NODE))

/* True if NODE is a compound-literal, i.e., a brace-enclosed
   initializer cast to a particular type.  */
#define COMPOUND_LITERAL_P(NODE) \
  (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))

/* True if NODE is a CONSTRUCTOR with no elements and without the
   TREE_HAS_CONSTRUCTOR flag, i.e. an empty `{}' initializer.  */
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
				    && VEC_empty (constructor_elt, \
						  CONSTRUCTOR_ELTS (NODE)) \
				    && !TREE_HAS_CONSTRUCTOR (NODE))

/* Nonzero means that an object of this type can not be initialized using
   an initializer list.  */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
  (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
  (IS_AGGR_TYPE (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))

/* Nonzero if there is a user-defined X::op=(x&) for this class.  */
#define TYPE_HAS_COMPLEX_ASSIGN_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_assign_ref)
/* Companion flag for copy-initialization; consumed by
   TYPE_HAS_TRIVIAL_INIT_REF below.  */
#define TYPE_HAS_COMPLEX_INIT_REF(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_init_ref)

/* Nonzero if TYPE has a trivial destructor.  From [class.dtor]:

     A destructor is trivial if it is an implicitly declared
     destructor and if:

       - all of the direct base classes of its class have trivial
	 destructors,

       - for all of the non-static data members of its class that are
	 of class type (or array thereof), each such class has a
	 trivial destructor.  */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
  (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))

/* Nonzero for _TYPE node means that this type does not have a trivial
   destructor.  Therefore, destroying an object of this type will
   involve a call to a destructor.  This can apply to objects of
   ARRAY_TYPE if the type of the elements needs a destructor.  */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
  (TYPE_LANG_FLAG_4 (NODE))

/* Nonzero for class type means that copy initialization of this type can use
   a bitwise copy.
*/ #define TYPE_HAS_TRIVIAL_INIT_REF(NODE) \ (TYPE_HAS_INIT_REF (NODE) && ! TYPE_HAS_COMPLEX_INIT_REF (NODE)) /* Nonzero for class type means that assignment of this type can use a bitwise copy. */ #define TYPE_HAS_TRIVIAL_ASSIGN_REF(NODE) \ (TYPE_HAS_ASSIGN_REF (NODE) && ! TYPE_HAS_COMPLEX_ASSIGN_REF (NODE)) /* Returns true if NODE is a pointer-to-data-member. */ #define TYPE_PTRMEM_P(NODE) \ (TREE_CODE (NODE) == OFFSET_TYPE) /* Returns true if NODE is a pointer. */ #define TYPE_PTR_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE) /* Returns true if NODE is an object type: [basic.types] An object type is a (possibly cv-qualified) type that is not a function type, not a reference type, and not a void type. Keep these checks in ascending order, for speed. */ #define TYPE_OBJ_P(NODE) \ (TREE_CODE (NODE) != REFERENCE_TYPE \ && TREE_CODE (NODE) != VOID_TYPE \ && TREE_CODE (NODE) != FUNCTION_TYPE \ && TREE_CODE (NODE) != METHOD_TYPE) /* Returns true if NODE is a pointer to an object. Keep these checks in ascending tree code order. */ #define TYPE_PTROB_P(NODE) \ (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a reference to an object. Keep these checks in ascending tree code order. */ #define TYPE_REF_OBJ_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE))) /* Returns true if NODE is a pointer to an object, or a pointer to void. Keep these checks in ascending tree code order. */ #define TYPE_PTROBV_P(NODE) \ (TYPE_PTR_P (NODE) \ && !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \ || TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)) /* Returns true if NODE is a pointer to function. */ #define TYPE_PTRFN_P(NODE) \ (TREE_CODE (NODE) == POINTER_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Returns true if NODE is a reference to function. 
*/ #define TYPE_REFFN_P(NODE) \ (TREE_CODE (NODE) == REFERENCE_TYPE \ && TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE) /* Nonzero for _TYPE node means that this type is a pointer to member function type. */ #define TYPE_PTRMEMFUNC_P(NODE) \ (TREE_CODE (NODE) == RECORD_TYPE \ && TYPE_LANG_SPECIFIC (NODE) \ && TYPE_PTRMEMFUNC_FLAG (NODE)) #define TYPE_PTRMEMFUNC_FLAG(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag) /* Returns true if NODE is a pointer-to-member. */ #define TYPE_PTR_TO_MEMBER_P(NODE) \ (TYPE_PTRMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE)) /* Indicates when overload resolution may resolve to a pointer to member function. [expr.unary.op]/3 */ #define PTRMEM_OK_P(NODE) \ TREE_LANG_FLAG_0 (TREE_CHECK2 ((NODE), ADDR_EXPR, OFFSET_REF)) /* Get the POINTER_TYPE to the METHOD_TYPE associated with this pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true, before using this macro. */ #define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \ (TREE_TYPE (TYPE_FIELDS (NODE))) /* Returns `A' for a type like `int (A::*)(double)' */ #define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \ TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* These are use to manipulate the canonical RECORD_TYPE from the hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */ #define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \ (TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL) #define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \ do { \ if (TYPE_LANG_SPECIFIC (NODE) == NULL) \ { \ TYPE_LANG_SPECIFIC (NODE) = GGC_CNEWVAR \ (struct lang_type, sizeof (struct lang_type_ptrmem)); \ TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \ } \ TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \ } while (0) /* For a pointer-to-member type of the form `T X::*', this is `X'. For a type like `void (X::*)() const', this type is `X', not `const X'. 
To get at the `const X' you have to look at the TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have type `const X*'. */ #define TYPE_PTRMEM_CLASS_TYPE(NODE) \ (TYPE_PTRMEM_P (NODE) \ ? TYPE_OFFSET_BASETYPE (NODE) \ : TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE)) /* For a pointer-to-member type of the form `T X::*', this is `T'. */ #define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \ (TYPE_PTRMEM_P (NODE) \ ? TREE_TYPE (NODE) \ : TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE))) /* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for `X'. */ #define PTRMEM_CST_CLASS(NODE) \ TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE))) /* For a pointer-to-member constant `X::Y' this is the _DECL for `Y'. */ #define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member) /* The expression in question for a TYPEOF_TYPE. */ #define TYPEOF_TYPE_EXPR(NODE) (TYPEOF_TYPE_CHECK (NODE))->type.values /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_EXTERN(NODE) \ DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was specified in its declaration. This can also be set for an erroneously declared PARM_DECL. */ #define DECL_THIS_STATIC(NODE) \ DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE)) /* Nonzero for FIELD_DECL node means that this field is a base class of the parent object, as opposed to a member field. */ #define DECL_FIELD_IS_BASE(NODE) \ DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE)) /* Nonzero if TYPE is an anonymous union or struct type. We have to use a flag for this because "A union for which objects or pointers are declared is not an anonymous union" [class.union]. 
*/ #define ANON_AGGR_TYPE_P(NODE) \ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr) #define SET_ANON_AGGR_TYPE_P(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1) /* Nonzero if TYPE is an anonymous union type. */ #define ANON_UNION_TYPE_P(NODE) \ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE)) #define UNKNOWN_TYPE LANG_TYPE /* Define fields and accessors for nodes representing declared names. */ #define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous) #ifdef KEY /* Nonzero means this type's copy constructor is implicitly defined instead of user defined. */ #define TYPE_HAS_IMPLICIT_COPY_CONSTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->has_implicit_copy_constructor) /* The copy constructor to use if the front-end needs to copy objects. */ #define CLASSTYPE_COPY_CONSTRUCTOR(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->copy_constructor) #endif /* C++: all of these are overloaded! These apply only to TYPE_DECLs. */ /* The format of each node in the DECL_FRIENDLIST is as follows: The TREE_PURPOSE will be the name of a function, i.e., an IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose TREE_VALUEs are friends with the given name. */ #define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE)) #define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST)) #define FRIEND_DECLS(LIST) (TREE_VALUE (LIST)) /* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of each node is a type; the TREE_VALUE is the access granted for this DECL in that type. The DECL_ACCESS is set by access declarations. For example, if a member that would normally be public in a derived class is made protected, then the derived class and the protected_access_node will appear in the DECL_ACCESS for the node. */ #define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access) /* Nonzero if the FUNCTION_DECL is a global constructor. 
*/ #define DECL_GLOBAL_CTOR_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.global_ctor_p) /* Nonzero if the FUNCTION_DECL is a global destructor. */ #define DECL_GLOBAL_DTOR_P(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.global_dtor_p) /* Accessor macros for C++ template decl nodes. */ /* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node is a INT_CST whose TREE_INT_CST_LOW indicates the level of the template parameters, with 1 being the outermost set of template parameters. The TREE_VALUE is a vector, whose elements are the template parameters at each level. Each element in the vector is a TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a non-type parameter), or a TYPE_DECL (if the parameter is a type parameter). The TREE_PURPOSE is the default value, if any. The TEMPLATE_PARM_INDEX for the parameter is available as the DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a TYPE_DECL). */ #define DECL_TEMPLATE_PARMS(NODE) DECL_NON_COMMON_CHECK (NODE)->decl_non_common.arguments #define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \ INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE)) #define DECL_NTPARMS(NODE) \ TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE)) /* For function, method, class-data templates. */ #define DECL_TEMPLATE_RESULT(NODE) DECL_RESULT_FLD (NODE) /* For a static member variable template, the DECL_TEMPLATE_INSTANTIATIONS list contains the explicitly and implicitly generated instantiations of the variable. There are no partial instantiations of static member variables, so all of these will be full instantiations. For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds all instantiations and specializations of the class type, including partial instantiations and partial specializations. In both cases, the TREE_PURPOSE of each node contains the arguments used; the TREE_VALUE contains the generated variable. The template arguments are always complete. 
For example, given: template <class T> struct S1 { template <class U> struct S2 {}; template <class U> struct S2<U*> {}; }; the record for the partial specialization will contain, as its argument list, { {T}, {U*} }, and will be on the DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template <class U> struct S1<T>::S2'. This list is not used for function templates. */ #define DECL_TEMPLATE_INSTANTIATIONS(NODE) DECL_VINDEX (NODE) /* For a function template, the DECL_TEMPLATE_SPECIALIZATIONS lists contains all instantiations and specializations of the function, including partial instantiations. For a partial instantiation which is a specialization, this list holds only full specializations of the template that are instantiations of the partial instantiation. For example, given: template <class T> struct S { template <class U> void f(U); template <> void f(T); }; the `S<int>::f<int>(int)' function will appear on the DECL_TEMPLATE_SPECIALIZATIONS list for both `template <class T> template <class U> void S<T>::f(U)' and `template <class T> void S<int>::f(T)'. In the latter case, however, it will have only the innermost set of arguments (T, in this case). The DECL_TI_TEMPLATE for the function declaration will point at the specialization, not the fully general template. For a class template, this list contains the partial specializations of this template. (Full specializations are not recorded on this list.) The TREE_PURPOSE holds the arguments used in the partial specialization (e.g., for `template <class T> struct S<T*, int>' this will be `T*'.) The arguments will also include any outer template arguments. The TREE_VALUE holds the innermost template parameters for the specialization (e.g., `T' in the example above.) The TREE_TYPE is the _TYPE node for the partial specialization. This list is not used for static variable templates. */ #define DECL_TEMPLATE_SPECIALIZATIONS(NODE) DECL_SIZE (NODE) /* Nonzero for a DECL which is actually a template parameter. 
Keep these checks in ascending tree code order. */ #define DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) \ && (TREE_CODE (NODE) == CONST_DECL \ || TREE_CODE (NODE) == PARM_DECL \ || TREE_CODE (NODE) == TYPE_DECL \ || TREE_CODE (NODE) == TEMPLATE_DECL)) /* Mark NODE as a template parameter. */ #define SET_DECL_TEMPLATE_PARM_P(NODE) \ (DECL_LANG_FLAG_0 (NODE) = 1) /* Nonzero if NODE is a template template parameter. */ #define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE)) /* Nonzero if NODE is a TEMPLATE_DECL representing an UNBOUND_CLASS_TEMPLATE tree node. */ #define DECL_UNBOUND_CLASS_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL && !DECL_TEMPLATE_RESULT (NODE)) #define DECL_FUNCTION_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL) /* Nonzero for a DECL that represents a template class. */ #define DECL_CLASS_TEMPLATE_P(NODE) \ (TREE_CODE (NODE) == TEMPLATE_DECL \ && !DECL_UNBOUND_CLASS_TEMPLATE_P (NODE) \ && TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL \ && !DECL_TEMPLATE_TEMPLATE_PARM_P (NODE)) /* Nonzero if NODE which declares a type. */ #define DECL_DECLARES_TYPE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL || DECL_CLASS_TEMPLATE_P (NODE)) /* Nonzero if NODE is the typedef implicitly generated for a type when the type is declared. In C++, `struct S {};' is roughly equivalent to `struct S {}; typedef struct S S;' in C. DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this example. In C++, there is a second implicit typedef for each class, in the scope of `S' itself, so that you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that second typedef. 
*/ #define DECL_IMPLICIT_TYPEDEF_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE)) #define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \ (DECL_LANG_FLAG_2 (NODE) = 1) #define DECL_SELF_REFERENCE_P(NODE) \ (TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE)) #define SET_DECL_SELF_REFERENCE_P(NODE) \ (DECL_LANG_FLAG_4 (NODE) = 1) /* A `primary' template is one that has its own template header. A member function of a class template is a template, but not primary. A member template is primary. Friend templates are primary, too. */ /* Returns the primary template corresponding to these parameters. */ #define DECL_PRIMARY_TEMPLATE(NODE) \ (TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE))) /* Returns nonzero if NODE is a primary template. */ #define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE)) /* Non-zero iff NODE is a specialization of a template. The value indicates the type of specializations: 1=implicit instantiation 2=partial or explicit specialization, e.g.: template <> int min<int> (int, int), 3=explicit instantiation, e.g.: template int min<int> (int, int); Note that NODE will be marked as a specialization even if the template it is instantiating is not a primary template. For example, given: template <typename T> struct O { void f(); struct I {}; }; both O<int>::f and O<int>::I will be marked as instantiations. If DECL_USE_TEMPLATE is non-zero, then DECL_TEMPLATE_INFO will also be non-NULL. */ #define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->decl_flags.use_template) /* Like DECL_USE_TEMPLATE, but for class types. */ #define CLASSTYPE_USE_TEMPLATE(NODE) \ (LANG_TYPE_CLASS_CHECK (NODE)->use_template) /* True if NODE is a specialization of a primary template. 
*/ #define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \ (CLASS_TYPE_P (NODE) \ && CLASSTYPE_USE_TEMPLATE (NODE) \ && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (arg))) #define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1) #define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) & 1) #define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2) #define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2) /* Returns true for an explicit or partial specialization of a class template. */ #define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 2) #define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 2) #define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1) #define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1) #define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 1) #define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 1) #define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3) #define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3) #define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) == 3) #define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \ (CLASSTYPE_USE_TEMPLATE (NODE) = 3) /* Nonzero if DECL is a friend function which is an instantiation from the point of view of the compiler, but not from the point of view of the language. For example given: template <class T> struct S { friend void f(T) {}; }; the declaration of `void f(int)' generated when S<int> is instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be a DECL_FRIEND_PSUEDO_TEMPLATE_INSTANTIATION. 
*/ #define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \ (DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL)) /* Nonzero iff we are currently processing a declaration for an entity with its own template parameter list, and which is not a full specialization. */ #define PROCESSING_REAL_TEMPLATE_DECL_P() \ (processing_template_decl > template_class_depth (current_scope ())) /* Nonzero if this VAR_DECL or FUNCTION_DECL has already been instantiated, i.e. its definition has been generated from the pattern given in the template. */ #define DECL_TEMPLATE_INSTANTIATED(NODE) \ DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE)) /* We know what we're doing with this decl now. */ #define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE) /* DECL_EXTERNAL must be set on a decl until the decl is actually emitted, so that assemble_external will work properly. So we have this flag to tell us whether the decl is really not external. */ #define DECL_NOT_REALLY_EXTERN(NODE) \ (DECL_LANG_SPECIFIC (NODE)->decl_flags.not_really_extern) #define DECL_REALLY_EXTERN(NODE) \ (DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE)) /* A thunk is a stub function. A thunk is an alternate entry point for an ordinary FUNCTION_DECL. The address of the ordinary FUNCTION_DECL is given by the DECL_INITIAL, which is always an ADDR_EXPR whose operand is a FUNCTION_DECL. The job of the thunk is to either adjust the this pointer before transferring control to the FUNCTION_DECL, or call FUNCTION_DECL and then adjust the result value. Note, the result pointer adjusting thunk must perform a call to the thunked function, (or be implemented via passing some invisible parameter to the thunked function, which is modified to perform the adjustment just before returning). A thunk may perform either, or both, of the following operations: o Adjust the this or result pointer by a constant offset. o Adjust the this or result pointer by looking up a vcall or vbase offset in the vtable. 
A this pointer adjusting thunk converts from a base to a derived class, and hence adds the offsets. A result pointer adjusting thunk converts from a derived class to a base, and hence subtracts the offsets. If both operations are performed, then the constant adjustment is performed first for this pointer adjustment and last for the result pointer adjustment. The constant adjustment is given by THUNK_FIXED_OFFSET. If the vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is used. For this pointer adjusting thunks, it is the vcall offset into the vtable. For result pointer adjusting thunks it is the binfo of the virtual base to convert to. Use that binfo's vbase offset. It is possible to have equivalent covariant thunks. These are distinct virtual covariant thunks whose vbase offsets happen to have the same value. THUNK_ALIAS is used to pick one as the canonical thunk, which will get all the this pointer adjusting thunks attached to it. */ /* An integer indicating how many bytes should be subtracted from the this or result pointer when this function is called. */ #define THUNK_FIXED_OFFSET(DECL) \ (DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.f.u5.fixed_offset) /* A tree indicating how to perform the virtual adjustment. For a this adjusting thunk it is the number of bytes to be added to the vtable to find the vcall offset. For a result adjusting thunk, it is the binfo of the relevant virtual base. If NULL, then there is no virtual adjust. (The vptr is always located at offset zero from the this or result pointer.) (If the covariant type is within the class hierarchy being laid out, the vbase index is not yet known at the point we need to create the thunks, hence the need to use binfos.) */ #define THUNK_VIRTUAL_OFFSET(DECL) \ (LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access) /* A thunk which is equivalent to another thunk. 
*/ #define THUNK_ALIAS(DECL) \ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->decl_flags.u.template_info) /* For thunk NODE, this is the FUNCTION_DECL thunked to. It is possible for the target to be a thunk too. */ #define THUNK_TARGET(NODE) \ (DECL_LANG_SPECIFIC (NODE)->u.f.befriending_classes) /* True for a SCOPE_REF iff the "template" keyword was used to indicate that the qualified name denotes a template. */ #define QUALIFIED_NAME_IS_TEMPLATE(NODE) \ (TREE_LANG_FLAG_0 (SCOPE_REF_CHECK (NODE))) /* True for an OMP_ATOMIC that has dependent parameters. These are stored as bare LHS/RHS, and not as ADDR/RHS, as in the generic statement. */ #define OMP_ATOMIC_DEPENDENT_P(NODE) \ (TREE_LANG_FLAG_0 (OMP_ATOMIC_CHECK (NODE))) /* Used to store the operation code when OMP_ATOMIC_DEPENDENT_P is set. */ #define OMP_ATOMIC_CODE(NODE) \ (OMP_ATOMIC_CHECK (NODE)->exp.complexity) /* Used while gimplifying continue statements bound to OMP_FOR nodes. */ #define OMP_FOR_GIMPLIFYING_P(NODE) \ (TREE_LANG_FLAG_0 (OMP_FOR_CHECK (NODE))) /* A language-specific token attached to the OpenMP data clauses to hold code (or code fragments) related to ctors, dtors, and op=. See semantics.c for details. */ #define CP_OMP_CLAUSE_INFO(NODE) \ TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \ OMP_CLAUSE_COPYPRIVATE)) /* These macros provide convenient access to the various _STMT nodes created when parsing template declarations. */ #define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0) #define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1) #define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0) #define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1) #define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0) /* Nonzero if this try block is a function try block. 
*/ #define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE)) #define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0) #define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1) #define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE)) /* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run and the VAR_DECL for which this cleanup exists. */ #define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0) #define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1) #define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2) /* IF_STMT accessors. These give access to the condition of the if statement, the then block of the if statement, and the else block of the if statement if it exists. */ #define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0) #define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1) #define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2) /* WHILE_STMT accessors. These give access to the condition of the while statement and the body of the while statement, respectively. */ #define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0) #define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1) /* DO_STMT accessors. These give access to the condition of the do statement and the body of the do statement, respectively. */ #define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0) #define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1) /* FOR_STMT accessors. These give access to the init statement, condition, update expression, and body of the for statement, respectively. 
*/ #define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0) #define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1) #define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2) #define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3) #define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0) #define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1) #define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2) /* STMT_EXPR accessor. */ #define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0) /* EXPR_STMT accessor. This gives the expression associated with an expression statement. */ #define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0) /* True if this TARGET_EXPR was created by build_cplus_new, and so we can discard it if it isn't useful. */ #define TARGET_EXPR_IMPLICIT_P(NODE) \ TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE)) /* An enumeration of the kind of tags that C++ accepts. */ enum tag_types { none_type = 0, /* Not a tag type. */ record_type, /* "struct" types. */ class_type, /* "class" types. */ union_type, /* "union" types. */ enum_type, /* "enum" types. */ typename_type /* "typename" types. */ }; /* The various kinds of lvalues we distinguish. */ typedef enum cp_lvalue_kind { clk_none = 0, /* Things that are not an lvalue. */ clk_ordinary = 1, /* An ordinary lvalue. */ clk_class = 2, /* An rvalue of class-type. */ clk_bitfield = 4, /* An lvalue for a bit-field. */ clk_packed = 8 /* An lvalue for a packed field. */ } cp_lvalue_kind; /* Various kinds of template specialization, instantiation, etc. */ typedef enum tmpl_spec_kind { tsk_none, /* Not a template at all. */ tsk_invalid_member_spec, /* An explicit member template specialization, but the enclosing classes have not all been explicitly specialized. */ tsk_invalid_expl_inst, /* An explicit instantiation containing template parameter lists. 
*/ tsk_excessive_parms, /* A template declaration with too many template parameter lists. */ tsk_insufficient_parms, /* A template declaration with too few parameter lists. */ tsk_template, /* A template declaration. */ tsk_expl_spec, /* An explicit specialization. */ tsk_expl_inst /* An explicit instantiation. */ } tmpl_spec_kind; /* The various kinds of access. BINFO_ACCESS depends on these being two bit quantities. The numerical values are important; they are used to initialize RTTI data structures, so changing them changes the ABI. */ typedef enum access_kind { ak_none = 0, /* Inaccessible. */ ak_public = 1, /* Accessible, as a `public' thing. */ ak_protected = 2, /* Accessible, as a `protected' thing. */ ak_private = 3 /* Accessible, as a `private' thing. */ } access_kind; /* The various kinds of special functions. If you add to this list, you should update special_function_p as well. */ typedef enum special_function_kind { sfk_none = 0, /* Not a special function. This enumeral must have value zero; see special_function_p. */ sfk_constructor, /* A constructor. */ sfk_copy_constructor, /* A copy constructor. */ sfk_assignment_operator, /* An assignment operator. */ sfk_destructor, /* A destructor. */ sfk_complete_destructor, /* A destructor for complete objects. */ sfk_base_destructor, /* A destructor for base subobjects. */ sfk_deleting_destructor, /* A destructor for complete objects that deletes the object after it has been destroyed. */ sfk_conversion /* A conversion operator. */ } special_function_kind; /* The various kinds of linkage. From [basic.link], A name is said to have linkage when it might denote the same object, reference, function, type, template, namespace or value as a name introduced in another scope: -- When a name has external linkage, the entity it denotes can be referred to from scopes of other translation units or from other scopes of the same translation unit. 
-- When a name has internal linkage, the entity it denotes can be referred to by names from other scopes in the same translation unit. -- When a name has no linkage, the entity it denotes cannot be referred to by names from other scopes. */ typedef enum linkage_kind { lk_none, /* No linkage. */ lk_internal, /* Internal linkage. */ lk_external /* External linkage. */ } linkage_kind; /* Bitmask flags to control type substitution. */ typedef enum tsubst_flags_t { tf_none = 0, /* nothing special */ tf_error = 1 << 0, /* give error messages */ tf_warning = 1 << 1, /* give warnings too */ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */ tf_keep_type_decl = 1 << 3, /* retain typedef type decls (make_typename_type use) */ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal instantiate_type use) */ tf_user = 1 << 5, /* found template must be a user template (lookup_template_class use) */ tf_conv = 1 << 6, /* We are determining what kind of conversion might be permissible, not actually performing the conversion. */ /* Convenient substitution flags combinations. */ tf_warning_or_error = tf_warning | tf_error } tsubst_flags_t; /* The kind of checking we can do looking in a class hierarchy. */ typedef enum base_access { ba_any = 0, /* Do not check access, allow an ambiguous base, prefer a non-virtual base */ ba_unique = 1 << 0, /* Must be a unique base. */ ba_check_bit = 1 << 1, /* Check access. */ ba_check = ba_unique | ba_check_bit, ba_ignore_scope = 1 << 2, /* Ignore access allowed by local scope. */ ba_quiet = 1 << 3 /* Do not issue error messages. */ } base_access; /* The various kinds of access check during parsing. */ typedef enum deferring_kind { dk_no_deferred = 0, /* Check access immediately */ dk_deferred = 1, /* Deferred check */ dk_no_check = 2 /* No access check */ } deferring_kind; /* The kind of base we can find, looking in a class hierarchy. Values <0 indicate we failed. 
*/
typedef enum base_kind {
  bk_inaccessible = -3,      /* The base is inaccessible */
  bk_ambig = -2,             /* The base is ambiguous */
  bk_not_base = -1,          /* It is not a base */
  bk_same_type = 0,          /* It is the same type */
  bk_proper_base = 1,        /* It is a proper base */
  bk_via_virtual = 2         /* It is a proper base, but via a virtual
                                path.  This might not be the canonical
                                binfo.  */
} base_kind;

/* Node for "pointer to (virtual) function".
   This may be distinct from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node  vtable_entry_type

/* For building calls to `delete'.  These are GC roots (GTY).  */
extern GTY(()) tree integer_two_node;
extern GTY(()) tree integer_three_node;

/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
extern int function_depth;

/* in pt.c  */

/* These values are used for the `STRICT' parameter to type_unification and
   fn_type_unification.  Their meanings are described with the
   documentation for fn_type_unification.  */
typedef enum unification_kind_t {
  DEDUCE_CALL,
  DEDUCE_CONV,
  DEDUCE_EXACT
} unification_kind_t;

/* Macros for operating on a template instantiation level node.
   Each expands to a field of the checked tinst_level_t.  */
#define TINST_DECL(NODE) \
  (((tinst_level_t) TINST_LEVEL_CHECK (NODE))->decl)
#define TINST_LOCATION(NODE) \
  (((tinst_level_t) TINST_LEVEL_CHECK (NODE))->locus)
#define TINST_IN_SYSTEM_HEADER_P(NODE) \
  (((tinst_level_t) TINST_LEVEL_CHECK (NODE))->in_system_header_p)

/* in class.c */

extern int current_class_depth;

/* An array of all local classes present in this translation unit, in
   declaration order.  */
extern GTY(()) VEC(tree,gc) *local_classes;

/* Here's where we control how name mangling takes place.  */

/* Cannot use '$' up front, because this confuses gdb
   (names beginning with '$' are gdb-local identifiers).
   Note that all forms in which the '$' is significant are long enough
   for direct indexing (meaning that if we know there is a '$'
   at a particular location, we can index into the string at
   any other location that provides distinguishing characters).  */

/* Define NO_DOLLAR_IN_LABEL in your favorite tm file if your assembler
   doesn't allow '$' in symbol names.  */
#ifndef NO_DOLLAR_IN_LABEL

#define JOINER '$'

#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#define ANON_AGGRNAME_FORMAT "$_%d"

#else /* NO_DOLLAR_IN_LABEL */

#ifndef NO_DOT_IN_LABEL

#define JOINER '.'

#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#define ANON_AGGRNAME_FORMAT "._%d"

#else /* NO_DOT_IN_LABEL */

/* Neither '$' nor '.' is allowed; fall back to plain "__" prefixes.
   The *_P predicates here must test by prefix because there is no
   distinguishing joiner character to index on.  */
#define IN_CHARGE_NAME "__in_chrg"
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
             sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
             sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
             sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#define ANON_AGGRNAME_PREFIX "__anon_"
#define ANON_AGGRNAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), ANON_AGGRNAME_PREFIX, \
             sizeof (ANON_AGGRNAME_PREFIX) - 1))
#define ANON_AGGRNAME_FORMAT "__anon_%d"

#endif /* NO_DOT_IN_LABEL */
#endif /* NO_DOLLAR_IN_LABEL */

#define THIS_NAME "this"

#define IN_CHARGE_NAME "__in_chrg"

#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"

#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)

/* With a joiner character available, these predicates can test fixed
   character positions instead of comparing a whole prefix.  */
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
  && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
  && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)

#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))

/* For anonymous aggregate types, we need some sort of name to
   hold on to.  In practice, this should not appear, but it should
   not be harmful if it does.  */
#define ANON_AGGRNAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[0] == JOINER \
                                  && IDENTIFIER_POINTER (ID_NODE)[1] == '_')
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */

/* Nonzero if we're done parsing and into end-of-file activities.  */
extern int at_eof;

/* A list of namespace-scope objects which have constructors or
   destructors which reside in the global scope.  The decl is stored
   in the TREE_VALUE slot and the initializer is stored in the
   TREE_PURPOSE slot.  */
extern GTY(()) tree static_aggregates;

/* Functions called along with real static constructors and destructors.  */
extern GTY(()) tree static_ctors;
extern GTY(()) tree static_dtors;

enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, OP_FLAG, TYPENAME_FLAG };

/* These are used as bits in flags passed to various functions to
   control their behavior.  Despite the LOOKUP_ prefix, many of these
   do not control name lookup.  ???  Functions using these flags should
   probably be modified to accept explicit boolean flags for the
   behaviors relevant to them.  */
/* Check for access violations.  */
#define LOOKUP_PROTECT (1 << 0)
/* Complain if no suitable member function matching the arguments is
   found.  */
#define LOOKUP_COMPLAIN (1 << 1)
#define LOOKUP_NORMAL (LOOKUP_PROTECT | LOOKUP_COMPLAIN)
/* Even if the function found by lookup is a virtual function, it
   should be called directly.  */
#define LOOKUP_NONVIRTUAL (1 << 2)
/* Non-converting (i.e., "explicit") constructors are not tried.
*/
#define LOOKUP_ONLYCONVERTING (1 << 3)
/* If a temporary is created, it should be created so that it lives
   as long as the current variable bindings; otherwise it only lives
   until the end of the complete-expression.  It also forces
   direct-initialization in cases where other parts of the compiler
   have already generated a temporary, such as reference
   initialization and the catch parameter.  */
#define DIRECT_BIND (1 << 4)
/* User-defined conversions are not permitted.  (Built-in conversions
   are permitted.)  */
#define LOOKUP_NO_CONVERSION (1 << 5)
/* The user has explicitly called a destructor.  (Therefore, we do
   not need to check that the object is non-NULL before calling the
   destructor.)  */
#define LOOKUP_DESTRUCTOR (1 << 6)
/* Do not permit references to bind to temporaries.  */
#define LOOKUP_NO_TEMP_BIND (1 << 7)
/* Do not accept objects, and possibly namespaces.  */
#define LOOKUP_PREFER_TYPES (1 << 8)
/* Do not accept objects, and possibly types.  */
#define LOOKUP_PREFER_NAMESPACES (1 << 9)
/* Accept types or namespaces.  */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* We are checking that a constructor can be called -- but we do not
   actually plan to call it.  */
#define LOOKUP_CONSTRUCTOR_CALLABLE (1 << 10)
/* Return friend declarations and un-declared builtin functions.
   (Normally, these entities are registered in the symbol table, but
   not found by lookup.)  Defined relative to the previous flag,
   i.e. this is (1 << 11).  */
#define LOOKUP_HIDDEN (LOOKUP_CONSTRUCTOR_CALLABLE << 1)

#define LOOKUP_NAMESPACES_ONLY(F)  \
  (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F)  \
  (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F)  ((F) & LOOKUP_PREFER_BOTH)


/* These flags are used by the conversion code.
   CONV_IMPLICIT   :  Perform implicit conversions (standard and user-defined).
   CONV_STATIC     :  Perform the explicit conversions for static_cast.
   CONV_CONST      :  Perform the explicit conversions for const_cast.
   CONV_REINTERPRET:  Perform the explicit conversions for reinterpret_cast.
   CONV_PRIVATE    :  Perform upcasts to private bases.
   CONV_FORCE_TEMP :  Require a new temporary when converting to the same
                      aggregate type.  */

#define CONV_IMPLICIT    1
#define CONV_STATIC      2
#define CONV_CONST       4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE     16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP  64
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
                          | CONV_REINTERPRET)
#define CONV_C_CAST      (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
                          | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)

/* Used by build_expr_type_conversion to indicate which types are
   acceptable as arguments to the expression under consideration.  */
#define WANT_INT     1  /* integer types, including bool */
#define WANT_FLOAT   2  /* floating point types */
#define WANT_ENUM    4  /* enumerated types */
#define WANT_POINTER 8  /* pointer types */
#define WANT_NULL    16 /* null pointer constant */
#define WANT_VECTOR  32 /* vector types */
#define WANT_ARITH   (WANT_INT | WANT_FLOAT | WANT_VECTOR)

/* Used with comptypes, and related functions, to guide type
   comparison.  */
#define COMPARE_STRICT        0 /* Just check if the types are the
                                   same.  */
#define COMPARE_BASE          1 /* Check to see if the second type is
                                   derived from the first.  */
#define COMPARE_DERIVED       2 /* Like COMPARE_BASE, but in
                                   reverse.  */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
                                   another declaration of an existing
                                   entity is seen.  */

/* Used with push_overloaded_decl.  */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
                         regardless of the current scope.  */
#define PUSH_LOCAL  1 /* Push the DECL into the current scope.  */
#define PUSH_USING  2 /* We are pushing this DECL as the result of a
                         using declaration.  */

/* Used with start function.  */
#define SF_DEFAULT    0 /* No flags.  */
#define SF_PRE_PARSED 1 /* The function declaration has already been
                           parsed.
*/
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
                               in the class body.  */

/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
   is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
   class derived from the type pointed to (referred to) by TYPE1.  */
#define same_or_base_type_p(TYPE1, TYPE2) \
  comptypes ((TYPE1), (TYPE2), COMPARE_BASE)

/* These macros are used to access a TEMPLATE_PARM_INDEX.  */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
  ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)

/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.
   They forward to the TEMPLATE_PARM_* accessors above via the
   TEMPLATE_PARM_INDEX stored in the node's type.values field.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
  (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, TEMPLATE_TEMPLATE_PARM, \
                BOUND_TEMPLATE_TEMPLATE_PARM))->type.values
#define TEMPLATE_TYPE_IDX(NODE) \
  (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
  (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
  (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
  (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))

/* These constants can be used as bit flags in the process of tree
   formatting.

   TFF_PLAIN_IDENTIFIER: unqualified part of a name.
   TFF_SCOPE: include the class and namespace scope of the name.
   TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
   TFF_DECL_SPECIFIERS: print decl-specifiers.
   TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
       a class-key (resp. `enum').
   TFF_RETURN_TYPE: include function return type.
   TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
   TFF_EXCEPTION_SPECIFICATION: show function exception specification.
   TFF_TEMPLATE_HEADER: show the template<...> header in a
       template-declaration.
   TFF_TEMPLATE_NAME: show only template-name.
   TFF_EXPR_IN_PARENS: parenthesize expressions.
   TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.  */

#define TFF_PLAIN_IDENTIFIER           (0)
#define TFF_SCOPE                      (1)
#define TFF_CHASE_TYPEDEF              (1 << 1)
#define TFF_DECL_SPECIFIERS            (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM          (1 << 3)
#define TFF_RETURN_TYPE                (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION    (1 << 6)
#define TFF_TEMPLATE_HEADER            (1 << 7)
#define TFF_TEMPLATE_NAME              (1 << 8)
#define TFF_EXPR_IN_PARENS             (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS      (1 << 10)

/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
   ? TYPE_TI_TEMPLATE (NODE) \
   : TYPE_NAME (NODE))

/* in lex.c  */

extern void init_reswords (void);

/* Indexed by TREE_CODE, these tables give C-looking names to
   operators represented by TREE_CODES.  For example,
   opname_tab[(int) MINUS_EXPR] == "-".  */
extern const char **opname_tab, **assignop_tab;

/* Per-operator bookkeeping: identifier node, printable name, mangled
   name and arity.  */
typedef struct operator_name_info_t GTY(())
{
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The arity of the operator.  */
  int arity;
} operator_name_info_t;

/* A mapping from tree codes to operator name information.  */
extern GTY(()) operator_name_info_t operator_name_info
  [(int) LAST_CPLUS_TREE_CODE];
/* Similar, but for assignment operators.  */
extern GTY(()) operator_name_info_t assignment_operator_name_info
  [(int) LAST_CPLUS_TREE_CODE];

/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
   constants.
*/
typedef int cp_cv_quals;

/* A storage class.  */
typedef enum cp_storage_class {
  /* sc_none must be zero so that zeroing a cp_decl_specifier_seq
     sets the storage_class field to sc_none.  */
  sc_none = 0,
  sc_auto,
  sc_register,
  sc_static,
  sc_extern,
  sc_mutable
} cp_storage_class;

/* An individual decl-specifier.  Used to index the specs[] counter
   array in cp_decl_specifier_seq below; ds_last is the count.  */
typedef enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_complex,
  ds_thread,
  ds_last
} cp_decl_spec;

/* A decl-specifier-seq.  */
typedef struct cp_decl_specifier_seq {
  /* The number of times each of the keywords has been seen.  */
  unsigned specs[(int) ds_last];
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here.  This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* True iff TYPE_SPEC indicates a user-defined type.  */
  BOOL_BITFIELD user_defined_type_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
} cp_decl_specifier_seq;

/* The various kinds of declarators.  */
typedef enum cp_declarator_kind {
  cdk_id,
  cdk_function,
  cdk_array,
  cdk_pointer,
  cdk_reference,
  cdk_ptrmem,
  cdk_error
} cp_declarator_kind;

/* A declarator.  */
typedef struct cp_declarator cp_declarator;

typedef struct cp_parameter_declarator cp_parameter_declarator;

/* A parameter, before it has been semantically analyzed.  Parameters
   form a singly linked list via the `next' field.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL_TREE if none.  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is the first parameter in the list and the
     parameter sequence ends with an ellipsis.  */
  bool ellipsis_p;
};

/* A declarator.  Which member of the union `u' is active is
   determined by `kind'.  */
struct cp_declarator {
  /* The kind of declarator.  */
  cp_declarator_kind kind;
  /* Attributes that apply to this declarator.  */
  tree attributes;
  /* For all but cdk_id and cdk_error, the contained declarator.  For
     cdk_id and cdk_error, guaranteed to be NULL.  */
  cp_declarator *declarator;
  location_t id_loc; /* Currently only set for cdk_id.  */
  union {
    /* For identifiers.  */
    struct {
      /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
         *_TYPE) for this identifier.  */
      tree qualifying_scope;
      /* The unqualified name of the entity -- an IDENTIFIER_NODE,
         BIT_NOT_EXPR, or TEMPLATE_ID_EXPR.  */
      tree unqualified_name;
      /* If this is the name of a function, what kind of special
         function (if any).  */
      special_function_kind sfk;
    } id;
    /* For functions.  */
    struct {
      /* The parameters to the function.  */
      cp_parameter_declarator *parameters;
      /* The cv-qualifiers for the function.  */
      cp_cv_quals qualifiers;
      /* The exception-specification for the function.  */
      tree exception_specification;
    } function;
    /* For arrays.  */
    struct {
      /* The bounds to the array.  */
      tree bounds;
    } array;
    /* For cdk_pointer, cdk_reference, and cdk_ptrmem.  */
    struct {
      /* The cv-qualifiers for the pointer.  */
      cp_cv_quals qualifiers;
      /* For cdk_ptrmem, the class type containing the member.  */
      tree class_type;
    } pointer;
  } u;
};

/* A parameter list for a function with no parameters,
   e.g.  "int f(void)".  */
extern cp_parameter_declarator *no_parameters;

/* in call.c */
extern bool check_dtor_name (tree, tree);

extern tree build_vfield_ref (tree, tree);
extern tree build_conditional_expr (tree, tree, tree);
extern tree build_addr_func (tree);
extern tree build_call (tree, tree);
extern bool null_ptr_cst_p (tree);
extern bool sufficient_parms_p (tree);
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int);
extern tree build_new_function_call (tree, tree, bool);
extern tree build_operator_new_call (tree, tree, tree *, tree *, tree *);
extern tree build_new_method_call (tree, tree, tree, tree, int, tree *);
extern tree build_special_member_call (tree, tree, tree, tree, int);
extern tree build_new_op (enum tree_code, int, tree, tree, tree, bool *);
extern tree build_op_delete_call (enum tree_code, tree, tree, bool, tree, tree);
extern bool can_convert (tree, tree);
extern bool can_convert_arg (tree, tree, tree, int);
extern bool can_convert_arg_bad (tree, tree, tree);
extern bool enforce_access (tree, tree, tree);
extern tree convert_default_arg (tree, tree, tree, int);
extern tree convert_arg_to_ellipsis (tree);
extern tree build_x_va_arg (tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, tree, tree *);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern tree strip_top_quals (tree);
extern tree perform_implicit_conversion (tree, tree);
extern tree perform_direct_initialization_if_possible (tree, tree,
bool); extern tree in_charge_arg_for_name (tree); extern tree build_cxx_call (tree, tree); #ifdef ENABLE_CHECKING extern void validate_conversion_obstack (void); #endif /* ENABLE_CHECKING */ /* in class.c */ extern tree build_base_path (enum tree_code, tree, tree, int); extern tree convert_to_base (tree, tree, bool, bool); extern tree convert_to_base_statically (tree, tree); extern tree build_vtbl_ref (tree, tree); extern tree build_vfn_ref (tree, tree); extern tree get_vtable_decl (tree, int); extern void resort_type_method_vec (void *, void *, gt_pointer_operator, void *); extern bool add_method (tree, tree, tree); extern bool currently_open_class (tree); extern tree currently_open_derived_class (tree); extern tree finish_struct (tree, tree); extern void finish_struct_1 (tree); extern int resolves_to_fixed_type_p (tree, int *); extern void init_class_processing (void); extern int is_empty_class (tree); extern void pushclass (tree); extern void popclass (void); extern void push_nested_class (tree); extern void pop_nested_class (void); extern int current_lang_depth (void); extern void push_lang_context (tree); extern void pop_lang_context (void); extern tree instantiate_type (tree, tree, tsubst_flags_t); extern void print_class_statistics (void); extern void cxx_print_statistics (void); extern void cxx_print_xnode (FILE *, tree, int); extern void cxx_print_decl (FILE *, tree, int); extern void cxx_print_type (FILE *, tree, int); extern void cxx_print_identifier (FILE *, tree, int); extern void cxx_print_error_function (struct diagnostic_context *, const char *); extern void build_self_reference (void); extern int same_signature_p (tree, tree); extern void maybe_add_class_template_decl_list (tree, tree, int); extern void unreverse_member_declarations (tree); extern void invalidate_class_lookup_cache (void); extern void maybe_note_name_used_in_class (tree, tree); extern void note_name_declared_in_class (tree, tree); extern tree get_vtbl_decl_for_binfo (tree); extern 
void debug_class (tree); extern void debug_thunks (tree); extern tree cp_fold_obj_type_ref (tree, tree); extern void set_linkage_according_to_type (tree, tree); extern void determine_key_method (tree); extern void check_for_override (tree, tree); extern void push_class_stack (void); extern void pop_class_stack (void); /* in cvt.c */ extern tree convert_to_reference (tree, tree, int, int, tree); extern tree convert_from_reference (tree); extern tree force_rvalue (tree); extern tree ocp_convert (tree, tree, int, int); extern tree cp_convert (tree, tree); extern tree convert_to_void (tree, const char */*implicit context*/); extern tree convert_force (tree, tree, int); extern tree build_expr_type_conversion (int, tree, bool); extern tree type_promotes_to (tree); extern tree perform_qualification_conversions (tree, tree); extern void clone_function_decl (tree, int); extern void adjust_clone_args (tree); /* decl.c */ extern tree poplevel (int, int, int); extern void insert_block (tree); extern tree pushdecl (tree); extern tree pushdecl_maybe_friend (tree, bool); extern void cxx_init_decl_processing (void); enum cp_tree_node_structure_enum cp_tree_node_structure (union lang_tree_node *); extern bool cxx_mark_addressable (tree); extern void cxx_push_function_context (struct function *); extern void cxx_pop_function_context (struct function *); extern void maybe_push_cleanup_level (tree); extern void finish_scope (void); extern void push_switch (tree); extern void pop_switch (void); extern tree pushtag (tree, tree, tag_scope); extern tree make_anon_name (void); extern int decls_match (tree, tree); extern tree duplicate_decls (tree, tree, bool); extern tree pushdecl_top_level_maybe_friend (tree, bool); extern tree pushdecl_top_level_and_finish (tree, tree); extern tree declare_local_label (tree); extern tree define_label (location_t, tree); extern void check_goto (tree); extern bool check_omp_return (void); extern tree make_typename_type (tree, tree, enum tag_types, 
tsubst_flags_t); extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t); extern tree check_for_out_of_scope_variable (tree); extern tree build_library_fn (tree, tree); extern tree build_library_fn_ptr (const char *, tree); extern tree build_cp_library_fn_ptr (const char *, tree); extern tree push_library_fn (tree, tree); extern tree push_void_library_fn (tree, tree); extern tree push_throw_library_fn (tree, tree); extern tree check_tag_decl (cp_decl_specifier_seq *); extern tree shadow_tag (cp_decl_specifier_seq *); extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *); extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *); extern void start_decl_1 (tree, bool); extern void cp_finish_decl (tree, tree, bool, tree, int); extern void finish_decl (tree, tree, tree); extern int cp_complete_array_type (tree *, tree, bool); extern tree build_ptrmemfunc_type (tree); extern tree build_ptrmem_type (tree, tree); /* the grokdeclarator prototype is in decl.h */ extern tree build_this_parm (tree, cp_cv_quals); extern int copy_fn_p (tree); extern tree get_scope_of_declarator (const cp_declarator *); extern void grok_special_member_properties (tree); extern int grok_ctor_properties (tree, tree); extern bool grok_op_properties (tree, bool); extern tree xref_tag (enum tag_types, tree, tag_scope, bool); extern tree xref_tag_from_type (tree, tree, tag_scope); extern bool xref_basetypes (tree, tree); extern tree start_enum (tree); extern void finish_enum (tree); extern void build_enumerator (tree, tree, tree); extern void start_preparsed_function (tree, tree, int); extern int start_function (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree begin_function_body (void); extern void finish_function_body (tree); extern tree outer_curly_brace_block (tree); extern tree finish_function (int); extern tree start_method (cp_decl_specifier_seq *, const cp_declarator *, tree); extern tree 
finish_method (tree); extern void maybe_register_incomplete_var (tree); extern void complete_vars (tree); extern void finish_stmt (void); extern void print_other_binding_stack (struct cp_binding_level *); extern void revert_static_member_fn (tree); extern void fixup_anonymous_aggr (tree); extern int check_static_variable_definition (tree, tree); extern tree compute_array_index_type (tree, tree); extern tree check_default_argument (tree, tree); typedef int (*walk_namespaces_fn) (tree, void *); extern int walk_namespaces (walk_namespaces_fn, void *); extern int wrapup_globals_for_namespace (tree, void *); extern tree create_implicit_typedef (tree, tree); extern tree maybe_push_decl (tree); extern tree force_target_expr (tree, tree); extern tree build_target_expr_with_type (tree, tree); extern int local_variable_p (tree); extern int nonstatic_local_decl_p (tree); extern tree register_dtor_fn (tree); extern tmpl_spec_kind current_tmpl_spec_kind (int); extern tree cp_fname_init (const char *, tree *); extern tree builtin_function (const char *name, tree type, int code, enum built_in_class cl, const char *libname, tree attrs); extern tree check_elaborated_type_specifier (enum tag_types, tree, bool); extern void warn_extern_redeclared_static (tree, tree); extern const char *cxx_comdat_group (tree); extern bool cp_missing_noreturn_ok_p (tree); extern void initialize_artificial_var (tree, tree); extern tree check_var_type (tree, tree); extern tree reshape_init (tree, tree); /* in decl2.c */ extern bool check_java_method (tree); extern tree build_memfn_type (tree, tree, cp_cv_quals); extern void maybe_retrofit_in_chrg (tree); extern void maybe_make_one_only (tree); extern void grokclassfn (tree, tree, enum overload_flags); extern tree grok_array_decl (tree, tree); extern tree delete_sanity (tree, tree, bool, int); extern tree check_classfn (tree, tree, tree); extern void check_member_template (tree); extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *, 
tree, bool, tree, tree); extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *, tree); extern void cplus_decl_attributes (tree *, tree, int); extern void finish_anon_union (tree); extern void cp_finish_file (void); extern tree coerce_new_type (tree); extern tree coerce_delete_type (tree); extern void comdat_linkage (tree); extern void determine_visibility (tree); extern void constrain_class_visibility (tree); extern void update_member_visibility (tree); extern void import_export_decl (tree); extern tree build_cleanup (tree); extern tree build_offset_ref_call_from_tree (tree, tree); extern void check_default_args (tree); extern void mark_used (tree); extern void finish_static_data_member_decl (tree, tree, bool, tree, int); extern tree cp_build_parm_decl (tree, tree); extern tree get_guard (tree); extern tree get_guard_cond (tree); extern tree set_guard (tree); extern tree cxx_callgraph_analyze_expr (tree *, int *, tree); extern void mark_needed (tree); extern bool decl_needed_p (tree); #ifdef KEY extern bool decl_maybe_needed_p (tree); #endif extern void note_vague_linkage_fn (tree); extern tree build_artificial_parm (tree, tree); /* in error.c */ extern void init_error (void); extern const char *type_as_string (tree, int); extern const char *decl_as_string (tree, int); extern const char *expr_as_string (tree, int); extern const char *lang_decl_name (tree, int); extern const char *language_to_string (enum languages); extern const char *class_key_or_enum_as_string (tree); extern void print_instantiation_context (void); /* in except.c */ extern void init_exception_processing (void); extern tree expand_start_catch_block (tree); extern void expand_end_catch_block (void); extern tree build_exc_ptr (void); extern tree build_throw (tree); extern int nothrow_libfn_p (tree); extern void check_handlers (tree); extern void choose_personality_routine (enum languages); extern tree eh_type_info (tree); /* in expr.c */ extern rtx cxx_expand_expr (tree, rtx, enum 
machine_mode, int, rtx *); extern tree cplus_expand_constant (tree); /* friend.c */ extern int is_friend (tree, tree); extern void make_friend_class (tree, tree, bool); extern void add_friend (tree, tree, bool); extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool); /* in init.c */ extern tree expand_member_init (tree); extern void emit_mem_initializers (tree); extern tree build_aggr_init (tree, tree, int); extern int is_aggr_type (tree, int); extern tree get_type_value (tree); extern tree build_zero_init (tree, tree, bool); extern tree build_offset_ref (tree, tree, bool); extern tree build_new (tree, tree, tree, tree, int); extern tree build_vec_init (tree, tree, tree, bool, int); extern tree build_delete (tree, tree, special_function_kind, int, int); extern void push_base_cleanups (void); extern tree build_vec_delete (tree, tree, special_function_kind, int); extern tree create_temporary_var (tree); extern void initialize_vtbl_ptrs (tree); extern tree build_java_class_ref (tree); extern tree integral_constant_value (tree); /* in lex.c */ extern void cxx_dup_lang_specific_decl (tree); extern void yyungetc (int, int); extern tree unqualified_name_lookup_error (tree); extern tree unqualified_fn_lookup_error (tree); extern tree build_lang_decl (enum tree_code, tree, tree); extern void retrofit_lang_decl (tree); extern tree copy_decl (tree); extern tree copy_type (tree); extern tree cxx_make_type (enum tree_code); extern tree make_aggr_type (enum tree_code); extern void yyerror (const char *); extern void yyhook (int); extern bool cxx_init (void); extern void cxx_finish (void); /* in method.c */ extern void init_method (void); extern tree make_thunk (tree, bool, tree, tree); extern void finish_thunk (tree); extern void use_thunk (tree, bool); extern void synthesize_method (tree); extern tree lazily_declare_fn (special_function_kind, tree); extern tree skip_artificial_parms_for (tree, tree); extern tree make_alias_for (tree, tree); /* In optimize.c 
*/ extern bool maybe_clone_body (tree); /* in pt.c */ extern void check_template_shadow (tree); extern tree get_innermost_template_args (tree, int); extern void maybe_begin_member_template_processing (tree); extern void maybe_end_member_template_processing (void); extern tree finish_member_template_decl (tree); extern void begin_template_parm_list (void); extern bool begin_specialization (void); extern void reset_specialization (void); extern void end_specialization (void); extern void begin_explicit_instantiation (void); extern void end_explicit_instantiation (void); extern tree check_explicit_specialization (tree, tree, int, int); extern tree process_template_parm (tree, tree, bool); extern tree end_template_parm_list (tree); extern void end_template_decl (void); extern tree push_template_decl (tree); extern tree push_template_decl_real (tree, bool); extern bool redeclare_class_template (tree, tree); extern tree lookup_template_class (tree, tree, tree, tree, int, tsubst_flags_t); extern tree lookup_template_function (tree, tree); extern int uses_template_parms (tree); extern int uses_template_parms_level (tree, int); extern tree instantiate_class_template (tree); extern tree instantiate_template (tree, tree, tsubst_flags_t); extern int fn_type_unification (tree, tree, tree, tree, tree, unification_kind_t, int); extern void mark_decl_instantiated (tree, int); extern int more_specialized_fn (tree, tree, int); extern void do_decl_instantiation (tree, tree); extern void do_type_instantiation (tree, tree, tsubst_flags_t); extern tree instantiate_decl (tree, int, bool); extern int comp_template_parms (tree, tree); extern int template_class_depth (tree); extern int is_specialization_of (tree, tree); extern bool is_specialization_of_friend (tree, tree); extern int comp_template_args (tree, tree); extern tree maybe_process_partial_specialization (tree); extern tree most_specialized_instantiation (tree); extern void print_candidates (tree); extern void 
instantiate_pending_templates (int); extern tree tsubst_default_argument (tree, tree, tree); extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t, tree, bool, bool); extern tree most_general_template (tree); extern tree get_mostly_instantiated_function_type (tree); extern int problematic_instantiation_changed (void); extern void record_last_problematic_instantiation (void); extern tree current_instantiation (void); extern tree maybe_get_template_decl_from_type_decl (tree); extern int processing_template_parmlist; extern bool dependent_type_p (tree); extern bool any_dependent_template_arguments_p (tree); extern bool dependent_template_p (tree); extern bool dependent_template_id_p (tree, tree); extern bool type_dependent_expression_p (tree); extern bool any_type_dependent_arguments_p (tree); extern bool value_dependent_expression_p (tree); extern bool dependent_omp_for_p (tree, tree, tree, tree); extern tree resolve_typename_type (tree, bool); extern tree template_for_substitution (tree); extern tree build_non_dependent_expr (tree); extern tree build_non_dependent_args (tree); extern bool reregister_specialization (tree, tree, tree); extern tree fold_non_dependent_expr (tree); extern bool explicit_class_specialization_p (tree); /* in repo.c */ extern void init_repo (void); extern int repo_emit_p (tree); extern bool repo_export_class_p (tree); extern void finish_repo (void); /* in rtti.c */ /* A vector of all tinfo decls that haven't been emitted yet. 
*/ extern GTY(()) VEC(tree,gc) *unemitted_tinfo_decls; extern void init_rtti_processing (void); extern tree build_typeid (tree); extern tree get_tinfo_decl (tree); extern tree get_typeid (tree); extern tree build_dynamic_cast (tree, tree); extern void emit_support_tinfos (void); extern bool emit_tinfo_decl (tree); /* in search.c */ extern bool accessible_base_p (tree, tree, bool); extern tree lookup_base (tree, tree, base_access, base_kind *); extern tree dcast_base_hint (tree, tree); extern int accessible_p (tree, tree, bool); extern tree lookup_field_1 (tree, tree, bool); extern tree lookup_field (tree, tree, int, bool); extern int lookup_fnfields_1 (tree, tree); extern int class_method_index_for_fn (tree, tree); extern tree lookup_fnfields (tree, tree, int); extern tree lookup_member (tree, tree, int, bool); extern int look_for_overrides (tree, tree); extern void get_pure_virtuals (tree); extern void maybe_suppress_debug_info (tree); extern void note_debug_info_needed (tree); extern void print_search_statistics (void); extern void reinit_search_statistics (void); extern tree current_scope (void); extern int at_function_scope_p (void); extern bool at_class_scope_p (void); extern bool at_namespace_scope_p (void); extern tree context_for_name_lookup (tree); extern tree lookup_conversions (tree); extern tree binfo_from_vbase (tree); extern tree binfo_for_vbase (tree, tree); extern tree look_for_overrides_here (tree, tree); #define dfs_skip_bases ((tree)1) extern tree dfs_walk_all (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree dfs_walk_once (tree, tree (*) (tree, void *), tree (*) (tree, void *), void *); extern tree binfo_via_virtual (tree, tree); extern tree build_baselink (tree, tree, tree, tree); extern tree adjust_result_of_qualified_name_lookup (tree, tree, tree); extern tree copied_binfo (tree, tree); extern tree original_binfo (tree, tree); extern int shared_member_p (tree); /* The representation of a deferred access check. 
*/ typedef struct deferred_access_check GTY(()) { /* The base class in which the declaration is referenced. */ tree binfo; /* The declaration whose access must be checked. */ tree decl; /* The declaration that should be used in the error message. */ tree diag_decl; } deferred_access_check; DEF_VEC_O(deferred_access_check); DEF_VEC_ALLOC_O(deferred_access_check,gc); /* in semantics.c */ extern void push_deferring_access_checks (deferring_kind); extern void resume_deferring_access_checks (void); extern void stop_deferring_access_checks (void); extern void pop_deferring_access_checks (void); extern VEC (deferred_access_check,gc)* get_deferred_access_checks (void); extern void pop_to_parent_deferring_access_checks (void); extern void perform_access_checks (VEC (deferred_access_check,gc)*); extern void perform_deferred_access_checks (void); extern void perform_or_defer_access_check (tree, tree, tree); extern int stmts_are_full_exprs_p (void); extern void init_cp_semantics (void); extern tree do_poplevel (tree); extern void add_decl_expr (tree); extern tree finish_expr_stmt (tree); extern tree begin_if_stmt (void); extern void finish_if_stmt_cond (tree, tree); extern tree finish_then_clause (tree); extern void begin_else_clause (tree); extern void finish_else_clause (tree); extern void finish_if_stmt (tree); extern tree begin_while_stmt (void); extern void finish_while_stmt_cond (tree, tree); extern void finish_while_stmt (tree); extern tree begin_do_stmt (void); extern void finish_do_body (tree); extern void finish_do_stmt (tree, tree); extern tree finish_return_stmt (tree); extern tree begin_for_stmt (void); extern void finish_for_init_stmt (tree); extern void finish_for_cond (tree, tree); extern void finish_for_expr (tree, tree); extern void finish_for_stmt (tree); extern tree finish_break_stmt (void); extern tree finish_continue_stmt (void); extern tree begin_switch_stmt (void); extern void finish_switch_cond (tree, tree); extern void finish_switch_stmt (tree); 
extern tree finish_case_label (tree, tree); extern tree finish_goto_stmt (tree); extern tree begin_try_block (void); extern void finish_try_block (tree); extern tree begin_eh_spec_block (void); extern void finish_eh_spec_block (tree, tree); extern void finish_handler_sequence (tree); extern tree begin_function_try_block (tree *); extern void finish_function_try_block (tree); extern void finish_function_handler_sequence (tree, tree); extern void finish_cleanup_try_block (tree); extern tree begin_handler (void); extern void finish_handler_parms (tree, tree); extern void finish_handler (tree); extern void finish_cleanup (tree, tree); enum { BCS_NO_SCOPE = 1, BCS_TRY_BLOCK = 2, BCS_FN_BODY = 4 }; extern tree begin_compound_stmt (unsigned int); extern void finish_compound_stmt (tree); extern tree finish_asm_stmt (int, tree, tree, tree, tree); extern tree finish_label_stmt (tree); extern void finish_label_decl (tree); extern tree finish_parenthesized_expr (tree); extern tree finish_non_static_data_member (tree, tree, tree); extern tree begin_stmt_expr (void); extern tree finish_stmt_expr_expr (tree, tree); extern tree finish_stmt_expr (tree, bool); extern tree perform_koenig_lookup (tree, tree); extern tree finish_call_expr (tree, tree, bool, bool); extern tree finish_increment_expr (tree, enum tree_code); extern tree finish_this_expr (void); extern tree finish_pseudo_destructor_expr (tree, tree, tree); extern tree finish_unary_op_expr (enum tree_code, tree); extern tree finish_compound_literal (tree, VEC(constructor_elt,gc) *); extern tree finish_fname (tree); extern void finish_translation_unit (void); extern tree finish_template_type_parm (tree, tree); extern tree finish_template_template_parm (tree, tree); extern tree begin_class_definition (tree, tree); extern void finish_template_decl (tree); extern tree finish_template_type (tree, tree, int); extern tree finish_base_specifier (tree, tree, bool); extern void finish_member_declaration (tree); extern void 
qualified_name_lookup_error (tree, tree, tree); extern void check_template_keyword (tree); extern tree finish_id_expression (tree, tree, tree, cp_id_kind *, bool, bool, bool *, bool, bool, bool, bool, const char **); extern tree finish_typeof (tree); extern tree finish_offsetof (tree); extern void finish_decl_cleanup (tree, tree); extern void finish_eh_cleanup (tree); extern void expand_body (tree); extern void finish_mem_initializers (tree); extern tree check_template_template_default_arg (tree); extern void expand_or_defer_fn (tree); extern void check_accessibility_of_qualified_id (tree, tree, tree); extern tree finish_qualified_id_expr (tree, tree, bool, bool, bool, bool); extern void simplify_aggr_init_expr (tree *); extern void finalize_nrv (tree *, tree, tree); extern void note_decl_for_pch (tree); extern tree finish_omp_clauses (tree); extern void finish_omp_threadprivate (tree); extern tree begin_omp_structured_block (void); extern tree finish_omp_structured_block (tree); extern tree begin_omp_parallel (void); extern tree finish_omp_parallel (tree, tree); extern tree begin_omp_task (void); extern tree finish_omp_task (tree, tree); extern tree finish_omp_for (location_t, tree, tree, tree, tree, tree, tree, tree); extern void finish_omp_atomic (enum tree_code, tree, tree); extern void finish_omp_barrier (void); extern void finish_omp_flush (void); extern void finish_omp_taskwait (void); extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree); extern tree cxx_omp_clause_default_ctor (tree, tree, tree); extern tree cxx_omp_clause_copy_ctor (tree, tree, tree); extern tree cxx_omp_clause_assign_op (tree, tree, tree); extern void cxx_omp_finish_clause (tree); extern tree cxx_omp_clause_dtor (tree, tree); extern bool cxx_omp_privatize_by_reference (tree); extern bool cxx_omp_create_clause_info (tree, tree, bool, bool, bool); extern tree baselink_for_fns (tree); /* in tree.c */ extern void lang_check_failed (const char *, int, const char *) 
ATTRIBUTE_NORETURN; extern tree stabilize_expr (tree, tree *); extern void stabilize_call (tree, tree *); extern bool stabilize_init (tree, tree *); extern tree add_stmt_to_compound (tree, tree); extern tree cxx_maybe_build_cleanup (tree); extern void init_tree (void); extern int pod_type_p (tree); extern int zero_init_p (tree); extern tree canonical_type_variant (tree); extern tree copy_binfo (tree, tree, tree, tree *, int); extern int member_p (tree); extern cp_lvalue_kind real_lvalue_p (tree); extern bool builtin_valid_in_constant_expr_p (tree); extern tree build_min (enum tree_code, tree, ...); extern tree build_min_nt (enum tree_code, ...); extern tree build_min_non_dep (enum tree_code, tree, ...); extern tree build_cplus_new (tree, tree); extern tree get_target_expr (tree); extern tree build_cplus_array_type (tree, tree); extern tree hash_tree_cons (tree, tree, tree); extern tree hash_tree_chain (tree, tree); extern tree build_qualified_name (tree, tree, tree, bool); extern int is_overloaded_fn (tree); extern tree get_first_fn (tree); extern tree ovl_cons (tree, tree); extern tree build_overload (tree, tree); extern const char *cxx_printable_name (tree, int); extern tree build_exception_variant (tree, tree); extern tree bind_template_template_parm (tree, tree); extern tree array_type_nelts_total (tree); extern tree array_type_nelts_top (tree); extern tree break_out_target_exprs (tree); extern tree get_type_decl (tree); extern tree decl_namespace_context (tree); extern bool decl_anon_ns_mem_p (tree); extern tree lvalue_type (tree); extern tree error_type (tree); extern int varargs_function_p (tree); extern bool really_overloaded_fn (tree); extern bool cp_tree_equal (tree, tree); extern tree no_linkage_check (tree, bool); extern void debug_binfo (tree); extern tree build_dummy_object (tree); extern tree maybe_dummy_object (tree, tree *); extern int is_dummy_object (tree); extern const struct attribute_spec cxx_attribute_table[]; extern tree make_ptrmem_cst 
(tree, tree); extern tree cp_build_type_attribute_variant (tree, tree); extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t); #define cp_build_qualified_type(TYPE, QUALS) \ cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error) extern special_function_kind special_function_p (tree); extern int count_trees (tree); extern int char_type_p (tree); extern void verify_stmt_tree (tree); extern linkage_kind decl_linkage (tree); extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn, void*, struct pointer_set_t*); extern int cp_cannot_inline_tree_fn (tree*); extern tree cp_add_pending_fn_decls (void*,tree); extern int cp_auto_var_in_fn_p (tree,tree); extern tree fold_if_not_in_template (tree); extern tree rvalue (tree); extern tree convert_bitfield_to_declared_type (tree); extern tree cp_save_expr (tree); extern bool cast_valid_in_integral_constant_expression_p (tree); /* in typeck.c */ extern int string_conv_p (tree, tree, int); extern tree cp_truthvalue_conversion (tree); extern tree condition_conversion (tree); extern tree require_complete_type (tree); extern tree complete_type (tree); extern tree complete_type_or_else (tree, tree); extern int type_unknown_p (tree); extern bool comp_except_specs (tree, tree, bool); extern bool comptypes (tree, tree, int); extern bool compparms (tree, tree); extern int comp_cv_qualification (tree, tree); extern int comp_cv_qual_signature (tree, tree); extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code); extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool); #define cxx_sizeof_nowarn(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false) extern tree inline_conversion (tree); extern tree is_bitfield_expr_with_lowered_type (tree); extern tree unlowered_expr_type (tree); extern tree decay_conversion (tree); extern tree build_class_member_access_expr (tree, tree, tree, bool); extern tree finish_class_member_access_expr (tree, tree, bool); extern tree build_x_indirect_ref (tree, const char 
*); extern tree build_indirect_ref (tree, const char *); extern tree build_array_ref (tree, tree); extern tree get_member_function_from_ptrfunc (tree *, tree); extern tree build_x_binary_op (enum tree_code, tree, tree, bool *); extern tree build_x_unary_op (enum tree_code, tree); extern tree unary_complex_lvalue (enum tree_code, tree); extern tree build_x_conditional_expr (tree, tree, tree); extern tree build_x_compound_expr_from_list (tree, const char *); extern tree build_x_compound_expr (tree, tree); extern tree build_compound_expr (tree, tree); extern tree build_static_cast (tree, tree); extern tree build_reinterpret_cast (tree, tree); extern tree build_const_cast (tree, tree); extern tree build_c_cast (tree, tree); extern tree build_x_modify_expr (tree, enum tree_code, tree); extern tree cp_build_modify_expr (tree, enum tree_code, tree); extern tree build_modify_expr (tree, enum tree_code, tree); extern tree convert_for_initialization (tree, tree, tree, int, const char *, tree, int); extern int comp_ptr_ttypes (tree, tree); extern bool comp_ptr_ttypes_const (tree, tree); extern int ptr_reasonably_similar (tree, tree); extern tree build_ptrmemfunc (tree, tree, int, bool); extern int cp_type_quals (tree); extern bool cp_type_readonly (tree); extern bool cp_has_mutable_p (tree); extern bool at_least_as_qualified_p (tree, tree); extern void cp_apply_type_quals_to_decl (int, tree); extern tree build_ptrmemfunc1 (tree, tree, tree); extern void expand_ptrmemfunc_cst (tree, tree *, tree *); extern tree type_after_usual_arithmetic_conversions (tree, tree); extern tree composite_pointer_type (tree, tree, tree, tree, const char*); extern tree merge_types (tree, tree); extern tree check_return_expr (tree, bool *); #define cp_build_binary_op(code, arg1, arg2) \ build_binary_op(code, arg1, arg2, 1) #define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true) extern tree build_ptrmemfunc_access_expr (tree, tree); extern tree build_address (tree); extern tree 
build_nop (tree, tree); extern tree non_reference (tree); extern tree lookup_anon_field (tree, tree); extern bool invalid_nonstatic_memfn_p (tree); extern tree convert_member_func_to_ptr (tree, tree); extern tree convert_ptrmem (tree, tree, bool, bool); extern int lvalue_or_else (tree, enum lvalue_use); extern int lvalue_p (tree); /* in typeck2.c */ extern void require_complete_eh_spec_types (tree, tree); extern void cxx_incomplete_type_diagnostic (tree, tree, int); #undef cxx_incomplete_type_error extern void cxx_incomplete_type_error (tree, tree); #define cxx_incomplete_type_error(V,T) \ (cxx_incomplete_type_diagnostic ((V), (T), 0)) extern tree error_not_base_type (tree, tree); extern tree binfo_or_else (tree, tree); extern void readonly_error (tree, const char *, int); extern void complete_type_check_abstract (tree); extern int abstract_virtuals_error (tree, tree); extern tree store_init_value (tree, tree); extern tree digest_init (tree, tree); extern tree build_scoped_ref (tree, tree, tree *); extern tree build_x_arrow (tree); extern tree build_m_component_ref (tree, tree); extern tree build_functional_cast (tree, tree); extern tree add_exception_specifier (tree, tree, int); extern tree merge_exception_specifiers (tree, tree); /* in mangle.c */ extern void init_mangle (void); extern void mangle_decl (const tree); extern const char *mangle_type_string (tree); extern tree mangle_typeinfo_for_type (tree); extern tree mangle_typeinfo_string_for_type (tree); extern tree mangle_vtbl_for_type (tree); extern tree mangle_vtt_for_type (tree); extern tree mangle_ctor_vtbl_for_type (tree, tree); extern tree mangle_thunk (tree, int, tree, tree); extern tree mangle_conv_op_name_for_type (tree); extern tree mangle_guard_variable (tree); extern tree mangle_ref_init_variable (tree); /* in dump.c */ extern bool cp_dump_tree (void *, tree); /* In cp/cp-objcp-common.c. 
*/ extern HOST_WIDE_INT cxx_get_alias_set (tree); extern bool cxx_warn_unused_global_decl (tree); extern tree cp_expr_size (tree); extern size_t cp_tree_size (enum tree_code); extern bool cp_var_mod_type_p (tree, tree); extern void cxx_initialize_diagnostics (struct diagnostic_context *); extern int cxx_types_compatible_p (tree, tree); extern void init_shadowed_var_for_decl (void); extern tree cxx_staticp (tree); /* in cp-gimplify.c */ extern int cp_gimplify_expr (tree *, tree *, tree *); extern void cp_genericize (tree); /* -- end of C++ */ /* In order for the format checking to accept the C++ frontend diagnostic framework extensions, you must include this file before toplev.h, not after. We override the definition of GCC_DIAG_STYLE in c-common.h. */ #undef GCC_DIAG_STYLE #define GCC_DIAG_STYLE __gcc_cxxdiag__ #if GCC_VERSION >= 4001 #define ATTRIBUTE_GCC_CXXDIAG(m, n) __attribute__ ((__format__ (GCC_DIAG_STYLE, m, n))) ATTRIBUTE_NONNULL(m) #else #define ATTRIBUTE_GCC_CXXDIAG(m, n) ATTRIBUTE_NONNULL(m) #endif extern void cp_cpp_error (cpp_reader *, int, const char *, va_list *) ATTRIBUTE_GCC_CXXDIAG(3,0); #endif /* ! GCC_CP_TREE_H */
GridInit.c
#include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif

// Returns a uniform pseudo-random double in [0, 1] from rand().
// rand() is acceptable here because grid generation happens during
// serial initialization, not in the timed parallel region.
static double rand_unit(void)
{
	return (double) rand() / (double) RAND_MAX;
}

// Generates randomized energy grid for each nuclide.
// Note that this is done as part of initialization (serial), so
// rand() is used.
void generate_grids( NuclideGridPoint ** nuclide_grids,
                     long n_isotopes, long n_gridpoints )
{
	for( long n = 0; n < n_isotopes; n++ )
		for( long p = 0; p < n_gridpoints; p++ )
		{
			NuclideGridPoint * gp = &nuclide_grids[n][p];
			// Fill every cross-section field (same rand() call
			// order as before, so the generated data is unchanged).
			gp->energy        = rand_unit();
			gp->total_xs      = rand_unit();
			gp->elastic_xs    = rand_unit();
			gp->absorbtion_xs = rand_unit();
			gp->fission_xs    = rand_unit();
			gp->nu_fission_xs = rand_unit();
		}
}

// Verification version of this function (tighter control over RNG):
// identical structure, but draws from the deterministic rn_v() stream.
void generate_grids_v( NuclideGridPoint ** nuclide_grids,
                       long n_isotopes, long n_gridpoints )
{
	for( long n = 0; n < n_isotopes; n++ )
		for( long p = 0; p < n_gridpoints; p++ )
		{
			NuclideGridPoint * gp = &nuclide_grids[n][p];
			gp->energy        = rn_v();
			gp->total_xs      = rn_v();
			gp->elastic_xs    = rn_v();
			gp->absorbtion_xs = rn_v();
			gp->fission_xs    = rn_v();
			gp->nu_fission_xs = rn_v();
		}
}

// Sorts each nuclide's grid by energy (lowest -> highest) using the
// project's NGP_compare ordering.
void sort_nuclide_grids( NuclideGridPoint ** nuclide_grids,
                         long n_isotopes, long n_gridpoints )
{
	for( long n = 0; n < n_isotopes; n++ )
		qsort( nuclide_grids[n], n_gridpoints,
		       sizeof(NuclideGridPoint), NGP_compare );
}

// Allocates unionized energy grid, and assigns union of energy levels
// from nuclide grids to it.
// Allocates the unionized energy grid and fills it with the sorted
// union of all nuclide grid energy levels.
//
// Parameters:
//   n_isotopes    - number of nuclides
//   n_gridpoints  - energy points per nuclide grid
//   nuclide_grids - per-nuclide grids (contiguous backing storage,
//                   as produced by gpmatrix)
// Returns: newly allocated array of n_isotopes * n_gridpoints
//          GridPoints; each xs_ptrs points into one shared backing
//          allocation. Aborts the program on allocation failure.
GridPoint * generate_energy_grid( long n_isotopes, long n_gridpoints,
                                  NuclideGridPoint ** nuclide_grids)
{
	int mype = 0;

	#ifdef MPI
	MPI_Comm_rank(MPI_COMM_WORLD, &mype);
	#endif

	if( mype == 0 ) printf("Generating Unionized Energy Grid...\n");

	long n_unionized_grid_points = n_isotopes*n_gridpoints;
	int (*cmp) (const void *, const void *);
	cmp = NGP_compare;

	GridPoint * energy_grid = (GridPoint *)malloc( n_unionized_grid_points
	                                               * sizeof( GridPoint ) );
	// Fix: this allocation was previously unchecked (unlike 'full'
	// below); an OOM would have caused a NULL dereference later.
	if( energy_grid == NULL )
	{
		fprintf(stderr,"ERROR - Out Of Memory!\n");
		exit(1);
	}

	if( mype == 0 ) printf("Copying and Sorting all nuclide grids...\n");

	// Copy all nuclide grids into one contiguous scratch matrix and
	// sort the whole thing by energy; the sorted energies become the
	// unionized grid.
	NuclideGridPoint ** n_grid_sorted = gpmatrix( n_isotopes, n_gridpoints );

	memcpy( n_grid_sorted[0], nuclide_grids[0], n_isotopes*n_gridpoints*
	                                      sizeof( NuclideGridPoint ) );

	qsort( &n_grid_sorted[0][0], n_unionized_grid_points,
	       sizeof(NuclideGridPoint), cmp);

	if( mype == 0 ) printf("Assigning energies to unionized grid...\n");

	for( long i = 0; i < n_unionized_grid_points; i++ )
		energy_grid[i].energy = n_grid_sorted[0][i].energy;

	gpmatrix_free(n_grid_sorted);

	// One shared backing array for all per-point xs_ptrs index rows.
	int * full = (int *) malloc( n_isotopes * n_unionized_grid_points
	                             * sizeof(int) );
	if( full == NULL )
	{
		fprintf(stderr,"ERROR - Out Of Memory!\n");
		exit(1);
	}

	for( long i = 0; i < n_unionized_grid_points; i++ )
		energy_grid[i].xs_ptrs = &full[n_isotopes * i];

	return energy_grid;
}

// Searches each nuclide grid for the closest energy level and assigns
// pointer from unionized grid to the correct spot in the nuclide grid.
// This process is time consuming, as the number of binary searches // required is: binary searches = n_gridpoints * n_isotopes^2 void set_grid_ptrs( GridPoint * energy_grid, NuclideGridPoint ** nuclide_grids, long n_isotopes, long n_gridpoints ) { int mype = 0; #ifdef MPI MPI_Comm_rank(MPI_COMM_WORLD, &mype); #endif if( mype == 0 ) printf("Assigning pointers to Unionized Energy Grid...\n"); #pragma omp parallel for default(none) \ shared( energy_grid, nuclide_grids, n_isotopes, n_gridpoints, mype ) for( long i = 0; i < n_isotopes * n_gridpoints ; i++ ) { double quarry = energy_grid[i].energy; if( INFO && mype == 0 && omp_get_thread_num() == 0 && i % 200 == 0 ) printf("\rAligning Unionized Grid...(%.0lf%% complete)", 100.0 * (double) i / (n_isotopes*n_gridpoints / omp_get_num_threads()) ); for( long j = 0; j < n_isotopes; j++ ) { // j is the nuclide i.d. // log n binary search energy_grid[i].xs_ptrs[j] = binary_search( nuclide_grids[j], quarry, n_gridpoints); } } if( mype == 0 ) printf("\n"); //test /* for( int i=0; i < n_isotopes * n_gridpoints; i++ ) for( int j = 0; j < n_isotopes; j++ ) printf("E = %.4lf\tNuclide %d->%p->%.4lf\n", energy_grid[i].energy, j, energy_grid[i].xs_ptrs[j], (energy_grid[i].xs_ptrs[j])->energy ); */ }
ransac.h
#ifndef _EAGLEEYE_RANSAC_H_ #define _EAGLEEYE_RANSAC_H_ #include "eagleeye/common/EagleeyeMacro.h" #include "eagleeye/common/EagleeyeLog.h" #include "eagleeye/common/EagleeyeTime.h" #include "eagleeye/basic/Matrix.h" #include "eagleeye/basic/MatrixMath.h" #include <iostream> #include <cmath> #include <string> #include <random> #include <memory> #include <algorithm> #include <vector> #include <omp.h> namespace eagleeye{ // Each abstract model is made of abstract parameters // Could be anything from a point (that make a 2D line or 3D plane or image correspondences) to a line class AbstractParameter{ public: virtual ~AbstractParameter(void) {}; // To make this polymorphic we add dummy destructor }; // Abstract model type for generic RANSAC model fitting template <int t_NumParams, int t_ModelSize> /* Minimum number of parameters required to define this model*/ class AbstractModel{ public: static const int ModelParamNum = t_NumParams; static const int ModelSize = t_ModelSize; AbstractModel(){ } protected: std::array<std::shared_ptr<AbstractParameter>, t_NumParams> m_MinModelParams; public: virtual Matrix<float> build(const std::vector<std::shared_ptr<AbstractParameter>> &InputParams) = 0; virtual Matrix<float> evaluate(const std::vector<std::shared_ptr<AbstractParameter>> &EvaluateParams, Matrix<float> MultiH,float Threshold) = 0; virtual std::array<std::shared_ptr<AbstractParameter>, t_NumParams> getModelParams(void) { return m_MinModelParams; }; }; // T - AbstractModel template <class T> class RANSAC{ private: const int t_NumParams = T::ModelParamNum; std::vector<std::shared_ptr<AbstractParameter>> m_Data; // All the data Matrix<float> m_BestModel; // Pointer to the best model, valid only after Estimate() is called std::vector<std::shared_ptr<AbstractParameter>> m_BestInliers; int m_MaxIterations; // Number of iterations before termination float m_Threshold; // The threshold for computing model consensus float m_BestModelScore; // The score of the best model int 
m_BestModelIdx; std::vector<std::mt19937> m_RandEngines; // Mersenne twister high quality RNG that support *OpenMP* multi-threading std::shared_ptr<T> m_RandomModel; int m_nThreads; public: RANSAC(void){ m_nThreads = std::max(1, omp_get_max_threads()); EAGLEEYE_LOGD("RANSAC Maximum usable threads: %d", m_nThreads); for (int i = 0; i < m_nThreads; ++i){ std::random_device SeedDevice; m_RandEngines.push_back(std::mt19937(SeedDevice())); } m_RandomModel = std::make_shared<T>(); reset(); }; virtual ~RANSAC(void) {}; void reset(void){ // Clear sampled models, etc. and prepare for next call. Reset RANSAC estimator state m_Data.clear(); m_BestModelIdx = -1; m_BestModelScore = 0.0; }; void initialize(float Threshold, int MaxIterations = 1000){ m_Threshold = Threshold; m_MaxIterations = MaxIterations; }; Matrix<float> getBestModel() { return m_BestModel; }; const std::vector<std::shared_ptr<AbstractParameter>>& getBestInliers(void) { return m_BestInliers; }; bool estimate(const std::vector<std::shared_ptr<AbstractParameter>> &Data){ if (Data.size() <= t_NumParams){ EAGLEEYE_LOGE("RANSAC - Number of data points is too less. 
Not doing anything."); return false; } m_Data = Data; int DataSize = m_Data.size(); std::vector<Matrix<float>> ransac_models(m_MaxIterations); EAGLEEYE_TIME_START(ini_ransac_models); omp_set_dynamic(0); // Explicitly disable dynamic teams omp_set_num_threads(m_nThreads); #pragma omp parallel for for(int i=0; i<m_MaxIterations; ++i){ int thread_id = omp_get_thread_num(); // Select t_NumParams random samples std::vector<std::shared_ptr<AbstractParameter>> RandomSamples(t_NumParams); std::vector<std::shared_ptr<AbstractParameter>> RemainderSamples = m_Data; // Without the chosen random samples std::shuffle(RemainderSamples.begin(), RemainderSamples.end(), m_RandEngines[thread_id]); // To avoid picking the same element more than once std::copy(RemainderSamples.begin(), RemainderSamples.begin() + t_NumParams, RandomSamples.begin()); Matrix<float> M = m_RandomModel->build(RandomSamples); ransac_models[i] = M.flatten(); } EAGLEEYE_TIME_END(ini_ransac_models); Matrix<float> ransac_models_mat = concat(ransac_models, 0); Matrix<float> ransac_inlier_fractions = m_RandomModel->evaluate(m_Data, ransac_models_mat, m_Threshold); for (int i = 0; i < m_MaxIterations; ++i){ // Check if the sampled model is the best so far if (ransac_inlier_fractions.at(0,i) > m_BestModelScore){ m_BestModelScore = ransac_inlier_fractions.at(0,i); m_BestModel = ransac_models_mat(Range(i,i+1),Range(0,9)); } } m_BestModel = m_BestModel.clone(); EAGLEEYE_LOGD("BestInlierFraction: %f",m_BestModelScore); reset(); return true; }; }; } #endif
dropout_op.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <cstring>
#include <random>
#include <string>
#include <algorithm>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/aligned_vector.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

namespace paddle {
namespace operators {

#if defined(__NVCC__) || defined(__HIPCC__)
// Vectorized dropout backward kernel: computes
//   dx[i] = dout[i] * mask[i] * factor
// loading/storing VecSize elements at a time via AlignedVector, with a
// grid-stride loop over `size` elements.
template <typename T, typename MaskType, int VecSize>
__global__ void DropoutGradCUDAKernel(const T* dout, const MaskType* mask,
                                      const T factor, const int64_t size,
                                      T* dx) {
  using LoadT = platform::AlignedVector<T, VecSize>;
  using MaskLoadT = platform::AlignedVector<MaskType, VecSize>;

  int64_t idx = blockDim.x * blockIdx.x + threadIdx.x;
  for (int i = idx * VecSize; i < size; i += blockDim.x * gridDim.x * VecSize) {
    LoadT dout_val;
    platform::Load<T, VecSize>(&dout[i], &dout_val);

    MaskLoadT mask_val;
    platform::Load<MaskType, VecSize>(&mask[i], &mask_val);

    LoadT dx_val;

#pragma unroll
    for (int j = 0; j < VecSize; j++) {
      dx_val[j] = dout_val[j] * static_cast<T>(mask_val[j]) * factor;
    }

    platform::Store<T, VecSize>(dx_val, &dx[i]);
  }
}
#endif

using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

// CPU forward dropout kernel.
// Training mode (!is_test): draws a uniform float per element; elements
// with draw < dropout_prob are zeroed (mask = 0), the rest are kept
// (mask = 1). With "upscale_in_train" kept elements are scaled by
// 1/(1 - dropout_prob). Outputs both "Out" and the uint8 "Mask".
// Inference mode (is_test): no mask is produced; "upscale_in_train"
// copies the input through unchanged, otherwise the whole input is
// scaled by (1 - dropout_prob).
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<Tensor>("X");
    // Optional "Seed" input tensor overrides the "seed" attribute.
    auto* seed =
        context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
    auto* y = context.Output<Tensor>("Out");
    const auto* x_data = x->data<T>();
    auto* y_data = y->mutable_data<T>(context.GetPlace());
    float dropout_prob = context.Attr<float>("dropout_prob");

    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    bool upscale_in_train = (dropout_implementation == "upscale_in_train");
    if (!context.Attr<bool>("is_test")) {
      auto* mask = context.Output<Tensor>("Mask");
      auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
      size_t size = framework::product(mask->dims());

      // Special case when dropout_prob is 1.0: everything is dropped,
      // so output and mask are simply zero-filled.
      if (dropout_prob == 1.0f) {
        std::memset(y_data, 0, size * sizeof(*y_data));        // NOLINT
        std::memset(mask_data, 0, size * sizeof(*mask_data));  // NOLINT
        return;
      }
      // std::minstd_rand engine;
      // NOTE: fixed seed should only be used in unittest or for debug.
      // Guarantee to use random seed in training.
      int seed_data = 0;
      if (seed) {
        seed_data = *(seed->data<int>());
      } else {
        seed_data =
            context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : 0;
      }
      auto engine = framework::GetCPURandomEngine(seed_data);

      std::uniform_real_distribution<float> dist(0, 1);

      for (size_t i = 0; i < size; ++i) {
        if (dist(*engine) < dropout_prob) {
          // Dropped element: zero both output and mask.
          mask_data[i] = 0;
          y_data[i] = 0;
        } else {
          mask_data[i] = 1;
          if (upscale_in_train) {
            // Scale kept values up so the expected activation matches
            // inference-time behavior.
            y_data[i] = x_data[i] / static_cast<T>(1.0f - dropout_prob);
          } else {
            y_data[i] = x_data[i];
          }
        }
      }
    } else {
      if (upscale_in_train) {
        // Inference with upscale_in_train: identity copy.
        const auto* X_data = x->data<T>();
        auto* Y_data = y->mutable_data<T>(context.GetPlace());
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
        for (int i = 0; i < x->numel(); i++) {
          Y_data[i] = X_data[i];
        }
      } else {
        // Inference with downscale-at-test: Out = X * (1 - p).
        auto X = EigenMatrix<T>::Reshape(*x, 1);
        auto Y = EigenMatrix<T>::Reshape(*y, 1);
        auto& place =
            *context.template device_context<DeviceContext>().eigen_device();
        Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
      }
    }
  }
};

// Dropout backward kernel. Computes grad(X) from grad(Out) and, in
// training mode, the saved "Mask":
//   is_test + upscale_in_train : dX = dY
//   is_test + downscale        : dX = dY * (1 - p)
//   train   + upscale_in_train : dX = dY * mask / (1 - p)  (0 when p == 1)
//   train   + downscale        : dX = dY * mask
// When running on GPU with a 4-element-aligned size, the upscale case
// dispatches to the vectorized DropoutGradCUDAKernel above.
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
    auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* mask = context.Input<Tensor>("Mask");
    grad_x->mutable_data<T>(context.GetPlace());
    auto size = grad_x->numel();

    auto dX = EigenVector<T>::Flatten(*grad_x);
    auto dY = EigenVector<T>::Flatten(*grad_y);

    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto& dropout_implementation =
        context.Attr<std::string>("dropout_implementation");
    if (context.Attr<bool>("is_test") == true) {
      if (dropout_implementation == "upscale_in_train") {
        dX.device(place) = static_cast<T>(1) * dY;
      } else {
        float dropout_prob = context.Attr<float>("dropout_prob");
        dX.device(place) = dY * static_cast<T>(1.0f - dropout_prob);
      }
    } else {
      auto M = EigenVector<uint8_t>::Flatten(*mask);
      if (dropout_implementation == "upscale_in_train") {
        float dropout_prob = context.Attr<float>("dropout_prob");
        if (dropout_prob == 1.0f) {
          // Everything was dropped in the forward pass, so the
          // gradient is identically zero.
          dX.device(place) = static_cast<T>(0) * dY;
        } else {
          int vec_size = platform::GetVectorizedSize<T>(grad_y->data<T>());
          if (platform::is_gpu_place(context.GetPlace()) && vec_size == 4 &&
              size % 4 == 0) {
#if defined(__NVCC__) || defined(__HIPCC__)
            auto factor = static_cast<T>(1.0f / (1.0f - dropout_prob));
            auto stream = context.cuda_device_context().stream();
            platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(
                context.cuda_device_context(), size);
            DropoutGradCUDAKernel<
                T, uint8_t,
                4><<<config.block_per_grid, config.thread_per_block, 0,
                     stream>>>(grad_y->data<T>(), mask->data<uint8_t>(), factor,
                               size, grad_x->data<T>());
#endif
          } else {
            dX.device(place) =
                dY * M.cast<T>() / static_cast<T>(1.0f - dropout_prob);
          }
        }
      } else {
        dX.device(place) = dY * M.cast<T>();
      }
    }
  }
};

}  // namespace operators
}  // namespace paddle
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 
} } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 
24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } 
} }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } 
}, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } 
}, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 
13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 
8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 
63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 
47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *), ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *, const MagickBooleanType,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,const MagickBooleanType, ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), 
  /* Continuation of the static-void forward-declaration list begun on the
     previous line. */
  WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);

/*
  Component-wise vector helpers used by the DXT compression code.
  The 4-component variants carry the weight in .w alongside RGB in .x/.y/.z.
*/

/* destination = left + right (all four components). */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp every component of a 4-vector into [0,1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component of a 3-vector into [0,1]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the x/y/z of a 4-vector into a 3-vector (drops .w). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b, component-wise. */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* destination = left * right, component-wise (4 components). */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* destination = left * right, component-wise (3 components). */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a*b + c (fused multiply-add), 4 components. */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a*b + c (fused multiply-add), 3 components. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* destination = 1 / value, component-wise.  No zero check: callers must not
   pass a vector with a zero component. */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* destination = left - right (4 components). */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* destination = left - right (3 components). */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Round every component toward zero (floor for positive, ceil otherwise). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* 3-component round-toward-zero; the .z expression continues on the next
   source line outside this view. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { 
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constant weights for the least-squares endpoint solve. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* 5:6:5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  /* Initial ordering: project the points onto the principal axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /*
      Each i/j/k triple splits the ordered points into the four clusters;
      part0..part3 are the weighted sums of each cluster.
    */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }

        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = everything not in the first three clusters. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          /* Accumulate the normal-equation sums for the 2x2 solve. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          /* Solve for the two endpoints a (start) and b (end). */
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both endpoints to the 5:6:5 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,betax_sum,&e2);

          /* Metric-weighted squared error of this partition. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-check under the lock: another thread may have won. */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* Stop when an iteration fails to improve on the previous best. */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    /* Re-order along the current best axis and try again. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }

  /* Translate the winning partition back to per-texel palette indices. */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}

/*
  CompressRangeFit() chooses DXT1 color endpoints by projecting the points
  on the principal axis, taking the extremes as start/end, snapping them to
  the 5:6:5 grid, and assigning each point to the nearest of the four
  interpolated palette entries.
*/
static void CompressRangeFit(const size_t count, const DDSVector4* points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      /* Extremes of the projection on the principal axis become endpoints. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints to the 5:6:5 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* The four palette entries: endpoints plus 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;

  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));

  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  for (i=0; i < (ssize_t) count; i++)
  {
    /* Assign each point to the metric-nearest palette entry. */
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

static void ComputeEndPoints(const
DDSSingleColourLookup *lookup[], const unsigned char *color,
  DDSVector3 *start, DDSVector3 *end, unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  /*
    Look up the best single-color endpoint pair per channel from the
    precomputed tables; try both source variants and keep the one with
    the smallest total squared error.
  */
  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Normalize the table's 5/6/5-bit endpoints to [0,1]. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/*
  ComputePrincipleComponent() extracts the dominant eigenvector of the 3x3
  symmetric covariance matrix (stored as 6 unique entries) by power
  iteration with a fixed 8 steps.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Renormalize by the largest component to avoid overflow. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/*
  ComputeWeightedCovariance() accumulates the weighted 3x3 covariance of the
  block's color points (weight in each point's w component) into the 6-entry
  `covariance` array.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  /* Weighted centroid of the points. */
  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against a zero total weight (FLT_EPSILON threshold). */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  /* Accumulate the weighted outer products of the centered points. */
  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}

/*
  ConstructOrdering() sorts the block's points by their projection on
  `axis`, stores the ordering in slot `iteration` of `order`, and fills
  `pointsWeights`/`xSumwSum` with the re-ordered weighted points.  Returns
  MagickFalse when the ordering duplicates one from an earlier iteration
  (so the cluster-fit search can stop).
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis,
  DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order,
  size_t iteration)
{
  float
    dps[16],
    f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of the projections (count <= 16). */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Reject orderings already produced by a previous iteration. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  /* Weighted points in sorted order, plus their running sum. */
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s D D S                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsDDS() returns MagickTrue if the image format type, identified by
%  the magick string, is DDS.
%
%  The format of the IsDDS method is:
%
%      MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  /* A DDS file starts with the four-byte magic "DDS ". */
  if (length < 4)
    return(MagickFalse);
  if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D D S I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDDSImage() reads a DirectDraw Surface image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /* Pixel data starts right after the 128-byte header (magic + DDS_HEADER). */
  (void) SeekBlob(image, 128, SEEK_SET);
  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          /* Luminance-only is decoded through the 8-bit RGB path. */
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* Sanity-check the frame count against the blob size (CVE hardening). */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  /* Mipmaps are read unless "dds:skip-mipmaps" is explicitly false. */
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header information only, skip pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    /* Dispatch to the format-specific pixel decoder chosen above. */
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
  ReadDDSInfo() parses the DDS_HEADER and embedded DDS_PIXELFORMAT from the
  blob into `dds_info`.  Returns MagickFalse when the structure sizes or
  required flags do not match the DDS specification.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /*
    Seek to start of header
  */
  (void) SeekBlob(image, 4, SEEK_SET);

  /*
    Check header field: DDS_HEADER.dwSize must be 124.
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /*
    Fill in DDS info struct
  */
  dds_info->flags = ReadBlobLSBLong(image);

  /*
    Check required flags
  */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR);   /* reserved region of 11 DWORDs */

  /*
    Read pixel format structure: DDS_PIXELFORMAT.dwSize must be 32.
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR);   /* 3 reserved DWORDs */

  return MagickTrue;
}

/*
  SetDXT1Pixels() writes one decoded 4x4 DXT1 block into the pixel cache at
  (x,y), clipping against the image edges.  Returns MagickFalse when a
  transparent palette entry was used but the image has no alpha channel --
  the caller then enables alpha and re-runs the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      if ((x + i) < (ssize_t)
        image->rows)
        {
          /* Two bits per texel select one of the four palette entries. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}

/*
  ReadMipmaps() appends each mipmap level as a new image in the list,
  halving width and height every level, and decodes its pixels with the
  supplied per-format decoder.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(MagickFalse);
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}

/*
  ReadDXT1Pixels() decodes DXT1 (BC1) compressed pixel data: one 8-byte
  block (two 5:6:5 endpoints plus 32 bits of 2-bit indices) per 4x4 texel
  tile.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);

      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /*
            A transparent texel was hit on an image without alpha:
            enable alpha and decode the block again.
          */
          /* Correct alpha */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadDXT1() decodes the main DXT1 surface, then either reads or skips the
  mipmap chain (8 bytes per 4x4 block when skipping).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/*
  ReadDXT3Pixels() decodes DXT3 (BC2) data: each 16-byte block holds 64
  bits of explicit 4-bit alpha followed by a DXT1-style color block.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadDXT3() decodes the main DXT3 surface, then either reads or skips the
  mipmap chain (16 bytes per 4x4 block when skipping).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  ReadDXT5Pixels() decodes DXT5 (BC3) data: each 16-byte block holds two
  8-bit alpha endpoints plus 48 bits of 3-bit alpha codes, followed by a
  DXT1-style color block.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4,
        image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): 2 endpoints + 48 bits of codes. */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image)
        << 32);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: 3-bit code selects endpoint,
                interpolant, or (in 5-step mode) explicit 0/255.
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadDXT5() decodes the main DXT5 surface, then either reads or skips the
  mipmap chain (16 bytes per 4x4 block when skipping).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

static MagickBooleanType
ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  /*
    Decode uncompressed pixel data without alpha: 8-bit grayscale,
    16-bit R5G6B5, or 24/32-bit BGR(X), one scanline at a time.
  */
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);

    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* R5G6B5: shift each field down and rescale to 0..255. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* Bytes are stored B, G, R; a fourth pad byte for 32 bpp. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);

    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadUncompressedRGB() validates the pixel layout, decodes the main
  surface, then reads or skips the mipmap chain.  NOTE(review): the skip
  path always assumes 3 bytes per pixel -- presumably tuned for 24 bpp;
  verify against 8/16/32 bpp inputs.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);

  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/*
  ReadUncompressedRGBAPixels() decodes uncompressed pixel data with alpha:
  16-bit A1R5G5B5 / A8L8 / A4R4G4B4 (selected by the channel bit masks) or
  32-bit BGRA.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* Identify the 16-bit variant by its channel bit masks. */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;                     /* A1R5G5B5 */
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;                   /* A8L8 (gray + alpha) */
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;                     /* A4R4G4B4 */
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);

    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              SetPixelAlpha(image,(color & (1 << 15)) ?
                QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* A8L8: high byte alpha, low byte luminance. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* A4R4G4B4: rescale each 4-bit field to 0..255. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* 32-bit: bytes are stored B, G, R, A. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);

    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  ReadUncompressedRGBA() decodes the main surface, then reads or skips the
  mipmap chain (4 bytes per pixel when skipping).
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R
 e g i s t e r D D S I m a g e                                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  /* Three aliases share the same handlers: DDS, DXT1 and DXT5. */
  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
  RemapIndices() copies per-point palette codes back to per-texel order;
  texels masked out of the block (map[i] == -1) get the transparent
  index 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  register ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] = source[map[i]];
  }
}

/*
  Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level stores ceil(w/4) x ceil(h/4) blocks of texel_size. */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported
formats. % % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if ((clusterFit == MagickFalse) || (count == 0)) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) 
NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) 
WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register ssize_t x; ssize_t i, y, bx, by; register const Quantum *p; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, 
points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const Quantum *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(image,p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f; point.w = weightByAlpha ? 
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); 
(void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; register ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if (fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char 
color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
/* ==== haval_fmt_plug.c ==== */
/* HAVAL cracker patch for JtR. Hacked together during April of 2013 by Dhiru
 * Kholia <dhiru at openwall.com>.
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
 * it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted. */

/* Standard JtR plugin stanza: declare/register the formats, or compile the
 * implementation, depending on which pass includes this file. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_haval_256_3;
extern struct fmt_main fmt_haval_128_4;
#elif FMT_REGISTERS_H
john_register_one(&fmt_haval_256_3);
john_register_one(&fmt_haval_128_4);
#else

#include <string.h>
#include "arch.h"
#include "sph_haval.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
// Tuned on core i7 quad HT
//       256-3  128-4
//   1    227k   228k
//  64   6359k  5489k
// 128   7953k  6654k
// 256   8923k  7618k
// 512   9804k  8223k
//  1k  10307k  8569k  ** set to this value
//  2k  10081k  8427k
//  4k  10551k  8893k
#define OMP_SCALE  1024
#endif
#include "memdbg.h"

#define FORMAT_TAG		"$haval$"
#define TAG_LENGTH		7
#define ALGORITHM_NAME		"32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define PLAINTEXT_LENGTH	125
#define BINARY_SIZE256		32
#define BINARY_SIZE128		16
#define SALT_SIZE		0
#define BINARY_ALIGN	4
#define SALT_ALIGN		1
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1

/* Self-test vectors: raw hex and tagged forms of each digest. */
static struct fmt_tests haval_256_3_tests[] = {
	{"91850C6487C9829E791FC5B58E98E372F3063256BB7D313A93F1F83B426AEDCC", "HAVAL"},
	{"$haval$91850C6487C9829E791FC5B58E98E372F3063256BB7D313A93F1F83B426AEDCC", "HAVAL"},
	{NULL}
};

static struct fmt_tests haval_128_4_tests[] = {
	{"EE6BBF4D6A46A679B3A856C88538BB98", ""},
	{"$haval$ee6bbf4d6a46a679b3a856c88538bb98", ""},
	{NULL}
};

/* Per-candidate plaintext and digest buffers; sized for the larger
 * (256-bit) digest so both formats can share them. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE256 / sizeof(ARCH_WORD_32)];

/* Allocate the shared key/digest buffers, scaled for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Accept an optional $haval$ tag followed by exactly 'len' hex digits. */
static int valid(char *ciphertext, struct fmt_main *self, int len)
{
	char *p;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	if (strlen(p) != len)
		return 0;
	while(*p)
		if(atoi16[ARCH_INDEX(*p++)]==0x7f)
			return 0;
	return 1;
}

/* we need independant valids, since the $haval$ signature is the same */
/* otherwise, if we have input with a mix of both types, then ALL of them */
/* will validate, even though only the ones of the proper type will actually */
/* be tested.  If we had a singleton crypt function (which both 128-4 and */
/* 256-3 used, then a single valid would also work. But since each have */
/* their own crypt, and they are NOT compatible, then we need separate valids */
static int valid3(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 64);
}
static int valid4(char *ciphertext, struct fmt_main *self)
{
	return valid(ciphertext, self, 32);
}

/* Decode the (possibly tagged) hex ciphertext into 32 raw digest bytes. */
static void *get_binary_256(char *ciphertext)
{
	static union {
		unsigned char c[32];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;

	for (i = 0; i < 32; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Decode the (possibly tagged) hex ciphertext into 16 raw digest bytes. */
static void *get_binary_128(char *ciphertext)
{
	static union {
		unsigned char c[16];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		p = strrchr(ciphertext, '$') + 1;
	else
		p = ciphertext;

	for (i = 0; i < 16; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Hash-table bucket extractors over the first digest word. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

/* Compute HAVAL-256/3 for every queued candidate. */
static int crypt_256_3(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_haval256_3_context ctx;

		sph_haval256_3_init(&ctx);
		sph_haval256_3(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_haval256_3_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Compute HAVAL-128/4 for every queued candidate. */
static int crypt_128_4(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		sph_haval128_4_context ctx;

		sph_haval128_4_init(&ctx);
		sph_haval128_4(&ctx, saved_key[index], strlen(saved_key[index]));
		sph_haval128_4_close(&ctx, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Fast partial compare over the first machine word only; a full-length
 * compare follows in cmp_one*.  Without OpenMP only index 0 is checked,
 * which matches MAX_KEYS_PER_CRYPT == 1 in that configuration. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
#ifdef _OPENMP
	for (; index < count; index++)
#endif
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one256(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE256);
}

static int cmp_one128(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE128);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store one candidate, truncated to PLAINTEXT_LENGTH. */
static void haval_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Canonicalize: always tagged, hex lower-cased. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[TAG_LENGTH + 2 * BINARY_SIZE256 + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	strcpy(out, FORMAT_TAG);
	strcpy(&out[TAG_LENGTH], ciphertext);
	strlwr(&out[TAG_LENGTH]);
	return out;
}

struct fmt_main fmt_haval_256_3 = {
	{
		"HAVAL-256-3",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE256,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		haval_256_3_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid3,
		split,
		get_binary_256,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		haval_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_256_3,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one256,
		cmp_exact
	}
};

struct fmt_main fmt_haval_128_4 = {
	{
		"HAVAL-128-4",
		"",
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE128,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		haval_128_4_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid4,
		split,
		get_binary_128,
		fmt_default_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		fmt_default_set_salt,
		haval_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_128_4,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one128,
		cmp_exact
	}
};

#endif /* plugin stanza */
/* ==== prand.c ==== */
//------------------------------------------------------------------------------ // GraphBLAS/Demo/Source/prand: parallel random number generator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // A simple thread-safe parallel pseudo-random nuumber generator. #include "GraphBLAS.h" #undef GB_PUBLIC #define GB_LIBRARY #include "graphblas_demos.h" //------------------------------------------------------------------------------ // prand macros //------------------------------------------------------------------------------ // Generate the next seed, and extract a random 15-bit value from a seed. #define PRAND_RECURENCE(seed) ((seed) * 1103515245 + 12345) #define PRAND_15_MAX 32767 #define PRAND_15(seed) (((seed)/65536) % (PRAND_15_MAX + 1)) //------------------------------------------------------------------------------ // global types and operators //------------------------------------------------------------------------------ // These can be shared by all threads in a user application, and thus are // safely declared as global objects. GrB_Type prand_type = NULL ; GrB_UnaryOp prand_next_op = NULL ; GrB_UnaryOp prand_iget_op = NULL ; GrB_UnaryOp prand_xget_op = NULL ; GrB_BinaryOp prand_dup_op = NULL ; //------------------------------------------------------------------------------ // prand_next_op: unary operator to construct the next seed //------------------------------------------------------------------------------ // z = f(x), where x is the old seed and z is the new seed. 
GB_PUBLIC void prand_next_f (prand_t *z, const prand_t *x) { for (int k = 0 ; k < 5 ; k++) { z->seed [k] = PRAND_RECURENCE (x->seed [k]) ; } } //------------------------------------------------------------------------------ // prand_iget: unary operator to construct get a random integer from the seed //------------------------------------------------------------------------------ // z = f(x), where x is a random seed, and z is an unsigned 64-bit // pseudo-random number constructed from the seed. GB_PUBLIC void prand_iget_f (uint64_t *z, const prand_t *x) { uint64_t i = 0 ; for (int k = 0 ; k < 5 ; k++) { i = PRAND_15_MAX * i + PRAND_15 (x->seed [k]) ; } (*z) = i ; } //------------------------------------------------------------------------------ // prand_xget: unary operator to construct get a random double from the seed //------------------------------------------------------------------------------ // z = f(x), where x is a random seed, and z is a double precision // pseudo-random number constructed from the seed, in the range 0 to 1. GB_PUBLIC void prand_xget_f (double *z, prand_t *x) { uint64_t i ; prand_iget_f (&i, x) ; (*z) = ((double) i) / ((double) UINT64_MAX) ; } //------------------------------------------------------------------------------ // prand_dup: binary operator to build a vector //------------------------------------------------------------------------------ // This is required by GrB_Vector_build, but is never called since no // duplicates are created. This is the SECOND operator for the prand_type. 
#if defined ( __INTEL_COMPILER ) // disable icc warnings // 869: unused parameters #pragma warning (disable: 869 ) #elif defined ( __GNUC__ ) #pragma GCC diagnostic ignored "-Wunused-parameter" #endif GB_PUBLIC void prand_dup_f (prand_t *z, /* unused: */ const prand_t *x, const prand_t *y) { (*z) = (*y) ; } //------------------------------------------------------------------------------ // prand_init: create the random seed type and its operators //------------------------------------------------------------------------------ #define PRAND_FREE_ALL \ { \ GrB_Type_free (&prand_type) ; \ GrB_UnaryOp_free (&prand_next_op) ; \ GrB_UnaryOp_free (&prand_iget_op) ; \ GrB_UnaryOp_free (&prand_xget_op) ; \ GrB_BinaryOp_free (&prand_dup_op) ; \ } #undef OK #define OK(method) \ { \ GrB_Info info = method ; \ if (info != GrB_SUCCESS) \ { \ PRAND_FREE_ALL ; \ printf ("GraphBLAS error: %d\n", info) ; \ return (info) ; \ } \ } GB_PUBLIC GrB_Info prand_init ( ) { prand_type = NULL ; prand_next_op = NULL ; prand_iget_op = NULL ; prand_xget_op = NULL ; prand_dup_op = NULL ; OK (GrB_Type_new (&prand_type, sizeof (prand_t))) ; OK (GrB_UnaryOp_new (&prand_next_op, (GxB_unary_function) prand_next_f, prand_type, prand_type)) ; OK (GrB_UnaryOp_new (&prand_iget_op, (GxB_unary_function) prand_iget_f, GrB_UINT64, prand_type)) ; OK (GrB_UnaryOp_new (&prand_xget_op, (GxB_unary_function) prand_xget_f, GrB_FP64, prand_type)) ; OK (GrB_BinaryOp_new (&prand_dup_op, (GxB_binary_function) prand_dup_f, prand_type, prand_type, prand_type)) ; return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // prand_finalize: free the random seed type and its operators //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info prand_finalize ( ) { PRAND_FREE_ALL ; return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // prand_next: get the next random numbers 
//------------------------------------------------------------------------------ GB_PUBLIC GrB_Info prand_next ( GrB_Vector Seed ) { return (GrB_Vector_apply (Seed, NULL, NULL, prand_next_op, Seed, NULL)) ; } //------------------------------------------------------------------------------ // prand_seed: create a vector of random seeds //------------------------------------------------------------------------------ // Returns a vector of random seed values. #define PRAND_FREE_WORK \ { \ free (I) ; \ free (X) ; \ } #undef PRAND_FREE_ALL #define PRAND_FREE_ALL \ { \ PRAND_FREE_WORK ; \ GrB_Vector_free (Seed) ; \ } GB_PUBLIC GrB_Info prand_seed ( GrB_Vector *Seed, // vector of random number seeds int64_t seed, // scalar input seed GrB_Index n, // size of Seed to create int nthreads // # of threads to use (OpenMP default if <= 0) ) { GrB_Index *I = NULL ; prand_t *X = NULL ; // allocate the Seed vector OK (GrB_Vector_new (Seed, prand_type, n)) ; // allocate the I and X arrays I = (GrB_Index *) malloc ((n+1) * sizeof (GrB_Index)) ; X = (prand_t *) malloc ((n+1) * sizeof (prand_t)) ; if (I == NULL || X == NULL) { PRAND_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } // determine # of threads to use int nthreads_max = 1 ; #ifdef _OPENMP nthreads_max = omp_get_max_threads ( ) ; #endif if (nthreads <= 0 || nthreads > nthreads_max) { nthreads = nthreads_max ; } // construct the tuples for the initial seeds int64_t i, len = (int64_t) n ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (i = 0 ; i < len ; i++) { I [i] = i ; for (int k = 0 ; k < 5 ; k++) { X [i].seed [k] = (100000000*(seed) + 10*i + k + 1) ; } } // build the Seed vector OK (GrB_Vector_build_UDT (*Seed, I, X, n, prand_dup_op)) ; // free workspace PRAND_FREE_WORK ; // advance to the first set of random numbers OK (prand_next (*Seed)) ; return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // prand_print: print the Seed vector 
//------------------------------------------------------------------------------ // This is meant for testing, not production use. #undef PRAND_FREE_ALL #define PRAND_FREE_ALL ; GB_PUBLIC GrB_Info prand_print ( GrB_Vector Seed, int pr // 0: print nothing, 1: print some, 2: print all ) { if (pr > 0) { GrB_Index n ; OK (GrB_Vector_nvals (&n, Seed)) ; printf ("\nSeed: length %g\n", (double) n) ; prand_t x ; for (int k = 0 ; k < 5 ; k++) x.seed [k] = -1 ; for (int64_t i = 0 ; i < (int64_t) n ; i++) { if (GrB_Vector_extractElement_UDT (&x, Seed, i) == GrB_SUCCESS) { printf ("%g: ", (double) i) ; for (int k = 0 ; k < 5 ; k++) { printf (" %.18g", (double) (x.seed [k])) ; } printf ("\n") ; } if (pr == 1 && i > 10) break ; } } return (GrB_SUCCESS) ; } //------------------------------------------------------------------------------ // prand_iget: return a vector of random uint64 integers //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info prand_iget ( GrB_Vector X, GrB_Vector Seed ) { OK (GrB_Vector_apply (X, NULL, NULL, prand_iget_op, Seed, NULL)) ; return (prand_next (Seed)) ; } //------------------------------------------------------------------------------ // prand_xget: return a vector of random doubles, in range 0 to 1 inclusive //------------------------------------------------------------------------------ GB_PUBLIC GrB_Info prand_xget ( GrB_Vector X, GrB_Vector Seed ) { OK (GrB_Vector_apply (X, NULL, NULL, prand_xget_op, Seed, NULL)) ; return (prand_next (Seed)) ; }