source
stringlengths
3
92
c
stringlengths
26
2.25M
par_csr_matvec.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matvec functions for hypre_CSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec *--------------------------------------------------------------------------*/ // y = alpha*A*x + beta*b HYPRE_Int hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *b, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *b_local = hypre_ParVectorLocalVector(b); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *x_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride( x_local ); HYPRE_Int idxstride = hypre_VectorIndexStride( x_local 
); HYPRE_Complex *x_tmp_data, **x_buf_data; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_assert( idxstride>0 ); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size || num_rows != b_size) { ierr = 12; } if (num_cols != x_size && (num_rows != y_size || num_rows != b_size)) { ierr = 13; } hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); } else { hypre_assert( num_vectors > 1 ); x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors ); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef 
HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* x_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(x_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); hypre_SeqVectorSetDataOwner(x_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE); x_tmp_data = hypre_VectorData(x_tmp); /* x_buff_data */ x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if 
(use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); continue; #endif } x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). */ hypre_assert( idxstride == 1 ); //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv]; HYPRE_Complex *locl_data = x_local_data + jv * vecstride; /* if on device, no need to Sync: send_data is on device memory */ #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* pack send data on device */ HYPRE_THRUST_CALL( gather, hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) + hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), locl_data, send_data ); #elif defined(HYPRE_USING_DEVICE_OPENMP) /* pack send data on device */ HYPRE_Int i; HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts) for (i = start; i < end; i++) { send_data[i] = locl_data[device_send_map_elmts[i]]; } 
#else HYPRE_Int i; /* pack send data on host */ #if defined(HYPRE_USING_OPENMP) #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)]; } #endif } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication starts */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv], HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* computation offd part */ if (num_cols_offd) { hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local ); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; if (!use_persistent_comm) { for ( jv 
= 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } HYPRE_Int hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvecT * * Performs y <- alpha * A^T * x + beta * y * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y ) { hypre_ParCSRCommHandle **comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A); hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); hypre_Vector *y_tmp; HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, jv; HYPRE_Int vecstride = hypre_VectorVectorStride(y_local); HYPRE_Int idxstride = hypre_VectorIndexStride(y_local); HYPRE_Complex *y_tmp_data, **y_buf_data; 
HYPRE_Complex *y_local_data = hypre_VectorData(y_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_rows != x_size) { ierr = 1; } if (num_cols != y_size) { ierr = 2; } if (num_rows != x_size && num_cols != y_size) { ierr = 3; } hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { hypre_assert( num_vectors > 1 ); y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then 
we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* y_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(y_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle); hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE); y_tmp_data = hypre_VectorData(y_tmp); /* y_buf_data */ y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM y_buf_data[0] = (HYPRE_Complex *) 
hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); continue; #endif } y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (offdT) { // offdT is optional. Used only if it's present hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd], HYPRE_MEMORY_DEVICE, y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ if (diagT) { // diagT is optional. Used only if it's present. 
hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ hypre_assert( idxstride == 1 ); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv]; HYPRE_Complex *locl_data = y_local_data + jv * vecstride; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* unpack recv data on device */ if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg)) { hypre_ParCSRCommPkgWorkSpace(comm_pkg) = hypre_TAlloc( char, (2*sizeof(HYPRE_Int)+sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE ); } hypreDevice_GenScatterAdd(locl_data, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), recv_data, hypre_ParCSRCommPkgWorkSpace(comm_pkg)); #elif defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int i, j; /* unpack recv data on device */ for (i = 0; i < num_sends; i++) { HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts) for (j = start; j < end; j++) { locl_data[device_send_map_elmts[j]] += recv_data[j]; } } #else HYPRE_Int i; /* unpack recv data on host, TODO OMP? 
*/ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i]; } #endif } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncCudaComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = hypre_VectorData(x_local); 
/*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm,&num_procs); if (num_cols != x_size) ierr = 11; if (num_rows != y_size) ierr = 12; if (num_cols != x_size && num_rows != y_size) ierr = 13; if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), 
HYPRE_MEMORY_HOST); if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
convolution_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_transform_kernel_pack4to1_neon(const Mat& weight_data, Mat& weight_data_pack4to1, int num_input, int num_output, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // src = kw-kh-inch-outch // dst = 4a-kw-kh-inch/4a-outch Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output); weight_data_pack4to1.create(maxk, num_input / 4, num_output, (size_t)4 * 4, 4); for (int q = 0; q < num_output; q++) { const Mat k0 = weight_data_r2.channel(q); Mat g0 = weight_data_pack4to1.channel(q); for (int p = 0; p + 3 < num_input; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); float* g00 = g0.row(p / 4); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k02[k]; g00[3] = k03[k]; g00 += 4; } } } } static void convolution_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int 
maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const float* sptr = m.row(i * stride_h) + j * stride_w * 4; for (int k = 0; k < maxk; k++) // 29.23 { float32x4_t _val = vld1q_f32(sptr + space_ofs[k] * 4); float32x4_t _w = vld1q_f32(kptr); float32x4_t _s4 = vmulq_f32(_val, _w); #if __aarch64__ sum += vaddvq_f32(_s4); // dot #else float32x2_t _ss = vadd_f32(vget_low_f32(_s4), vget_high_f32(_s4)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #endif kptr += 4; } } sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
target-11.c
/* { dg-require-effective-target offload_device_nonshared_as } */ #include <stdlib.h> #include <assert.h> #define N 32 void test_array_section (int *p) { #pragma omp target data map(alloc: p[0:N]) { int ok = 1; for (int i = 10; i < 10 + 4; i++) p[i] = 997 * i; #pragma omp target map(always to:p[10:4]) map(tofrom: ok) for (int i = 10; i < 10 + 4; i++) if (p[i] != 997 * i) ok = 0; assert (ok); #pragma omp target map(always from:p[7:9]) for (int i = 0; i < N; i++) p[i] = i; } } int main () { int aa = 0, bb = 0, cc = 0, dd = 0; #pragma omp target data map(tofrom: aa) map(to: bb) map(from: cc, dd) { int ok; aa = bb = cc = 1; /* Set dd on target to 0 for the further check. */ #pragma omp target map(always to: dd) ; dd = 1; #pragma omp target map(tofrom: aa) map(always to: bb) \ map(always from: cc) map(to: dd) map(from: ok) { /* bb is always to, aa and dd are not. */ ok = (aa == 0) && (bb == 1) && (dd == 0); aa = bb = cc = dd = 2; } assert (ok); assert (aa == 1); assert (bb == 1); assert (cc == 2); /* cc is always from. */ assert (dd == 1); dd = 3; #pragma omp target map(from: cc) map(always to: dd) map(from: ok) { ok = (dd == 3); /* dd is always to. */ cc = dd = 4; } assert (ok); assert (cc == 2); assert (dd == 3); } assert (aa == 2); assert (bb == 1); assert (cc == 4); assert (dd == 4); int *array = calloc (N, sizeof (int)); test_array_section (array); for (int i = 0; i < 7; i++) assert (array[i] == 0); for (int i = 7; i < 7 + 9; i++) assert (array[i] == i); for (int i = 7 + 9; i < N; i++) assert (array[i] == 0); free (array); return 0; }
nr_incore.c
/*
 * Incore version of non-relativistic integrals JK contraction
 * ic in CVHFic... is short for incore
 *
 * Kernel naming: CVHFics<sym>_<dmidx>_s<osym><outidx>
 *   <sym>    permutation symmetry assumed for the eri buffer
 *            (s8, s4, s2ij, s2kl, s1 -- see the table further below)
 *   <dmidx>  density-matrix indices contracted (ij -> Coulomb, jk/il -> exchange)
 *   <outidx> output indices written, with s1 (full square) or s2 (tril) storage
 * Each kernel accumulates the contribution of ONE fixed bra pair (ic, jc)
 * into vj/vk; the _o0 suffix marks the plain reference implementation.
 */
#include <stdlib.h>
#include <string.h>
#include <math.h>
//#include <omp.h>
#include "config.h"
#include "cvhf.h"
#include "np_helper/np_helper.h"
#include "fblas.h"

/*
 * J
 */

/* vj[ij] += sum_{k>=l} eri[ij|kl] * dm[kl] for the 8-fold symmetric eri
 * slice of pair (ic,jc); simultaneously scatters eri[ij|kl]*dm_ij into
 * vj[kl].  eri points at the tril block ij*(ij+1)/2 with ij = ic*(ic+1)/2+jc.
 * Pairs with ic < jc are skipped (handled by the symmetric partner). */
void CVHFics8_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ij;
        double *vj_ij = &vj[ic*nao+jc];
        if (ic > jc) {
                /* off-diagonal bra pair: fold dm_ij and dm_ji together */
                dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
        } else if (ic == jc) {
                dm_ij = dm[ic*nao+ic];
        } else {
                return;
        }
        for (i = 0, ij = 0; i < ic; i++) {
                for (j = 0; j < i; j++, ij++) {
                        *vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
                        vj[i*nao+j] += eri[ij] * dm_ij;
                }
                /* j == i: diagonal ket pair, no dm folding */
                *vj_ij += eri[ij] * dm[i*nao+i];
                vj[i*nao+i] += eri[ij] * dm_ij;
                ij++;
        }
        // i == ic
        for (j = 0; j < jc; j++, ij++) {
                *vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
                vj[i*nao+j] += eri[ij] * dm_ij;
        }
        /* last element: kl == ij (the s8 diagonal) */
        *vj_ij += eri[ij] * dm_ij;
}

/* s4 variant: eri holds the full tril (all k>=l) for pair (ic,jc);
 * only the vj[kl] scatter is needed here. */
void CVHFics4_ij_s2kl_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ij;
        if (ic > jc) {
                dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
        } else if (ic == jc) {
                dm_ij = dm[ic*nao+ic];
        } else {
                return;
        }
        for (i = 0, ij = 0; i < nao; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        vj[i*nao+j] += eri[ij] * dm_ij;
                }
        }
}

/* vj[ic,jc] += sum_{k>=l} eri[kl] * (folded dm[kl]); eri is a full tril
 * slice (k>=l run over all nao). */
void CVHFics2kl_kl_s1ij_o0(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        int i, j, ij;
        double *vj_ij = &vj[ic*nao+jc];
        for (i = 0, ij = 0; i < nao; i++) {
                for (j = 0; j < i; j++, ij++) {
                        *vj_ij += eri[ij] *(dm[i*nao+j]+dm[j*nao+i]);
                }
                *vj_ij += eri[ij] * dm[i*nao+i];
                ij++;
        }
}

/*
 * K
 */

/* Exchange contraction for the s8 tril slice of pair (ic,jc): all eight
 * permutations of (ic,jc,k,l) are scattered explicitly, with the ket
 * diagonal (l==k), the boundary row (k==ic) and the s8 diagonal
 * (k==ic, l==jc) peeled off so no permutation is double counted. */
void CVHFics8_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < ic; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                                vk[l*nao+jc] += eri[kl] * dm[k*nao+ic];
                                vk[k*nao+jc] += eri[kl] * dm[l*nao+ic];
                                vk[l*nao+ic] += eri[kl] * dm[k*nao+jc];
                                vk[k*nao+ic] += eri[kl] * dm[l*nao+jc];
                        }
                        /* l == k: only four distinct permutations remain */
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        vk[k*nao+jc] += eri[kl] * dm[k*nao+ic];
                        vk[k*nao+ic] += eri[kl] * dm[k*nao+jc];
                        kl++;
                }
                k = ic;
                for (l = 0; l < jc; l++, kl++) { // l<k
                        vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        vk[l*nao+jc] += eri[kl] * dm[k*nao+ic];
                        vk[k*nao+jc] += eri[kl] * dm[l*nao+ic];
                        vk[l*nao+ic] += eri[kl] * dm[k*nao+jc];
                        vk[k*nao+ic] += eri[kl] * dm[l*nao+jc];
                }
                // ic = k, jc = l;
                vk[jc*nao+jc] += eri[kl] * dm[ic*nao+ic];
                vk[ic*nao+jc] += eri[kl] * dm[jc*nao+ic];
                vk[jc*nao+ic] += eri[kl] * dm[ic*nao+jc];
                vk[ic*nao+ic] += eri[kl] * dm[jc*nao+jc];
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < ic; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[l*nao+ic] += eri[kl] * dm[k*nao+ic];
                                vk[k*nao+ic] += eri[kl] * dm[l*nao+ic];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[k*nao+ic] += eri[kl] * dm[k*nao+ic];
                        kl++;
                }
                k = ic;
                for (l = 0; l < k; l++, kl++) { // l<k
                        vk[ic*nao+l] += eri[kl] * dm[ic*nao+ic];
                        vk[ic*nao+ic] += eri[kl] * dm[ic*nao+l];
                        vk[l*nao+ic] += eri[kl] * dm[ic*nao+ic];
                        vk[ic*nao+ic] += eri[kl] * dm[l*nao+ic];
                }
                // ic = jc = k = l
                vk[ic*nao+ic] += eri[kl] * dm[ic*nao+ic];
        }
}

/* Like CVHFics8_jk_s1il_o0 but only fills the tril part of vk (vk is
 * assumed Hermitian).  The eri pointer is advanced row by row (eri += k+1)
 * instead of keeping a running kl counter; the k-range is split into
 * k<jc, k==jc, jc<k<ic and k==ic segments because the set of tril targets
 * differs in each segment. */
void CVHFics8_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l;
        if (ic > jc) {
                // k < jc
                for (k=0; k < jc; k++) {
                        for (l = 0; l < k; l++) {
                                vk[jc*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[jc*nao+k] += eri[l] * dm[ic*nao+l];
                                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                        }
                        // l = k
                        vk[jc*nao+k] += eri[k] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[k] * dm[jc*nao+k];
                        eri += k + 1;
                }
                // k = jc
                for (l = 0; l < k; l++) {
                        vk[jc*nao+l] += eri[l] * dm[ic*nao+jc];
                        vk[jc*nao+jc] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]);
                        vk[ic*nao+l] += eri[l] * dm[jc*nao+jc];
                        vk[ic*nao+jc] += eri[l] * dm[jc*nao+l];
                }
                // l = k = jc
                vk[jc*nao+jc] += eri[l] *(dm[ic*nao+jc] + dm[jc*nao+ic]);
                vk[ic*nao+jc] += eri[l] * dm[jc*nao+jc];
                eri += k + 1;
                // k > jc
                for (k=jc+1; k < ic; k++) {
                        // l < jc
                        for (l = 0; l < jc; l++) {
                                vk[jc*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                                vk[k*nao+jc] += eri[l] * dm[l*nao+ic];
                        }
                        // l = jc
                        vk[jc*nao+jc] += eri[l] *(dm[ic*nao+k] + dm[k*nao+ic]);
                        vk[ic*nao+jc] += eri[l] * dm[jc*nao+k];
                        vk[ic*nao+k] += eri[l] * dm[jc*nao+jc];
                        vk[k*nao+jc] += eri[l] * dm[jc*nao+ic];
                        //eri += jc+1;
                        // l > jc
                        for (l = jc+1; l < k; l++) {
                                vk[ic*nao+l] += eri[l] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[jc*nao+l];
                                vk[l*nao+jc] += eri[l] * dm[k*nao+ic];
                                vk[k*nao+jc] += eri[l] * dm[l*nao+ic];
                        }
                        // l = k
                        vk[jc*nao+k] += eri[l] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[l] * dm[jc*nao+k];
                        vk[k*nao+jc] += eri[l] * dm[k*nao+ic];
                        eri += k + 1;
                }
                // k = ic
                for (l = 0; l < jc; l++) {
                        vk[jc*nao+l] += eri[l] * dm[ic*nao+ic];
                        vk[ic*nao+l] += eri[l] * dm[jc*nao+ic];
                        vk[ic*nao+ic] += eri[l] *(dm[jc*nao+l] + dm[l*nao+jc]);
                        vk[ic*nao+jc] += eri[l] * dm[l*nao+ic];
                }
                // ic = k, jc = l;
                vk[jc*nao+jc] += eri[l] * dm[ic*nao+ic];
                vk[ic*nao+jc] += eri[l] * dm[jc*nao+ic];
                vk[ic*nao+ic] += eri[l] * dm[jc*nao+jc];
                eri += jc + 1;
        } else if (ic == jc) {
                /* diagonal bra pair: rows are processed two at a time
                 * (k, k+1) to reuse eri loads; the tail loop below picks
                 * up the last row when ic is odd. */
                for (k = 0; k < ic-1; k+=2) {
                        for (l = 0; l < k; l++) {
                                vk[ic*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[ic*nao+l];
                                vk[ic*nao+l ] += eri[l+k+1] * dm[ic*nao+k+1];
                                vk[ic*nao+k+1] += eri[l+k+1] * dm[ic*nao+l ];
                        }
                        vk[ic*nao+k] += eri[k] * dm[ic*nao+k];
                        eri += k+1;
                        vk[ic*nao+k ] += eri[k] * dm[ic*nao+k+1];
                        vk[ic*nao+k+1] += eri[k] * dm[ic*nao+k ];
                        vk[ic*nao+k+1] += eri[k+1] * dm[ic*nao+k+1];
                        eri += k+2;
                }
                for (; k < ic; k++) {
                        for (l = 0; l < k; l++) {
                                vk[ic*nao+l] += eri[l] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[l] * dm[ic*nao+l];
                        }
                        vk[ic*nao+k] += eri[k] * dm[ic*nao+k];
                        eri += k+1;
                }
                for (l = 0; l < k; l++) { // l<k
                        vk[ic*nao+l] += eri[l] * dm[ic*nao+ic];
                        vk[ic*nao+ic] += eri[l] *(dm[ic*nao+l] + dm[l*nao+ic]);
                }
                // ic = jc = k = l
                vk[ic*nao+ic] += eri[l] * dm[ic*nao+ic];
                eri += k + 1;
        }
}

/* s4 exchange: eri covers the full tril (k>=l over all nao); both rows
 * ic and jc of vk receive contributions (vk written as full square). */
void CVHFics4_jk_s1il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        kl++;
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
                        kl++;
                }
        }
}

/* For s4 eri the il<-jk and jk<-il contractions are identical. */
void CVHFics4_il_s1jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

/* s4 exchange writing only the tril part of vk (vk assumed Hermitian);
 * the (k,l) plane is partitioned into k<=jc, jc<k<=ic and k>ic regions
 * because the set of tril targets differs in each. */
void CVHFics4_jk_s2il_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k <= jc; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        kl++;
                }
                for (k = jc+1; k <= ic; k++) {
                        for (l = 0; l <= jc; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        for (l = jc+1; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                        kl++;
                }
                for (k = ic+1; k < nao; k++) {
                        /* rows beyond ic only contribute to columns <= ic;
                         * kl is re-based to the start of row k in the tril */
                        for (l = 0, kl = k*(k+1)/2; l <= jc; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        }
                        for (l = jc+1; l <= ic; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        }
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k <= ic; k++) {
                        for (l = 0; l < k; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                        }
                        vk[ic*nao+k] += eri[kl] * dm[ic*nao+k];
                        kl++;
                }
                for (k = ic+1; k < nao; k++) {
                        for (l = 0, kl = k*(k+1)/2; l <= ic; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                        }
                }
        }
}

/* For s4 eri the il<-jk and jk<-il contractions are identical. */
void CVHFics4_il_s2jk_o0(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * einsum ijkl,ij->(s2)kl
 * 8-fold symmetry for eri: i>=j,k>=l,ij>=kl
 * input address eri of the first element for pair ij=ic*(ic+1)/2+jc
 * i.e. ~ &eri_ao[ij*(ij+1)/2]
 * dm can be non-Hermitian,
 * output vk might not be Hermitian
 *
 * NOTE all _s2kl (nrs8_, nrs4_, nrs2kl_) assumes the tril part of eri
 * being stored in C-order *contiguously*. so call CVHFunpack_nrblock2tril
 * to generate eris
 */
void CVHFics8_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        CVHFics8_ij_s2kl_o0(eri, dm, vj, nao, ic, jc);
}

// tri_dm: fold upper triangular dm to lower triangle,
// tri_dm[i*(i+1)/2+j] = dm[i*nao+j] + dm[j*nao+i] for i > j
/* BLAS-accelerated variant of the s8 Coulomb kernel: full rows i < ic are
 * handled with ddot_/daxpy_ over the pre-folded tri_dm, the partial row
 * i == ic is finished element-wise. */
void CVHFics8_tridm_vj(double *eri, double *tri_dm, double *vj, int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ijc = tri_dm[ic*(ic+1)/2+jc];
        double *vj_ij = &vj[ic*nao+jc];
        const int INC1 = 1;
        int i1;
        for (i = 0, ij = 0; i < ic; i++) {
                i1 = i + 1;
                *vj_ij += ddot_(&i1, eri+ij, &INC1, tri_dm+ij, &INC1);
                daxpy_(&i1, &dm_ijc, eri+ij, &INC1, vj+i*nao, &INC1);
                ij += i1;
        }
        // i == ic
        for (j = 0; j < jc; j++, ij++) {
                *vj_ij += eri[ij] * tri_dm[ij];
                vj[i*nao+j] += eri[ij] * dm_ijc;
        }
        *vj_ij += eri[ij] * dm_ijc;
}

void CVHFics8_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics8_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * einsum ijkl,jk->(s2)il
 * output vk should be Hermitian
 */
void CVHFics8_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics8_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * einsum ijkl,jk->il
 * 4-fold symmetry for eri: i>=j,k>=l
 */
void CVHFics4_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

void CVHFics4_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics4_jk_s1il_o0(eri, dm, vk, nao, ic, jc);
}

/*
 * output vk should be Hermitian
 */
void CVHFics4_jk_s2il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

void CVHFics4_il_s2jk(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        CVHFics4_jk_s2il_o0(eri, dm, vk, nao, ic, jc);
}

void CVHFics4_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        CVHFics4_ij_s2kl_o0(eri, dm, vj, nao, ic, jc);
}

/* s4 kl->ij Coulomb: only the tril bra pairs (ic >= jc) are evaluated. */
void CVHFics4_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        if (ic >= jc) {
                CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc);
        }
}

/* s1 (no symmetry): eri is the full nao*nao ket block of pair (ic,jc). */
void CVHFics1_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        int i;
        double dm_ij = dm[ic*nao+jc];
        for (i = 0; i < nao*nao; i++) {
                vj[i] += eri[i] * dm_ij;
        }
}

void CVHFics1_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        const int INC1 = 1;
        int nn = nao * nao;
        vj[ic*nao+jc] += ddot_(&nn, eri, &INC1, dm, &INC1);
}

void CVHFics1_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < nao; l++, kl++) {
                        vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                }
        }
}

void CVHFics1_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < nao; l++, kl++) {
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                }
        }
}

/* s2ij: bra pair symmetric (i>=j), ket block full square. */
void CVHFics2ij_ij_s1kl(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        int i;
        double dm_ij;
        if (ic > jc) {
                dm_ij = dm[ic*nao+jc] + dm[jc*nao+ic];
        } else if (ic == jc) {
                dm_ij = dm[ic*nao+ic];
        } else {
                return;
        }
        for (i = 0; i < nao*nao; i++) {
                vj[i] += eri[i] * dm_ij;
        }
}

void CVHFics2ij_kl_s2ij(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        if (ic < jc) {
                return;
        }
        CVHFics1_kl_s1ij(eri, dm, vj, nao, ic, jc);
}

void CVHFics2ij_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                                vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        }
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[ic*nao+l] += eri[kl] * dm[ic*nao+k];
                        }
                }
        }
}

void CVHFics2ij_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        if (ic > jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                                vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                        }
                }
        } else if (ic == jc) {
                for (k = 0, kl = 0; k < nao; k++) {
                        for (l = 0; l < nao; l++, kl++) {
                                vk[ic*nao+k] += eri[kl] * dm[ic*nao+l];
                        }
                }
        }
}

/* s2kl: bra pairs unrestricted (all i,j), ket block tril (k>=l). */
void CVHFics2kl_ij_s2kl(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        int i, j, ij;
        double dm_ij = dm[ic*nao+jc];
        for (i = 0, ij = 0; i < nao; i++) {
                for (j = 0; j <= i; j++, ij++) {
                        vj[i*nao+j] += eri[ij] * dm_ij;
                }
        }
}

void CVHFics2kl_kl_s1ij(double *eri, double *dm, double *vj, int nao, int ic, int jc)
{
        CVHFics2kl_kl_s1ij_o0(eri, dm, vj, nao, ic, jc);
}

void CVHFics2kl_jk_s1il(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < k; l++, kl++) {
                        vk[ic*nao+l] += eri[kl] * dm[jc*nao+k];
                        vk[ic*nao+k] += eri[kl] * dm[jc*nao+l];
                }
                vk[ic*nao+k] += eri[kl] * dm[jc*nao+k];
                kl++;
        }
}

void CVHFics2kl_il_s1jk(double *eri, double *dm, double *vk, int nao, int ic, int jc)
{
        int k, l, kl;
        for (k = 0, kl = 0; k < nao; k++) {
                for (l = 0; l < k; l++, kl++) {
                        vk[jc*nao+l] += eri[kl] * dm[ic*nao+k];
                        vk[jc*nao+k] += eri[kl] * dm[ic*nao+l];
                }
                vk[jc*nao+k] += eri[kl] * dm[ic*nao+k];
                kl++;
        }
}

/**************************************************
 * s8   8-fold symmetry: i>=j,k>=l,ij>=kl
 * s4   4-fold symmetry: i>=j,k>=l
 * s2ij 2-fold symmetry: i>=j
 * s2kl 2-fold symmetry: k>=l
 * s1   no permutation symmetry
 **************************************************/

/* Driver for s8 eri: iterates over all tril bra pairs ij, decodes (i,j)
 * from the pair index via the triangular-root formula, and dispatches the
 * per-pair kernels fvj/fvk on thread-private accumulators that are reduced
 * under the critical section.
 * NOTE(review): with default(none), npair/fvj/fvk are usable only because
 * const-qualified variables are predetermined shared under OpenMP <= 3.1
 * C rules; OpenMP 4.0+ compilers may reject this -- confirm against the
 * project's minimum compiler. */
void CVHFnrs8_incore_drv(double *eri, double *dmj, double *vj, double *dmk, double *vk,
                         int n, void (*const fvj)(), void (*const fvk)())
{
        const int npair = n*(n+1)/2;
        double *vj_priv, *vk_priv;
        int i, j;
        size_t ij, off;
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n) \
        private(ij, i, j, off, vj_priv, vk_priv)
{
        vj_priv = malloc(sizeof(double)*n*n);
        vk_priv = malloc(sizeof(double)*n*n);
        memset(vj_priv, 0, sizeof(double)*n*n);
        memset(vk_priv, 0, sizeof(double)*n*n);
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                /* invert ij = i*(i+1)/2 + j; 1e-7 guards sqrt rounding */
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij*(ij+1)/2;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}

/* Driver for s4 eri: same pair decoding as nrs8, but each bra pair owns a
 * full tril ket block, so the slice offset is ij * npair. */
void CVHFnrs4_incore_drv(double *eri, double *dmj, double *vj, double *dmk, double *vk,
                         int n, void (*const fvj)(), void (*const fvk)())
{
        const int npair = n*(n+1)/2;
        double *vj_priv, *vk_priv;
        int i, j;
        size_t ij, off;
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n) \
        private(ij, i, j, off, vj_priv, vk_priv)
{
        vj_priv = malloc(sizeof(double)*n*n);
        vk_priv = malloc(sizeof(double)*n*n);
        memset(vj_priv, 0, sizeof(double)*n*n);
        memset(vk_priv, 0, sizeof(double)*n*n);
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij * npair;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}

/* Driver for s2ij eri: tril bra pairs, each owning a full n*n ket block. */
void CVHFnrs2ij_incore_drv(double *eri, double *dmj, double *vj, double *dmk, double *vk,
                           int n, void (*const fvj)(), void (*const fvk)())
{
        const int npair = n*(n+1)/2;
        double *vj_priv, *vk_priv;
        int i, j;
        size_t ij, off;
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n) \
        private(ij, i, j, off, vj_priv, vk_priv)
{
        vj_priv = malloc(sizeof(double)*n*n);
        vk_priv = malloc(sizeof(double)*n*n);
        memset(vj_priv, 0, sizeof(double)*n*n);
        memset(vk_priv, 0, sizeof(double)*n*n);
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < npair; ij++) {
                i = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                j = ij - i*(i+1)/2;
                off = ij * n * n;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}

/* Driver for s2kl eri: all n*n bra pairs, each owning a tril ket block. */
void CVHFnrs2kl_incore_drv(double *eri, double *dmj, double *vj, double *dmk, double *vk,
                           int n, void (*const fvj)(), void (*const fvk)())
{
        const int npair = n*(n+1)/2;
        double *vj_priv, *vk_priv;
        int i, j;
        size_t ij, off;
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n) \
        private(ij, i, j, off, vj_priv, vk_priv)
{
        vj_priv = malloc(sizeof(double)*n*n);
        vk_priv = malloc(sizeof(double)*n*n);
        memset(vj_priv, 0, sizeof(double)*n*n);
        memset(vk_priv, 0, sizeof(double)*n*n);
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < n*n; ij++) {
                i = ij / n;
                j = ij - i * n;
                off = ij * npair;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}

/* Driver for s1 eri: all n*n bra pairs, each owning a full n*n ket block.
 * Loop/offset variables are declared inside the parallel region, so no
 * private() clause is needed here. */
void CVHFnrs1_incore_drv(double *eri, double *dmj, double *vj, double *dmk, double *vk,
                         int n, void (*const fvj)(), void (*const fvk)())
{
        memset(vj, 0, sizeof(double)*n*n);
        memset(vk, 0, sizeof(double)*n*n);
#pragma omp parallel default(none) \
        shared(eri, dmj, dmk, vj, vk, n)
{
        int i, j;
        size_t ij, off;
        double *vj_priv = malloc(sizeof(double)*n*n);
        double *vk_priv = malloc(sizeof(double)*n*n);
        memset(vj_priv, 0, sizeof(double)*n*n);
        memset(vk_priv, 0, sizeof(double)*n*n);
#pragma omp for nowait schedule(dynamic, 4)
        for (ij = 0; ij < n*n; ij++) {
                i = ij / n;
                j = ij - i * n;
                off = ij * n * n;
                (*fvj)(eri+off, dmj, vj_priv, n, i, j);
                (*fvk)(eri+off, dmk, vk_priv, n, i, j);
        }
#pragma omp critical
        {
                for (i = 0; i < n*n; i++) {
                        vj[i] += vj_priv[i];
                        vk[i] += vk_priv[i];
                }
        }
        free(vj_priv);
        free(vk_priv);
}
}
/* ---- RNAdos.c ---- */
/* * Compute the density of states * * c Gregor Entzian, Ronny Lorenz * Vienna RNA package */ #include <stdlib.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/structures.h" #include "ViennaRNA/params/io.h" #include "ViennaRNA/datastructures/basic.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/mfe.h" #include "ViennaRNA/file_utils.h" #include "ViennaRNA/io/file_formats.h" #include "ViennaRNA/datastructures/hash_tables.h" #ifdef _OPENMP #include <omp.h> #endif #include "RNAdos_cmdl.h" typedef struct key_value_ { int key; int value; } key_value; static unsigned hash_function_dos(void *hash_entry, unsigned long hashtable_size) { key_value *hem = ((key_value *)hash_entry); unsigned long c = (unsigned long)hem->key; return c % hashtable_size; } /*** * 0 is equal, 1 is different! */ static int hash_comparison_dos(void *x, void *y) { key_value *hem_x = ((key_value *)x); key_value *hem_y = ((key_value *)y); if ((x == NULL) ^ (y == NULL)) return 1; return !(hem_x->key == hem_y->key); } static int free_hash_entry_dos(void *hash_entry) { /* * if (hash_entry != NULL) { * key_value *hem_x = ((key_value *) hash_entry); * free (hem_x); * } */ return 0; } static vrna_hash_table_t create_hashtable(int hashbits) { vrna_callback_ht_free_entry *my_free = free_hash_entry_dos; vrna_callback_ht_compare_entries *my_comparison = hash_comparison_dos; vrna_callback_ht_hash_function *my_hash_function = hash_function_dos; vrna_hash_table_t ht = vrna_ht_init(hashbits, my_comparison, my_hash_function, my_free); return ht; } typedef struct energy_count_ { int energy; double count; } energy_count; typedef struct hashtable_list_ { unsigned long length; unsigned long allocated_size; energy_count *list_energy_count_pairs; key_value **list_key_value_pairs; vrna_hash_table_t ht_energy_index; // lookup table; } hashtable_list; struct dp_counts_per_energy { hashtable_list *n_ij_e; 
hashtable_list *n_ij_A_e; hashtable_list *n_ij_M_e; hashtable_list *n_ij_M1_e; }; static hashtable_list create_hashtable_list(int hashbits) { hashtable_list ht_list; ht_list.allocated_size = 10; ht_list.length = 0; ht_list.list_energy_count_pairs = vrna_alloc(sizeof(energy_count) * ht_list.allocated_size); ht_list.list_key_value_pairs = vrna_alloc(sizeof(key_value *) * ht_list.allocated_size); ht_list.ht_energy_index = create_hashtable(hashbits); return ht_list; } static void free_hashtable_list(hashtable_list *ht_list) { vrna_ht_free(ht_list->ht_energy_index); free(ht_list->list_energy_count_pairs); int i = 0; for (; i < ht_list->length; i++) free(ht_list->list_key_value_pairs[i]); free(ht_list->list_key_value_pairs); } static void hashtable_list_add_count(hashtable_list *htl, int energy, double count) { if (htl->ht_energy_index != NULL) { key_value to_check; to_check.key = energy; //to_check.value = count; key_value *lookup_result = NULL; lookup_result = vrna_ht_get(htl->ht_energy_index, (void *)&to_check); if (lookup_result == NULL) { //value is not in list. if (htl->length >= htl->allocated_size) { htl->allocated_size += 10; htl->list_energy_count_pairs = vrna_realloc(htl->list_energy_count_pairs, sizeof(energy_count) * htl->allocated_size); htl->list_key_value_pairs = vrna_realloc(htl->list_key_value_pairs, sizeof(key_value *) * htl->allocated_size); } energy_count ec; ec.count = count; ec.energy = energy; int list_index = htl->length; htl->list_energy_count_pairs[list_index] = ec; to_check.value = list_index; key_value *to_store = vrna_alloc(sizeof(key_value)); *to_store = to_check; htl->list_key_value_pairs[list_index] = to_store; htl->length++; key_value *to_insert = htl->list_key_value_pairs[list_index]; int res = vrna_ht_insert(htl->ht_energy_index, (void *)to_insert); if (res != 0) fprintf(stderr, "dos.c: hash table insert failed!"); } else { // the energy-index pair is already in the list. 
int list_index = lookup_result->value; htl->list_energy_count_pairs[list_index].count += count; } } } static double lookup_count_from_energy(hashtable_list *htl, int energy) { key_value to_check; to_check.key = energy; key_value *lookup_result; lookup_result = vrna_ht_get(htl->ht_energy_index, (void *)&to_check); if (lookup_result == NULL) { //value is not in list. return 0; } else { int index = lookup_result->value; return htl->list_energy_count_pairs[index].count; } } static double lookup_count_from_index(hashtable_list *htl, int index) { if (index < 0) return 0; return htl->list_energy_count_pairs[index].count; } int lookup_energy_from_index(hashtable_list *htl, int index) { if (index < 0) return 1000000; return htl->list_energy_count_pairs[index].energy; } PRIVATE INLINE int decompose_pair(vrna_fold_compound_t *fc, int i, int j, int min_energy, int max_e, struct dp_counts_per_energy *dp_count_matrix_pt) { hashtable_list *source_table; hashtable_list *source_table_2; hashtable_list *result_table; int turn = fc->params->model_details.min_loop_size; int ij = fc->jindx[j] + i; int *rtype = &(fc->params->model_details.rtype[0]); short *S1 = fc->sequence_encoding; int type = fc->ptype[fc->jindx[j] + i]; int no_close = (((type == 3) || (type == 4)) && fc->params->model_details.noGUclosure); /* do we evaluate this pair? */ if (type) { /* check for hairpin loop */ int energy_hp = E_Hairpin(j - i - 1, type, S1[i + 1], S1[j - 1], fc->sequence + i - 1, fc->params); if (energy_hp <= max_e) { result_table = &dp_count_matrix_pt->n_ij_e[ij]; hashtable_list_add_count(result_table, energy_hp, 1); } /* check for interior loops */ int maxp = MIN2(j - 2 - turn, i + MAXLOOP + 1); int p, q, type_2, pq; int energy; for (p = i + 1; p <= maxp; p++) { unsigned int minq = p + turn + 1; for (q = minq; q < j; q++) { pq = fc->jindx[q] + p; /* set distance to reference structure... 
*/ type_2 = fc->ptype[fc->jindx[q] + p]; if (type_2 == 0) continue; type_2 = rtype[type_2]; if (no_closingGU) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; /* continue unless stack */ energy = E_IntLoop(p - i - 1, j - q - 1, type, type_2, S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], fc->params); source_table = &dp_count_matrix_pt->n_ij_e[pq]; if (source_table->length > 0) { result_table = &dp_count_matrix_pt->n_ij_e[ij]; for (int ei = 0; ei < source_table->length; ei++) { int pq_energy = lookup_energy_from_index(source_table, ei); double pq_count = lookup_count_from_index(source_table, ei); int sum_energy = pq_energy + energy; if ((sum_energy >= min_energy) && (sum_energy <= max_e)) hashtable_list_add_count(result_table, sum_energy, pq_count); } } } /* end q-loop */ } /* end p-loop */ //new_c = MIN2(new_c, min_int); /* check for multibranch loops */ if (!no_close) { /* dangle energies for multiloop closing stem */ int tt = rtype[type]; int temp2 = fc->params->MLclosing; if (dangles == 2) temp2 += E_MLstem(tt, S1[j - 1], S1[i + 1], fc->params); else temp2 += E_MLstem(tt, -1, -1, fc->params); int u; for (u = i + turn + 2; u < j - turn - 2; u++) { int i1u = fc->jindx[u] + (i + 1); int u1j1 = fc->jindx[j - 1] + (u + 1); source_table = &dp_count_matrix_pt->n_ij_M_e[i1u]; source_table_2 = &dp_count_matrix_pt->n_ij_M1_e[u1j1]; if (source_table->length > 0 && source_table_2->length > 0) { result_table = &dp_count_matrix_pt->n_ij_e[ij]; for (int ei_1 = 0; ei_1 < source_table->length; ei_1++) { int m_energy = lookup_energy_from_index(source_table, ei_1); double m_count = lookup_count_from_index(source_table, ei_1); for (int ei_2 = 0; ei_2 < source_table_2->length; ei_2++) { int m1_energy = lookup_energy_from_index(source_table_2, ei_2); double m1_count = lookup_count_from_index(source_table_2, ei_2); int sum_energy = m_energy + m1_energy + temp2; if ((sum_energy >= min_energy) && (sum_energy <= max_e)) { double c_count = m_count * 
m1_count; hashtable_list_add_count(result_table, sum_energy, c_count); } } } } } } } /* end >> if (pair) << */ return 0; } int print_array_energy_counts(hashtable_list *matrix, int length_col, int min_energy, int max_energy) { int j = length_col; if (matrix[j].length > 0) { for (int e = min_energy; e <= max_energy; e++) { double count = lookup_count_from_energy(&matrix[j], e); if (count > 0) printf("%6.2f\t%10.4g\n", e / 100., count); } } printf("\n"); return 0; } /* fill DP matrices */ PRIVATE void compute_density_of_states(vrna_fold_compound_t *fc, int max_energy_input, int hashbits, int verbose) { int i, j, ij, length, turn, *indx; vrna_param_t *P; length = (int)fc->length; indx = fc->jindx; P = fc->params; turn = P->model_details.min_loop_size; int dangles = P->model_details.dangles; short *S1 = fc->sequence_encoding; hashtable_list *source_table; hashtable_list *source_table_2; hashtable_list *result_table; hashtable_list *n_ij_e = (hashtable_list *)vrna_alloc(sizeof(hashtable_list) * ((length + 1) * (length + 1))); hashtable_list *n_ij_A_e = (hashtable_list *)vrna_alloc(sizeof(hashtable_list) * (length + 1)); hashtable_list *n_ij_M_e = (hashtable_list *)vrna_alloc(sizeof(hashtable_list) * ((length + 1) * (length + 1))); hashtable_list *n_ij_M1_e = (hashtable_list *)vrna_alloc(sizeof(hashtable_list) * ((length + 1) * (length + 1))); struct dp_counts_per_energy count_matrix_pt; count_matrix_pt.n_ij_e = n_ij_e; count_matrix_pt.n_ij_A_e = n_ij_A_e; count_matrix_pt.n_ij_M_e = n_ij_M_e; count_matrix_pt.n_ij_M1_e = n_ij_M1_e; /* compute mfe and search the matrices for the minimal energy contribution */ int min_energy = (int)round(vrna_mfe(fc, NULL) * 100.0); if (verbose) printf("min_energy (global): %d \n", min_energy); /* search through DP matrices for minimal entry */ for (int i = 1; i < length; i++) for (int j = i + 1; j <= length; j++) { ij = indx[j] + i; int e = fc->matrices->c[ij]; if (e < min_energy) min_energy = e; e = fc->matrices->fML[ij]; if (e < 
min_energy) min_energy = e; } for (int i = 1; i <= length; i++) { int e = fc->matrices->f5[i]; if (e < min_energy) min_energy = e; } // clean mfe fold matrices (we need only counts) vrna_mx_mfe_free(fc); int max_energy = max_energy_input; /* compute max_energy */ if (min_energy < 0) /* increase internal energy threshold in order to count * structures at the border correctly. */ max_energy = max_energy - 2 * min_energy; int step_energy = 1; // 1 decakal. (smallest unit of energy computations) int range = max_energy - min_energy; int energy_length = range + 1; // ceil (range / (float) step_energy) + 1; if (verbose) { printf("min_energy: %d \n", min_energy); printf("max_energy: %d %d\n", max_energy, min_energy + (step_energy * (energy_length - 1))); printf("range: %d, energy_length: %d\n", range, energy_length); } /* start recursion */ if (length <= turn) /* only the open chain is possible */ return; //for (i = length - turn - 1; i >= 1; i--) { // for (j = i + turn + 1; j <= length; j++) { int d; for (d = turn + 2; d <= length; d++) { /* i,j in [1..length] */ #ifdef _OPENMP #pragma omp parallel for private(j, i, ij, source_table, source_table_2, result_table) #endif for (j = d; j <= length; j++) { i = j - d + 1; ij = indx[j] + i; int type = fc->ptype[fc->jindx[j] + i]; //prepare matrices (add third dimension) count_matrix_pt.n_ij_e[ij] = create_hashtable_list(hashbits); count_matrix_pt.n_ij_M_e[ij] = create_hashtable_list(hashbits); count_matrix_pt.n_ij_M1_e[ij] = create_hashtable_list(hashbits); /* decompose subsegment [i, j] with pair (i, j) */ decompose_pair(fc, i, j, min_energy, max_energy, &count_matrix_pt); /* decompose subsegment [i, j] that is multibranch loop part with at least one branch */ int temp2 = 0; if (dangles == 2) temp2 += E_MLstem(type, (i == 1) ? S1[length] : S1[i - 1], S1[j + 1], P); else temp2 += E_MLstem(type, -1, -1, P); /* * now to the actual computations... 
* 1st E_M[ij] = E_M1[ij] = E_C[ij] + b */ source_table = &count_matrix_pt.n_ij_e[ij]; if (source_table->length > 0) { result_table = &count_matrix_pt.n_ij_M1_e[ij]; hashtable_list *result_table_2 = &count_matrix_pt.n_ij_M_e[ij]; for (int ei = 0; ei < source_table->length; ei++) { int c_energy = lookup_energy_from_index(source_table, ei); double c_count = lookup_count_from_index(source_table, ei); int sum_energy = c_energy + temp2; if ((sum_energy >= min_energy) && (sum_energy <= max_energy)) { hashtable_list_add_count(result_table, sum_energy, c_count); hashtable_list_add_count(result_table_2, sum_energy, c_count); } } } /* 2rd E_M[ij] = MIN(E_M[ij], E_M[i,j-1] + c) */ source_table = &count_matrix_pt.n_ij_M_e[fc->jindx[j - 1] + i]; if (source_table->length > 0) { result_table = &count_matrix_pt.n_ij_M_e[ij]; for (int ei = 0; ei < source_table->length; ei++) { int m_energy = lookup_energy_from_index(source_table, ei); double m_count = lookup_count_from_index(source_table, ei); int sum_energy = m_energy + P->MLbase; if ((sum_energy >= min_energy) && (sum_energy <= max_energy)) hashtable_list_add_count(result_table, sum_energy, m_count); } } /* 3th E_M1[ij] = MIN(E_M1[ij], E_M1[i,j-1] + c) */ source_table = &count_matrix_pt.n_ij_M1_e[fc->jindx[j - 1] + i]; if (source_table->length > 0) { result_table = &count_matrix_pt.n_ij_M1_e[ij]; for (int ei = 0; ei < source_table->length; ei++) { int m1_energy = lookup_energy_from_index(source_table, ei); double m1_count = lookup_count_from_index(source_table, ei); int sum_energy = m1_energy + P->MLbase; if ((sum_energy >= min_energy) && (sum_energy <= max_energy)) hashtable_list_add_count(result_table, sum_energy, m1_count); } } if (j > turn + 2) { int u; int temp3; for (u = i; u < j; u++) { int u1j = fc->jindx[j] + u + 1; int iu = fc->jindx[u] + i; source_table = &count_matrix_pt.n_ij_e[u1j]; if (source_table->length > 0) { type = fc->ptype[u1j]; /* [i..u] is unpaired */ if (dangles == 2) temp2 = E_MLstem(type, S1[u], S1[j + 
1], P); else temp2 = E_MLstem(type, -1, -1, P); temp3 = temp2 + (u - i + 1) * P->MLbase; result_table = &count_matrix_pt.n_ij_M_e[ij]; for (int ei = 0; ei < source_table->length; ei++) { int c_energy = lookup_energy_from_index(source_table, ei); double c_count = lookup_count_from_index(source_table, ei); int sum_energy = c_energy + temp3; if ((sum_energy >= min_energy) && (sum_energy <= max_energy)) hashtable_list_add_count(result_table, sum_energy, c_count); } /* [i...u] has at least one stem */ source_table_2 = &count_matrix_pt.n_ij_M_e[iu]; if (source_table_2->length > 0) { source_table = &count_matrix_pt.n_ij_e[u1j]; result_table = &count_matrix_pt.n_ij_M_e[ij]; for (int ei_1 = 0; ei_1 < source_table->length; ei_1++) { int c_energy = lookup_energy_from_index(source_table, ei_1); double c_count = lookup_count_from_index(source_table, ei_1); for (int ei_2 = 0; ei_2 < source_table_2->length; ei_2++) { int m_energy = lookup_energy_from_index(source_table_2, ei_2); double m_count = lookup_count_from_index(source_table_2, ei_2); int sum_energy = c_energy + m_energy + temp2; if ((sum_energy >= min_energy) && (sum_energy <= max_energy)) { double product_count = c_count * m_count; hashtable_list_add_count(result_table, sum_energy, product_count); } } } } } } } } /* end of j-loop */ } /* end of i-loop */ /* calculate energies of 5' fragments */ int x; #ifdef _OPENMP #pragma omp parallel for private(x) #endif for (x = 0; x <= length; x++) count_matrix_pt.n_ij_A_e[x] = create_hashtable_list(hashbits); int cnt1; #ifdef _OPENMP #pragma omp parallel for private(cnt1) #endif for (cnt1 = 1; cnt1 <= turn + 1; cnt1++) { int c_energy = 0; double c_count = 1; result_table = &count_matrix_pt.n_ij_A_e[cnt1]; hashtable_list_add_count(result_table, c_energy, c_count); } for (j = turn + 2; j <= length; j++) { /* j-1 is unpaired ... 
*/
  /* NOTE(review): this is the tail of compute_density_of_states(); its head
   * (variable declarations, matrix allocation, the enclosing recursion over j)
   * lies above this excerpt. */

  /* Case: j left unpaired — every structure counted for prefix [1..j-1]
   * carries over to [1..j] with unchanged energy. */
  source_table = &count_matrix_pt.n_ij_A_e[j - 1];
  result_table = &count_matrix_pt.n_ij_A_e[j];
  int ei;
  for (ei = 0; ei < source_table->length; ei++) {
    int    c_energy = lookup_energy_from_index(source_table, ei);
    double c_count  = lookup_count_from_index(source_table, ei);
    hashtable_list_add_count(result_table, c_energy, c_count);
  }

  /* j pairs with 1 */
  ij = fc->jindx[j] + 1;
  int type          = fc->ptype[ij];
  int additional_en = 0;
  if (type) {
    /* dangles==2: always add both dangling-end contributions (mismatch model) */
    if (dangles == 2)
      additional_en += E_ExtLoop(type, -1, j < length ? S1[j + 1] : -1, P);
    else
      additional_en += E_ExtLoop(type, -1, -1, P);
  }

  source_table = &count_matrix_pt.n_ij_e[ij];
  if (source_table->length > 0) {
    result_table = &count_matrix_pt.n_ij_A_e[j];
    int ei;
    for (ei = 0; ei < source_table->length; ei++) {
      int    c_energy   = lookup_energy_from_index(source_table, ei);
      double c_count    = lookup_count_from_index(source_table, ei);
      int    sum_energy = c_energy + additional_en;
      /* only keep energies inside the requested band */
      if ((sum_energy >= min_energy) && (sum_energy <= max_energy))
        hashtable_list_add_count(result_table, sum_energy, c_count);
    }
  }

  /* j pairs with some other nucleotide -> see below */
  for (i = j - turn - 1; i > 1; i--) {
    ij   = fc->jindx[j] + i;
    type = fc->ptype[ij];
    if (type) {
      if (dangles == 2)
        additional_en = E_ExtLoop(type, S1[i - 1], j < length ? S1[j + 1] : -1, P);
      else
        additional_en = E_ExtLoop(type, -1, -1, P);
      source_table   = &count_matrix_pt.n_ij_e[ij];
      source_table_2 = &count_matrix_pt.n_ij_A_e[i - 1];
      if (source_table->length > 0 && source_table_2->length > 0) {
        result_table = &count_matrix_pt.n_ij_A_e[j];
        int ei_1;
        /* convolve the energy histograms of [i..j] (paired) and [1..i-1] */
        for (ei_1 = 0; ei_1 < source_table->length; ei_1++) {
          int    c_energy = lookup_energy_from_index(source_table, ei_1);
          double c_count  = lookup_count_from_index(source_table, ei_1);
          int ei_2;
          for (ei_2 = 0; ei_2 < source_table_2->length; ei_2++) {
            int    f5_energy  = lookup_energy_from_index(source_table_2, ei_2);
            double f5_count   = lookup_count_from_index(source_table_2, ei_2);
            int    sum_energy = c_energy + f5_energy + additional_en;
            if ((sum_energy >= min_energy) && (sum_energy <= max_energy)) {
              /* independent sub-ensembles: counts multiply */
              double product_count = c_count * f5_count;
              hashtable_list_add_count(result_table, sum_energy, product_count);
            }
          }
        }
      }
    }
  }
  }

  printf("Energy bands with counted structures:\n");
  print_array_energy_counts(count_matrix_pt.n_ij_A_e, length, min_energy,
                            max_energy_input);

  /* free the per-(i,j) hash tables; n_ij_A_e is indexed 0..length */
  for (i = length - turn - 1; i >= 1; i--) {
#ifdef _OPENMP
#pragma omp parallel for private(j, ij)
#endif
    for (j = i + turn + 1; j <= length; j++) {
      ij = indx[j] + i;
      if (count_matrix_pt.n_ij_e[ij].allocated_size > 0)
        free_hashtable_list(&count_matrix_pt.n_ij_e[ij]);
      if (count_matrix_pt.n_ij_M_e[ij].allocated_size > 0)
        free_hashtable_list(&count_matrix_pt.n_ij_M_e[ij]);
      if (count_matrix_pt.n_ij_M1_e[ij].allocated_size > 0)
        free_hashtable_list(&count_matrix_pt.n_ij_M1_e[ij]);
    }
  }
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
  for (i = 0; i <= length; i++)
    free_hashtable_list(&count_matrix_pt.n_ij_A_e[i]);
  free(count_matrix_pt.n_ij_e);
  free(count_matrix_pt.n_ij_A_e);
  free(count_matrix_pt.n_ij_M_e);
  free(count_matrix_pt.n_ij_M1_e);
}


/* Read a single (FASTA) record from stdin and return its sequence.
 *
 * NOTE(review): on read error/quit this returns the string literal "" (not
 * NULL), but the caller in main() tests for NULL — the error is therefore not
 * detected, and main() later calls free() on that literal (undefined
 * behavior). rec_id/rec_rest returned by the reader are never freed here —
 * TODO confirm/fix upstream. */
char *
read_sequence_from_stdin()
{
  char          *rec_sequence, *rec_id, **rec_rest;
  unsigned int  rec_type;
  unsigned int  read_opt = 0;

  rec_id   = NULL;
  rec_rest = NULL;
  rec_type = vrna_file_fasta_read_record(&rec_id, &rec_sequence, &rec_rest,
                                         stdin, read_opt);
  if (rec_type & (VRNA_INPUT_ERROR | VRNA_INPUT_QUIT))
    return "";

  char *result_sequence = NULL;
  if (rec_type & VRNA_INPUT_SEQUENCE)
    result_sequence = rec_sequence;

  return result_sequence;
}


/* Entry point: parse command line, set up the energy model, and compute the
 * density of states for the given RNA sequence. */
int
main(int  argc,
     char *argv[])
{
  struct RNAdos_args_info args_info;
  vrna_md_t               md;

  set_model_details(&md);
  md.uniq_ML = 1;   /* unique multi-loop decomposition required by the DP */
  md.noLP    = 0;
  md.circ    = 0;
  md.dangles = 2;

  int  verbose    = 0;
  int  max_energy = 0;   /* band width above the MFE, in kcal/mol */
  int  hash_bits  = 20;  /* log2 of the per-cell hash table size */
  char *ParamFile = NULL;

  /*
   * #############################################
   * # check the command line parameters
   * #############################################
   */
  if (RNAdos_cmdline_parser(argc, argv, &args_info) != 0)
    exit(1);

  char *rnaSequence = NULL;
  if (!args_info.sequence_given) {
    rnaSequence = read_sequence_from_stdin();
    /* NOTE(review): read_sequence_from_stdin() returns "" (not NULL) on
     * error, so this check may not fire — confirm intended behavior */
    if (rnaSequence == NULL) {
      fprintf(stderr, "No RNA sequence given!");
      exit(1);
    }
  } else {
    rnaSequence = args_info.sequence_arg;
  }

  /* temperature */
  if (args_info.temp_given)
    md.temperature = temperature = args_info.temp_arg;

  /* dangle options */
  if (args_info.dangles_given) {
    if ((args_info.dangles_arg != 0) && (args_info.dangles_arg != 2))
      vrna_message_warning(
        "required dangle model not implemented, falling back to default dangles=2");
    else
      md.dangles = dangles = args_info.dangles_arg;
  }

  if (args_info.verbose_given)
    verbose = 1;

  if (args_info.max_energy_given)
    max_energy = args_info.max_energy_arg;

  if (args_info.hashtable_bits_given)
    hash_bits = args_info.hashtable_bits_arg;

  /* set number of threads for parallel computation */
  if (args_info.numThreads_given) {
#ifdef _OPENMP
    omp_set_num_threads(args_info.numThreads_arg);
    omp_set_dynamic(0);
#endif
  }

  /* get energy parameter file name */
  if (args_info.paramFile_given)
    ParamFile = strdup(args_info.paramFile_arg);

  /* free allocated memory of command line data structure
   * NOTE(review): rnaSequence may still point into args_info when
   * --sequence was given — confirm the parser does not free it here */
  RNAdos_cmdline_parser_free(&args_info);

  if (ParamFile != NULL) {
    if (!strcmp(ParamFile, "DNA"))
      vrna_params_load_DNA_Mathews2004();
    else
      vrna_params_load(ParamFile, VRNA_PARAMETER_FORMAT_DEFAULT);
  }

  if (verbose)
    printf("%s\n", rnaSequence);

  vrna_fold_compound_t *fc = vrna_fold_compound(rnaSequence, &md,
                                                VRNA_OPTION_DEFAULT);
  if (!vrna_fold_compound_prepare(fc, VRNA_OPTION_MFE))
    vrna_message_warning("vrna_mfe@mfe.c: Failed to prepare vrna_fold_compound");

  /* CLI takes kcal/mol; the DP works in dcal/mol (10*cal) */
  int max_energy_dcal = max_energy * 100;
  compute_density_of_states(fc, max_energy_dcal, hash_bits, verbose);

  free(rnaSequence);
  vrna_fold_compound_free(fc);
  return EXIT_SUCCESS;
}
set.h
#ifndef __SET_H__ #define __SET_H__ #include "../tensor/algstrct.h" #include "functions.h" //#include <stdint.h> #include <limits> #include <inttypes.h> #include "../shared/memcontrol.h" #ifdef _OPENMP #include <omp.h> #endif namespace CTF { /** * \brief index-value pair used for tensor data input */ template<typename dtype=double> class Pair { public: /** \brief key, global index [i1,i2,...] specified as i1+len[0]*i2+... */ int64_t k; /** \brief tensor value associated with index */ dtype d; /** * \brief constructor builds pair * \param[in] k_ key * \param[in] d_ value */ Pair(int64_t k_, dtype d_){ this->k = k_; d = d_; } /** * \brief default constructor */ Pair(){ //k=0; //d=0; //(not possible if type has no zero!) } /** * \brief determines pair ordering */ bool operator<(Pair<dtype> other) const { return k<other.k; } }; template<typename dtype> inline bool comp_pair(Pair<dtype> i, Pair<dtype> j) { return (i.k<j.k); } } namespace CTF_int { //does conversion using MKL function if it is available bool try_mkl_coo_to_csr(int64_t nz, int nrow, char * csr_vs, int * csr_ja, int * csr_ia, char const * coo_vs, int const * coo_rs, int const * coo_cs, int el_size); bool try_mkl_csr_to_coo(int64_t nz, int nrow, char const * csr_vs, int const * csr_ja, int const * csr_ia, char * coo_vs, int * coo_rs, int * coo_cs, int el_size); template <typename dtype> void seq_coo_to_csr(int64_t nz, int nrow, dtype * csr_vs, int * csr_ja, int * csr_ia, dtype const * coo_vs, int const * coo_rs, int const * coo_cs){ int sz = sizeof(dtype); if (sz == 4 || sz == 8 || sz == 16){ bool b = try_mkl_coo_to_csr(nz, nrow, (char*)csr_vs, csr_ja, csr_ia, (char const*)coo_vs, coo_rs, coo_cs, sz); if (b) return; } #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ csr_ja[i] = i; //printf("csr_ja[%d/%d] = %d\n",i,nz,csr_ja[i]); } class comp_ref { public: int const * a; comp_ref(int const * a_){ a = a_; } bool operator()(int u, int v){ return a[u] < a[v]; } }; comp_ref 
crc(coo_cs); std::sort(csr_ja, csr_ja+nz, crc); comp_ref crr(coo_rs); std::stable_sort(csr_ja, csr_ja+nz, crr); // do not copy by value in case values are objects, then csr_vs is uninitialized //printf("csr nz = %ld\n",nz); #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ //printf("%d, %d, %ld\n",(int)((char*)(coo_vs+csr_ja[i])-(char*)(coo_vs))-csr_ja[i]*sizeof(dtype),sizeof(dtype),csr_ja[i]); // memcpy(csr_vs+i, coo_vs+csr_ja[i]-1,sizeof(dtype)); //memcpy(csr_vs+i, coo_vs+csr_ja[i],sizeof(dtype)); csr_vs[i] = coo_vs[csr_ja[i]]; // printf("i %ld csr_ja[i] %d\n", i, csr_ja[i]); // printf("i %ld v %lf\n", i, csr_vs[i]); //printf("%p %d\n",coo_vs+i,*(int32_t*)(coo_vs+i)); } #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ csr_ja[i] = coo_cs[csr_ja[i]]; } csr_ia[0] = 1; #ifdef _OPENMP #pragma omp parallel for #endif for (int i=1; i<nrow+1; i++){ csr_ia[i] = 0; } #ifdef _OPENMP int * scoo_rs = (int*)CTF_int::alloc(sizeof(int)*nz); memcpy(scoo_rs, coo_rs, nz*sizeof(int)); std::sort(scoo_rs,scoo_rs+nz); #pragma omp parallel { int tid = omp_get_thread_num(); int ntd = omp_get_num_threads(); int64_t i_st = tid*(nz/ntd)+std::min(tid,(int)(nz%ntd)); int64_t i_end = (tid+1)*(nz/ntd)+std::min((tid+1),(int)(nz%ntd)); while (i_st > 0 && i_st < nz && scoo_rs[i_st] == scoo_rs[i_st-1]) i_st++; while (i_end < nz && scoo_rs[i_end] == scoo_rs[i_end-1]) i_end++; for (int64_t i=i_st; i<i_end; i++){ csr_ia[scoo_rs[i]]++; } } CTF_int::cdealloc(scoo_rs); #else for (int64_t i=0; i<nz; i++){ //printf("scoo_rs[%d]=%d\n",i,scoo_rs[i]); csr_ia[coo_rs[i]]++; } #endif #ifdef _OPENMP //int * csr_ia2 = (int*)CTF_int::alloc(sizeof(int)*(nrow+1)); //CTF_int::prefix<int>(nrow+1, csr_ia, csr_ia2); ////memcpy(csr_ia, csr_ia2, nrow*sizeof(int)); //#pragma omp parallel for //for (int i=0; i<nrow+1; i++){ // assert((i==0 && csr_ia2[i] == 0) || csr_ia[i-1] == csr_ia2[i]); // csr_ia[i] += csr_ia2[i]; // printf("csr_ia[%d/%d] = %d\n",i,nrow,csr_ia[i]); 
//} //CTF_int::cdealloc(csr_ia2); CTF_int::parallel_postfix<int>(nrow+1, 1, csr_ia); #else for (int i=0; i<nrow; i++){ csr_ia[i+1] += csr_ia[i]; //printf("csr_ia[%d/%d] = %d\n",i,nrow,csr_ia[i]); } #endif } template <typename dtype> void seq_coo_to_ccsr(int64_t nz, int64_t nnz_row, dtype * ccsr_vs, int * ccsr_ja, int * ccsr_ia, dtype const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs){ #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ ccsr_ja[i] = i; //printf("ccsr_ja[%d/%d] = %d\n",i,nz,ccsr_ja[i]); } class comp_ref { public: int64_t const * a; comp_ref(int64_t const * a_){ a = a_; } bool operator()(int u, int v){ return a[u] < a[v]; } }; comp_ref crc(coo_cs); std::sort(ccsr_ja, ccsr_ja+nz, crc); comp_ref crr(coo_rs); std::stable_sort(ccsr_ja, ccsr_ja+nz, crr); // do not copy by value in case values are objects, then ccsr_vs is uninitialized //printf("ccsr nz = %ld\n",nz); #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ //printf("%d, %d, %ld\n",(int)((char*)(coo_vs+ccsr_ja[i])-(char*)(coo_vs))-ccsr_ja[i]*sizeof(dtype),sizeof(dtype),ccsr_ja[i]); // memcpy(ccsr_vs+i, coo_vs+ccsr_ja[i]-1,sizeof(dtype)); //memcpy(ccsr_vs+i, coo_vs+ccsr_ja[i],sizeof(dtype)); ccsr_vs[i] = coo_vs[ccsr_ja[i]]; // printf("i %ld ccsr_ja[i] %d\n", i, ccsr_ja[i]); // printf("i %ld v %lf\n", i, ccsr_vs[i]); //printf("%p %d\n",coo_vs+i,*(int32_t*)(coo_vs+i)); } ccsr_ia[0] = 1; ccsr_ia[1] = 1 + (nz>0); //FIXME: parallelize int64_t cia = 1; for (int64_t i=1; i<nz; i++){ if (coo_rs[ccsr_ja[i]] > coo_rs[ccsr_ja[i-1]]){ cia++; ccsr_ia[cia] = ccsr_ia[cia-1]; } ccsr_ia[cia]++; } //#ifdef _OPENMP // #pragma omp parallel for //#endif // for (int i=0; i<nnz_row; i++){ // ccsr_ia[i+1] += ccsr_ia[i]; // //printf("ccsr_ia[%d/%d] = %d\n",i,nrow,ccsr_ia[i]); // } #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ ccsr_ja[i] = coo_cs[ccsr_ja[i]]; } } template <typename dtype> void seq_csr_to_coo(int64_t nz, int nrow, 
dtype const * csr_vs, int const * csr_ja, int const * csr_ia, dtype * coo_vs, int * coo_rs, int * coo_cs){ int sz = sizeof(dtype); if (sz == 4 || sz == 8 || sz == 16){ bool b = try_mkl_csr_to_coo(nz, nrow, (char const*)csr_vs, csr_ja, csr_ia, (char*)coo_vs, coo_rs, coo_cs, sz); if (b) return; } //memcpy(coo_vs, csr_vs, sizeof(dtype)*nz); std::copy(csr_vs, csr_vs+nz, coo_vs); memcpy(coo_cs, csr_ja, sizeof(int)*nz); for (int i=0; i<nrow; i++){ std::fill(coo_rs+csr_ia[i]-1, coo_rs+csr_ia[i+1]-1, i+1); } } template <typename dtype> void def_coo_to_ccsr(int64_t nz, int64_t nnz_row, dtype * ccsr_vs, int * ccsr_ja, int * ccsr_ia, dtype const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs){ seq_coo_to_ccsr<dtype>(nz, nnz_row, ccsr_vs, ccsr_ja, ccsr_ia, coo_vs, coo_rs, coo_cs); } template <typename dtype> void def_coo_to_csr(int64_t nz, int nrow, dtype * csr_vs, int * csr_ja, int * csr_ia, dtype const * coo_vs, int const * coo_rs, int const * coo_cs){ seq_coo_to_csr<dtype>(nz, nrow, csr_vs, csr_ja, csr_ia, coo_vs, coo_rs, coo_cs); } template <typename dtype> void def_csr_to_coo(int64_t nz, int nrow, dtype const * csr_vs, int const * csr_ja, int const * csr_ia, dtype * coo_vs, int * coo_rs, int * coo_cs){ seq_csr_to_coo<dtype>(nz, nrow, csr_vs, csr_ja, csr_ia, coo_vs, coo_rs, coo_cs); } template <typename dtype> void seq_ccsr_to_coo(int64_t nz, int64_t nnz_row, dtype const * ccsr_vs, int const * ccsr_ja, int const * ccsr_ia, int64_t const * row_enc, dtype * coo_vs, int64_t * coo_rs, int64_t * coo_cs){ //memcpy(coo_vs, ccsr_vs, sizeof(dtype)*nz); std::copy(ccsr_vs, ccsr_vs+nz, coo_vs); #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nz; i++){ coo_cs[i] = ccsr_ja[i]; } #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i=0; i<nnz_row; i++){ std::fill(coo_rs+ccsr_ia[i]-1, coo_rs+ccsr_ia[i+1]-1, row_enc[i]); } } template <typename dtype> void def_coo_to_ccsr(int64_t nz, int64_t nnz_row, dtype * ccsr_vs, int * ccsr_ja, int * ccsr_ia, int 
const * row_enc, dtype const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs){ seq_coo_to_ccsr<dtype>(nz, nnz_row, ccsr_vs, ccsr_ja, ccsr_ia, row_enc, coo_vs, coo_rs, coo_cs); } template <typename dtype> void def_ccsr_to_coo(int64_t nz, int64_t nnz_row, dtype const * ccsr_vs, int const * ccsr_ja, int const * ccsr_ia, int64_t const * row_enc, dtype * coo_vs, int64_t * coo_rs, int64_t * coo_cs){ seq_ccsr_to_coo<dtype>(nz, nnz_row, ccsr_vs, ccsr_ja, ccsr_ia, row_enc, coo_vs, coo_rs, coo_cs); } template <typename dtype> bool default_isequal(dtype a, dtype b){ int sz = sizeof(dtype); for (int i=0; i<sz; i++){ if (((char const *)&a)[i] != ((char const *)&b)[i]){ return false; } } return true; } template <typename dtype> dtype default_addinv(dtype a){ return -a; } template <typename dtype, bool is_ord> inline typename std::enable_if<is_ord, dtype>::type default_abs(dtype a){ dtype b = default_addinv<dtype>(a); return a>=b ? a : b; } template <typename dtype, bool is_ord> inline typename std::enable_if<!is_ord, dtype>::type default_abs(dtype a){ printf("CTF ERROR: cannot compute abs unless the set is ordered"); assert(0); return a; } template <typename dtype, dtype (*abs)(dtype)> void char_abs(char const * a, char * b){ ((dtype*)b)[0]=abs(((dtype const*)a)[0]); } //C++14 support needed for these std::enable_if template <typename dtype, bool is_ord> inline typename std::enable_if<is_ord, dtype>::type default_min(dtype a, dtype b){ return a>b ? 
b : a; } template <typename dtype, bool is_ord> inline typename std::enable_if<!is_ord, dtype>::type default_min(dtype a, dtype b){ printf("CTF ERROR: cannot compute a max unless the set is ordered"); assert(0); return a; } template <typename dtype, bool is_ord> inline typename std::enable_if<is_ord, dtype>::type default_max_lim(){ return std::numeric_limits<dtype>::max(); } template <typename dtype, bool is_ord> inline typename std::enable_if<!is_ord, dtype>::type default_max_lim(){ printf("CTF ERROR: cannot compute a max unless the set is ordered"); assert(0); dtype * a = NULL; return *a; } template <typename dtype, bool is_ord> inline typename std::enable_if<is_ord, dtype>::type default_min_lim(){ return std::numeric_limits<dtype>::min(); } template <typename dtype, bool is_ord> inline typename std::enable_if<!is_ord, dtype>::type default_min_lim(){ printf("CTF ERROR: cannot compute a max unless the set is ordered"); assert(0); dtype * a = NULL; return *a; } template <typename dtype, bool is_ord> inline typename std::enable_if<is_ord, dtype>::type default_max(dtype a, dtype b){ return b>a ? 
b : a; } template <typename dtype, bool is_ord> inline typename std::enable_if<!is_ord, dtype>::type default_max(dtype a, dtype b){ printf("CTF ERROR: cannot compute a min unless the set is ordered"); assert(0); return a; } template <typename dtype> MPI_Datatype get_default_mdtype(bool & is_custom){ MPI_Datatype newtype; MPI_Type_contiguous(sizeof(dtype), MPI_BYTE, &newtype); MPI_Type_commit(&newtype); is_custom = true; return newtype; } extern MPI_Datatype MPI_CTF_BOOL; extern MPI_Datatype MPI_CTF_DOUBLE_COMPLEX; extern MPI_Datatype MPI_CTF_LONG_DOUBLE_COMPLEX; template <> inline MPI_Datatype get_default_mdtype<bool>(bool & is_custom){ is_custom=false; return MPI_CTF_BOOL; } template <> inline MPI_Datatype get_default_mdtype< std::complex<double> >(bool & is_custom){ is_custom=false; return MPI_CTF_DOUBLE_COMPLEX; } template <> inline MPI_Datatype get_default_mdtype< std::complex<long double> >(bool & is_custom){ is_custom=false; return MPI_CTF_LONG_DOUBLE_COMPLEX; } template <> inline MPI_Datatype get_default_mdtype<char>(bool & is_custom){ is_custom=false; return MPI_CHAR; } template <> inline MPI_Datatype get_default_mdtype<int>(bool & is_custom){ is_custom=false; return MPI_INT; } template <> inline MPI_Datatype get_default_mdtype<int64_t>(bool & is_custom){ is_custom=false; return MPI_INT64_T; } template <> inline MPI_Datatype get_default_mdtype<unsigned int>(bool & is_custom){ is_custom=false; return MPI_UNSIGNED; } template <> inline MPI_Datatype get_default_mdtype<uint64_t>(bool & is_custom){ is_custom=false; return MPI_UINT64_T; } template <> inline MPI_Datatype get_default_mdtype<float>(bool & is_custom){ is_custom=false; return MPI_FLOAT; } template <> inline MPI_Datatype get_default_mdtype<double>(bool & is_custom){ is_custom=false; return MPI_DOUBLE; } template <> inline MPI_Datatype get_default_mdtype<long double>(bool & is_custom){ is_custom=false; return MPI_LONG_DOUBLE; } template <> inline MPI_Datatype get_default_mdtype< std::complex<float> 
>(bool & is_custom){ is_custom=false; return MPI_COMPLEX; } template <typename dtype> constexpr bool get_default_is_ord(){ return false; } #define INST_ORD_TYPE(dtype) \ template <> \ constexpr bool get_default_is_ord<dtype>(){ \ return true; \ } INST_ORD_TYPE(float) INST_ORD_TYPE(double) INST_ORD_TYPE(long double) INST_ORD_TYPE(bool) INST_ORD_TYPE(char) INST_ORD_TYPE(int) INST_ORD_TYPE(unsigned int) INST_ORD_TYPE(int64_t) INST_ORD_TYPE(uint64_t) #define INST_IET(typ) \ template <> \ inline bool default_isequal<typ>(typ a, typ b){ \ return a==b; \ } \ INST_IET(float) INST_IET(double) INST_IET(std::complex<float>) INST_IET(std::complex<double>) INST_IET(bool) INST_IET(int) INST_IET(int16_t) INST_IET(int64_t) INST_IET(uint16_t) INST_IET(uint32_t) INST_IET(uint64_t) INST_IET(std::complex<long double>) INST_IET(long double) } namespace CTF { /** \brief pair for sorting */ template <typename dtype> struct dtypePair{ int64_t key; dtype data; bool operator < (const dtypePair<dtype>& other) const { return (key < other.key); } }; /** * \defgroup algstrct Algebraic Structures * \addtogroup algstrct * @{ */ /** * \brief Set class defined by a datatype and a min/max function (if it is partially ordered i.e. 
is_ord=true) * currently assumes min and max are given by numeric_limits (custom min/max not allowed) */ template <typename dtype=double, bool is_ord=CTF_int::get_default_is_ord<dtype>()> class Set : public CTF_int::algstrct { public: int pair_sz; bool is_custom_mdtype; MPI_Datatype tmdtype; ~Set(){ if (is_custom_mdtype) MPI_Type_free(&tmdtype); } Set(Set const & other) : CTF_int::algstrct(other) { if (other.is_custom_mdtype){ tmdtype = CTF_int::get_default_mdtype<dtype>(is_custom_mdtype); } else { this->tmdtype = other.tmdtype; is_custom_mdtype = false; } pair_sz = sizeof(std::pair<int64_t,dtype>); //printf("%ld %ld \n", sizeof(dtype), pair_sz); abs = other.abs; } int pair_size() const { //printf("%d %d \n", sizeof(dtype), pair_sz); return pair_sz; } int64_t get_key(char const * a) const { return ((std::pair<int64_t,dtype> const *)a)->first; } char * get_value(char * a) const { return (char*)&(((std::pair<int64_t,dtype> const *)a)->second); } char const * get_const_value(char const * a) const { return (char const *)&(((std::pair<int64_t,dtype> const *)a)->second); } virtual CTF_int::algstrct * clone() const { return new Set<dtype, is_ord>(*this); } bool is_ordered() const { return is_ord; } Set() : CTF_int::algstrct(sizeof(dtype)){ tmdtype = CTF_int::get_default_mdtype<dtype>(is_custom_mdtype); set_abs_to_default(); pair_sz = sizeof(std::pair<int64_t,dtype>); } void set_abs_to_default(){ abs = &CTF_int::char_abs< dtype, CTF_int::default_abs<dtype, is_ord> >; } MPI_Datatype mdtype() const { return tmdtype; } void min(char const * a, char const * b, char * c) const { ((dtype*)c)[0] = CTF_int::default_min<dtype,is_ord>(((dtype*)a)[0],((dtype*)b)[0]); } void max(char const * a, char const * b, char * c) const { ((dtype*)c)[0] = CTF_int::default_max<dtype,is_ord>(((dtype*)a)[0],((dtype*)b)[0]); } void min(char * c) const { ((dtype*)c)[0] = CTF_int::default_min_lim<dtype,is_ord>(); } void max(char * c) const { ((dtype*)c)[0] = CTF_int::default_max_lim<dtype,is_ord>(); } 
void cast_double(double d, char * c) const { //((dtype*)c)[0] = (dtype)d; printf("CTF ERROR: double cast not possible for this algebraic structure\n"); assert(0); } void cast_int(int64_t i, char * c) const { //((dtype*)c)[0] = (dtype)i; printf("CTF ERROR: integer cast not possible for this algebraic structure\n"); assert(0); } double cast_to_double(char const * c) const { printf("CTF ERROR: double cast not possible for this algebraic structure\n"); IASSERT(0); assert(0); return 0.0; } int64_t cast_to_int(char const * c) const { printf("CTF ERROR: int cast not possible for this algebraic structure\n"); assert(0); return 0; } void print(char const * a, FILE * fp=stdout) const { for (int i=0; i<el_size; i++){ fprintf(fp,"%x",a[i]); } } bool isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; for (int i=0; i<el_size; i++){ if (a[i] != b[i]) return false; } return true; } void coo_to_csr(int64_t nz, int nrow, char * csr_vs, int * csr_ja, int * csr_ia, char const * coo_vs, int const * coo_rs, int const * coo_cs) const { CTF_int::def_coo_to_csr(nz, nrow, (dtype *)csr_vs, csr_ja, csr_ia, (dtype const *) coo_vs, coo_rs, coo_cs); } void csr_to_coo(int64_t nz, int nrow, char const * csr_vs, int const * csr_ja, int const * csr_ia, char * coo_vs, int * coo_rs, int * coo_cs) const { CTF_int::def_csr_to_coo(nz, nrow, (dtype const *)csr_vs, csr_ja, csr_ia, (dtype*) coo_vs, coo_rs, coo_cs); } void coo_to_ccsr(int64_t nz, int64_t nnz_row, char * ccsr_vs, int * ccsr_ja, int * ccsr_ia, char const * coo_vs, int64_t const * coo_rs, int64_t const * coo_cs) const { CTF_int::def_coo_to_ccsr(nz, nnz_row, (dtype *)ccsr_vs, ccsr_ja, ccsr_ia, (dtype const *) coo_vs, coo_rs, coo_cs); } void ccsr_to_coo(int64_t nz, int64_t nnz_row, char const * csr_vs, int const * csr_ja, int const * csr_ia, int64_t const * row_enc, char * coo_vs, int64_t * coo_rs, int64_t * coo_cs) const { CTF_int::def_ccsr_to_coo(nz, nnz_row, (dtype 
const *)csr_vs, csr_ja, csr_ia, row_enc, (dtype*) coo_vs, coo_rs, coo_cs); } char * pair_alloc(int64_t n) const { //assert(sizeof(std::pair<int64_t,dtype>[n])==(uint64_t)(pair_size()*n)); CTF_int::memprof_alloc_pre(n*sizeof(std::pair<int64_t,dtype>)); char * ptr = (char*)(new std::pair<int64_t,dtype>[n]); CTF_int::memprof_alloc_post(n*sizeof(std::pair<int64_t,dtype>),(void**)&ptr); return ptr; } char * alloc(int64_t n) const { //assert(sizeof(dtype[n])==(uint64_t)(el_size*n)); CTF_int::memprof_alloc_pre(n*sizeof(dtype)); char * ptr = (char*)(new dtype[n]); CTF_int::memprof_alloc_post(n*sizeof(dtype),(void**)&ptr); return ptr; } void dealloc(char * ptr) const { CTF_int::memprof_dealloc(ptr); return delete [] (dtype*)ptr; } void pair_dealloc(char * ptr) const { CTF_int::memprof_dealloc(ptr); return delete [] (std::pair<int64_t,dtype>*)ptr; } void sort(int64_t n, char * pairs) const { std::sort((dtypePair<dtype>*)pairs,((dtypePair<dtype>*)pairs)+n); } void copy(char * a, char const * b) const { ((dtype *)a)[0] = ((dtype const *)b)[0]; } void copy(char * a, char const * b, int64_t n) const { std::copy((dtype const *)b, ((dtype const *)b) + n, (dtype *)a); } void copy_pair(char * a, char const * b) const { ((std::pair<int64_t,dtype> *)a)[0] = ((std::pair<int64_t,dtype> const *)b)[0]; } void copy_pairs(char * a, char const * b, int64_t n) const { std::copy((std::pair<int64_t,dtype> const *)b, ((std::pair<int64_t,dtype> const *)b) + n, (std::pair<int64_t,dtype> *)a); //std::copy((std::pair<int64_t,dtype> *)a, (std::pair<int64_t,dtype> const *)b, n); //for (int64_t i=0; i<n; i++){ /*printf("i=%ld\n",i); this->print((char*)&(((std::pair<int64_t,dtype> const *)a)[i].second)); this->print((char*)&(((std::pair<int64_t,dtype> const *)b)[i].second));*/ //((std::pair<int64_t,dtype>*)a)[i] = ((std::pair<int64_t,dtype> const *)b)[i]; //this->print((char*)&(((std::pair<int64_t,dtype> const *)a)[i].second)); //} } void set(char * a, char const * b, int64_t n) const { if (n >= 100) { 
#ifdef _OPENMP dtype *ia = (dtype*)a; dtype ib = *((dtype*)b); #pragma omp parallel { int64_t tid = omp_get_thread_num(); int64_t chunksize = n / omp_get_num_threads(); dtype *begin = ia + chunksize * tid; dtype *end; if (tid == omp_get_num_threads() - 1) end = ia + n; else end = begin + chunksize; std::fill(begin, end, ib); } return; #endif } std::fill((dtype*)a, ((dtype*)a)+n, *((dtype*)b)); } void set_pair(char * a, int64_t key, char const * b) const { ((std::pair<int64_t,dtype> *)a)[0] = std::pair<int64_t,dtype>(key,*((dtype*)b)); } void set_pairs(char * a, char const * b, int64_t n) const { std::fill((std::pair<int64_t,dtype> *)a, (std::pair<int64_t,dtype> *)a + n, *(std::pair<int64_t,dtype> const*)b); } void copy(int64_t n, char const * a, int inc_a, char * b, int inc_b) const { dtype const * da = (dtype const*)a; dtype * db = (dtype *)b; for (int64_t i=0; i<n; i++){ db[inc_b*i] = da[inc_a*i]; } } void copy(int64_t m, int64_t n, char const * a, int64_t lda_a, char * b, int64_t lda_b) const { dtype const * da = (dtype const*)a; dtype * db = (dtype *)b; for (int64_t j=0; j<n; j++){ for (int64_t i=0; i<m; i++){ db[j*lda_b+i] = da[j*lda_a+i]; } } } void init(int64_t n, char * arr) const { dtype addid = dtype(); set(arr, (char const *)&addid, n); } /** \brief initialize n objects to zero * \param[in] n number of items * \param[in] arr array containing n items, to be set to zero */ virtual void init_shell(int64_t n, char * arr) const { dtype dummy = dtype(); for (int i=0; i<n; i++){ memcpy(arr+i*el_size,(char*)&dummy,el_size); } } CTF_int::bivar_function * get_elementwise_smaller() const { return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return !CTF_int::default_isequal<dtype>(CTF_int::default_max<dtype,is_ord>(a,b), a);}); } CTF_int::bivar_function * get_elementwise_smaller_or_equal() const { return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return CTF_int::default_isequal<dtype>(CTF_int::default_max<dtype,is_ord>(a,b), b);}); } 
CTF_int::bivar_function * get_elementwise_is_equal() const { return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return CTF_int::default_isequal<dtype>(a, b);}); } CTF_int::bivar_function * get_elementwise_is_not_equal() const { return new Bivar_Function<dtype,dtype,bool>([](dtype a, dtype b){ return !CTF_int::default_isequal<dtype>(a, b);}); } /* void copy(int64_t m, int64_t n, char const * a, int64_t lda_a, char const * alpha, char * b, int64_t lda_b, char const * beta) const { dtype const * da = (dtype const*)a; dtype dalpha = *((dtype const*)alpha); dtype dbeta = *((dtype const*)beta); dtype * db = (dtype *)b; for (int64_t j=0; j<n; j++){ for (int64_t i=0; i<m; i++){ dbeta*db[j*lda_b+i] += dalpha*da[j*lda_a+i] } } }*/ }; //FIXME do below with macros to shorten template <> inline void Set<float>::cast_double(double d, char * c) const { ((float*)c)[0] = (float)d; } template <> inline void Set<double>::cast_double(double d, char * c) const { ((double*)c)[0] = d; } template <> inline void Set<long double>::cast_double(double d, char * c) const { ((long double*)c)[0] = (long double)d; } template <> inline void Set<int>::cast_double(double d, char * c) const { ((int*)c)[0] = (int)d; } template <> inline void Set<uint64_t>::cast_double(double d, char * c) const { ((uint64_t*)c)[0] = (uint64_t)d; } template <> inline void Set<int64_t>::cast_double(double d, char * c) const { ((int64_t*)c)[0] = (int64_t)d; } template <> inline void Set< std::complex<float>,false >::cast_double(double d, char * c) const { ((std::complex<float>*)c)[0] = (std::complex<float>)d; } template <> inline void Set< std::complex<double>,false >::cast_double(double d, char * c) const { ((std::complex<double>*)c)[0] = (std::complex<double>)d; } template <> inline void Set< std::complex<long double>,false >::cast_double(double d, char * c) const { ((std::complex<long double>*)c)[0] = (std::complex<long double>)d; } template <> inline void Set<float>::cast_int(int64_t d, char * c) const 
{ ((float*)c)[0] = (float)d; } template <> inline void Set<double>::cast_int(int64_t d, char * c) const { ((double*)c)[0] = (double)d; } template <> inline void Set<long double>::cast_int(int64_t d, char * c) const { ((long double*)c)[0] = (long double)d; } template <> inline void Set<int>::cast_int(int64_t d, char * c) const { ((int*)c)[0] = (int)d; } template <> inline void Set<uint64_t>::cast_int(int64_t d, char * c) const { ((uint64_t*)c)[0] = (uint64_t)d; } template <> inline void Set<int64_t>::cast_int(int64_t d, char * c) const { ((int64_t*)c)[0] = (int64_t)d; } template <> inline void Set< std::complex<float>,false >::cast_int(int64_t d, char * c) const { ((std::complex<float>*)c)[0] = (std::complex<float>)d; } template <> inline void Set< std::complex<double>,false >::cast_int(int64_t d, char * c) const { ((std::complex<double>*)c)[0] = (std::complex<double>)d; } template <> inline void Set< std::complex<long double>,false >::cast_int(int64_t d, char * c) const { ((std::complex<long double>*)c)[0] = (std::complex<long double>)d; } template <> inline double Set<float>::cast_to_double(char const * c) const { return (double)(((float*)c)[0]); } template <> inline double Set<double>::cast_to_double(char const * c) const { return ((double*)c)[0]; } template <> inline double Set<int>::cast_to_double(char const * c) const { return (double)(((int*)c)[0]); } template <> inline double Set<uint64_t>::cast_to_double(char const * c) const { return (double)(((uint64_t*)c)[0]); } template <> inline double Set<int64_t>::cast_to_double(char const * c) const { return (double)(((int64_t*)c)[0]); } template <> inline int64_t Set<int64_t>::cast_to_int(char const * c) const { return ((int64_t*)c)[0]; } template <> inline int64_t Set<int>::cast_to_int(char const * c) const { return (int64_t)(((int*)c)[0]); } template <> inline int64_t Set<unsigned int>::cast_to_int(char const * c) const { return (int64_t)(((unsigned int*)c)[0]); } template <> inline int64_t 
// ---------------------------------------------------------------------------
// Explicit specializations of Set<T> member functions (tail of a longer run
// of specializations; the `template<>` header of the first definition lies
// above this chunk).
// ---------------------------------------------------------------------------

// cast_to_int: reinterpret the raw element bytes at `c` as the concrete
// element type and widen the value to int64_t.
Set<uint64_t>::cast_to_int(char const * c) const { return (int64_t)(((uint64_t*)c)[0]); }
template <>
inline int64_t Set<bool>::cast_to_int(char const * c) const { return (int64_t)(((bool*)c)[0]); }

// print: format one element (raw bytes at `a`) onto the stream `fp`.
template <>
inline void Set<float>::print(char const * a, FILE * fp) const { fprintf(fp,"%11.5E",((float*)a)[0]); }
template <>
inline void Set<double>::print(char const * a, FILE * fp) const { fprintf(fp,"%11.5E",((double*)a)[0]); }
template <>
inline void Set<int64_t>::print(char const * a, FILE * fp) const { fprintf(fp,"%ld",((int64_t*)a)[0]); }
template <>
inline void Set<uint64_t>::print(char const * a, FILE * fp) const { fprintf(fp,"%lu",((uint64_t*)a)[0]); }
template <>
inline void Set<uint32_t>::print(char const * a, FILE * fp) const { fprintf(fp,"%u",((uint32_t*)a)[0]); }
template <>
inline void Set<int>::print(char const * a, FILE * fp) const { fprintf(fp,"%d",((int*)a)[0]); }

// Complex elements print as "(re,im)" pairs; the long-double variant uses the
// %LE length modifier.
template <>
inline void Set< std::complex<float>,false >::print(char const * a, FILE * fp) const { fprintf(fp,"(%11.5E,%11.5E)",((std::complex<float>*)a)[0].real(),((std::complex<float>*)a)[0].imag()); }
template <>
inline void Set< std::complex<double>,false >::print(char const * a, FILE * fp) const { fprintf(fp,"(%11.5E,%11.5E)",((std::complex<double>*)a)[0].real(),((std::complex<double>*)a)[0].imag()); }
template <>
inline void Set< std::complex<long double>,false >::print(char const * a, FILE * fp) const { fprintf(fp,"(%11.5LE,%11.5LE)",((std::complex<long double>*)a)[0].real(),((std::complex<long double>*)a)[0].imag()); }

// isequal: NULL-tolerant equality on raw element pointers.  Two NULLs compare
// equal, NULL vs non-NULL compare unequal, otherwise the pointed-to values
// are compared with the element type's operator==.
template <>
inline bool Set<float>::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return ((float*)a)[0] == ((float*)b)[0]; }
template <>
inline bool Set<double>::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return ((double*)a)[0] == ((double*)b)[0]; }
template <>
inline bool Set<int>::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return ((int*)a)[0] == ((int*)b)[0]; }
template <>
inline bool Set<uint64_t>::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return ((uint64_t*)a)[0] == ((uint64_t*)b)[0]; }
template <>
inline bool Set<int64_t>::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return ((int64_t*)a)[0] == ((int64_t*)b)[0]; }
template <>
inline bool Set<long double>::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return ((long double*)a)[0] == ((long double*)b)[0]; }
template <>
inline bool Set< std::complex<float>,false >::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return (( std::complex<float> *)a)[0] == (( std::complex<float> *)b)[0]; }
template <>
inline bool Set< std::complex<double>,false >::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return (( std::complex<double> *)a)[0] == (( std::complex<double> *)b)[0]; }
template <>
inline bool Set< std::complex<long double>,false >::isequal(char const * a, char const * b) const { if (a == NULL && b == NULL) return true; if (a == NULL || b == NULL) return false; return (( std::complex<long double> *)a)[0] == (( std::complex<long double> *)b)[0]; }

/**
 * @}
 */
}

#include "monoid.h"
#endif
DRB053-inneronly1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * Example with loop-carried data dependence at the outer level loop.
 * But the inner level loop can be parallelized.
 */

#include <stdio.h>  /* FIX: printf() was used below without a declaration
                       (only <string.h> was included), which is an implicit
                       declaration and undefined behavior in C99 and later. */
#include <string.h>

/*
 * DataRaceBench kernel (race-free variant).  Initializes a 20x20 matrix,
 * folds each row with its successor (a[i] += a[i+1], a dependence carried by
 * the outer i loop), and prints the result.  The OpenMP pragmas are part of
 * the benchmark's semantics and are intentionally left untouched.
 */
int main(int argc,char *argv[])
{
  int i;
  int j;
  double a[20][20];

  /* Zero the matrix before use. */
  memset(a,0,(sizeof(a)));

  /* Initialization: every (i,j) element is independent, so both loop
     levels may run in parallel. */
#pragma omp parallel for private(i ,j )
  for (i = 0; i < 20; i++)
#pragma omp parallel for private(j )
    for (j = 0; j < 20; j++)
      a[i][j] = i * 20 + j;

  /* a[i] reads a[i+1]: the dependence is carried by the outer i loop, so
     only the inner j loop is parallelized here. */
  for (i = 0; i < 20 -1; i += 1) {
#pragma omp parallel for private(j )
    for (j = 0; j < 20; j += 1) {
      a[i][j] += a[i + 1][j];
    }
  }

  for (i = 0; i < 20; i++)
    for (j = 0; j < 20; j++)
      printf("%lf\n",a[i][j]);

  return 0;
}
spmv.c
/*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * * SPMV: Sparse-Matrix Dense-Vector Multiplication * Computes the product of a sparse matrix with a dense vector. * The sparse matrix is read from file in coordinate format, converted * to JDS format with configurable padding and alignment for different * devices. * ***************************************************************************/ /*************************************************************************** * * This benchmark was adapted to run on GPUs with OpenMP 4.0 pragmas * and OpenCL driver implemented in gpuclang 2.0 (based on clang 3.5) * * Marcio M Pereira <mpereira@ic.unicamp.br> * ***************************************************************************/ /* * === NOTE === * * The Polyhedral optmizations restricts the class of loops it can manipulate * to sequences of imperfectly nested loops with particular constraints on the * loop bound and array subscript expressions. * * To allow this optimization we fixed the problem size with __STATIC__ tag * comment this tag to use original version. 
* */ #ifndef __STATIC__ #define __STATIC__ #endif #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "convert_dataset.h" #include "../../common/parboil.h" #include "../../common/polybenchUtilFuncts.h" #define ERROR_THRESHOLD 0.05 #define GPU 1 #ifdef __STATIC__ // Define statically the problem size #define N 146689 #else int N; #endif /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; double t_start, t_end, t_start_GPU, t_end_GPU; float *h_Ax_vector_GPU, *h_Ax_vector_CPU; void input_vec(char *fName,float *h_vec,int dim) { FILE* fid = fopen(fName, "rb"); fread (h_vec, sizeof (float), dim, fid); fclose(fid); } void compareResults(DATA_TYPE *A, DATA_TYPE *A_GPU) { int i, fail=0; for (i=0; i < N; i++) { if (percentDiff(A[i], A_GPU[i]) > ERROR_THRESHOLD) { fail++; } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } static int generate_vector(float *x_vector, int dim) { srand(54321); int i; for(i=0;i<dim;i++) { x_vector[i] = (rand() / (float) RAND_MAX); } return 0; } double spmvGPU(int argc, char** argv) { struct pb_Parameters *parameters; parameters = pb_ReadParameters(&argc, argv); if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) { fprintf(stderr, "Expecting two input filenames\n"); exit(-1); } int len; int depth; int dim; int pad = 1; int nzcnt_len; float *h_data; int *h_indices; int *h_ptr; int *h_perm; int *h_nzcnt; //vector float *h_Ax_vector; float *h_x_vector; int col_count; coo_to_jds( parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx 1, // row padding pad, // warp size 1, // pack size 1, // is mirrored? 
0, // binary matrix 0, // debug level [0:2] &h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm, &col_count, &dim, &len, &nzcnt_len, &depth ); h_Ax_vector=(float*)malloc(sizeof(float)*dim); h_x_vector=(float*)malloc(sizeof(float)*dim); input_vec( parameters->inpFiles[1],h_x_vector,dim); #ifndef __STATIC__ N = dim; #endif int p, i; t_start_GPU = rtclock(); //main execution #pragma omp target device(GPU) \ map(to: h_nzcnt[:nzcnt_len], h_ptr[:col_count], h_indices[:len], h_data[:len], h_perm[:col_count], h_x_vector[:N]) \ map(from: h_Ax_vector[:N]) for(p=0;p<50;p++) { #pragma omp parallel for for (i = 0; i < N; i++) { int k; float sum = 0.0f; int bound = h_nzcnt[i]; for(k=0;k<bound;k++ ) { int j = h_ptr[k] + i; int in = h_indices[j]; float d = h_data[j]; float t = h_x_vector[in]; sum += d*t; } h_Ax_vector[h_perm[i]] = sum; } } t_end_GPU = rtclock(); h_Ax_vector_GPU = h_Ax_vector; free (h_data); free (h_indices); free (h_ptr); free (h_perm); free (h_nzcnt); free (h_x_vector); pb_FreeParameters(parameters); return t_end_GPU - t_start_GPU; } double spmvCPU(int argc, char** argv) { struct pb_Parameters *parameters; parameters = pb_ReadParameters(&argc, argv); if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) { fprintf(stderr, "Expecting two input filenames\n"); exit(-1); } int len; int depth; int dim; int pad = 1; int nzcnt_len; float *h_data; int *h_indices; int *h_ptr; int *h_perm; int *h_nzcnt; //vector float *h_Ax_vector; float *h_x_vector; int col_count; coo_to_jds( parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx 1, // row padding pad, // warp size 1, // pack size 1, // is mirrored? 
0, // binary matrix 0, // debug level [0:2] &h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm, &col_count, &dim, &len, &nzcnt_len, &depth ); h_Ax_vector=(float*)malloc(sizeof(float)*dim); h_x_vector=(float*)malloc(sizeof(float)*dim); input_vec( parameters->inpFiles[1],h_x_vector,dim); #ifndef __STATIC__ N = dim; #endif int p, i; //main execution t_start = rtclock(); for(p=0;p<50;p++) { for (i = 0; i < N; i++) { int k; float sum = 0.0f; int bound = h_nzcnt[i]; for(k=0;k<bound;k++ ) { int j = h_ptr[k] + i; int in = h_indices[j]; float d = h_data[j]; float t = h_x_vector[in]; sum += d*t; } h_Ax_vector[h_perm[i]] = sum; } } t_end = rtclock(); h_Ax_vector_CPU = h_Ax_vector; free (h_data); free (h_indices); free (h_ptr); free (h_perm); free (h_nzcnt); free (h_x_vector); pb_FreeParameters(parameters); return t_end - t_start; } int main(int argc, char** argv) { double t_GPU, t_CPU; t_GPU = spmvGPU(argc, argv); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_GPU); t_CPU = spmvCPU(argc, argv); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_CPU); compareResults(h_Ax_vector_GPU, h_Ax_vector_CPU); free (h_Ax_vector_GPU); free (h_Ax_vector_CPU); return 0; }
triplet_kpoint.c
/* Copyright (C) 2015 Atsushi Togo */ /* All rights reserved. */ /* These codes were originally parts of spglib, but only develped */ /* and used for phono3py. Therefore these were moved from spglib to */ /* phono3py. This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

/* q-point triplet enumeration for phono3py: find irreducible (q, q', q'')
 * triplets with q + q' + q'' = G on a regular mesh, and map them into the
 * Brillouin zone.  Statement order and the OpenMP clauses here are
 * load-bearing; only comments have been added. */

#include <stdlib.h>
#include <mathfunc.h>
#include <kpoint.h>
#include <kgrid.h>
#include <triplet_h/triplet.h>
#include <triplet_h/triplet_kpoint.h>

#define KPT_NUM_BZ_SEARCH_SPACE 125

/* 5x5x5 block of lattice translations (components cycling through
 * 0, 1, 2, -2, -1) searched when relocating q'' into the BZ. */
static int bz_search_space[KPT_NUM_BZ_SEARCH_SPACE][3] = {
  { 0, 0, 0}, { 0, 0, 1}, { 0, 0, 2}, { 0, 0, -2}, { 0, 0, -1},
  { 0, 1, 0}, { 0, 1, 1}, { 0, 1, 2}, { 0, 1, -2}, { 0, 1, -1},
  { 0, 2, 0}, { 0, 2, 1}, { 0, 2, 2}, { 0, 2, -2}, { 0, 2, -1},
  { 0, -2, 0}, { 0, -2, 1}, { 0, -2, 2}, { 0, -2, -2}, { 0, -2, -1},
  { 0, -1, 0}, { 0, -1, 1}, { 0, -1, 2}, { 0, -1, -2}, { 0, -1, -1},
  { 1, 0, 0}, { 1, 0, 1}, { 1, 0, 2}, { 1, 0, -2}, { 1, 0, -1},
  { 1, 1, 0}, { 1, 1, 1}, { 1, 1, 2}, { 1, 1, -2}, { 1, 1, -1},
  { 1, 2, 0}, { 1, 2, 1}, { 1, 2, 2}, { 1, 2, -2}, { 1, 2, -1},
  { 1, -2, 0}, { 1, -2, 1}, { 1, -2, 2}, { 1, -2, -2}, { 1, -2, -1},
  { 1, -1, 0}, { 1, -1, 1}, { 1, -1, 2}, { 1, -1, -2}, { 1, -1, -1},
  { 2, 0, 0}, { 2, 0, 1}, { 2, 0, 2}, { 2, 0, -2}, { 2, 0, -1},
  { 2, 1, 0}, { 2, 1, 1}, { 2, 1, 2}, { 2, 1, -2}, { 2, 1, -1},
  { 2, 2, 0}, { 2, 2, 1}, { 2, 2, 2}, { 2, 2, -2}, { 2, 2, -1},
  { 2, -2, 0}, { 2, -2, 1}, { 2, -2, 2}, { 2, -2, -2}, { 2, -2, -1},
  { 2, -1, 0}, { 2, -1, 1}, { 2, -1, 2}, { 2, -1, -2}, { 2, -1, -1},
  {-2, 0, 0}, {-2, 0, 1}, {-2, 0, 2}, {-2, 0, -2}, {-2, 0, -1},
  {-2, 1, 0}, {-2, 1, 1}, {-2, 1, 2}, {-2, 1, -2}, {-2, 1, -1},
  {-2, 2, 0}, {-2, 2, 1}, {-2, 2, 2}, {-2, 2, -2}, {-2, 2, -1},
  {-2, -2, 0}, {-2, -2, 1}, {-2, -2, 2}, {-2, -2, -2}, {-2, -2, -1},
  {-2, -1, 0}, {-2, -1, 1}, {-2, -1, 2}, {-2, -1, -2}, {-2, -1, -1},
  {-1, 0, 0}, {-1, 0, 1}, {-1, 0, 2}, {-1, 0, -2}, {-1, 0, -1},
  {-1, 1, 0}, {-1, 1, 1}, {-1, 1, 2}, {-1, 1, -2}, {-1, 1, -1},
  {-1, 2, 0}, {-1, 2, 1}, {-1, 2, 2}, {-1, 2, -2}, {-1, 2, -1},
  {-1, -2, 0}, {-1, -2, 1}, {-1, -2, 2}, {-1, -2, -2}, {-1, -2, -1},
  {-1, -1, 0}, {-1, -1, 1}, {-1, -1, 2}, {-1, -1, -2}, {-1, -1, -1}
};

static void grid_point_to_address_double(int address_double[3],
                                         const int grid_point,
                                         const int mesh[3],
                                         const int is_shift[3]);
static int
get_ir_triplets_at_q(int map_triplets[],
                     int map_q[],
                     int grid_address[][3],
                     const int grid_point,
                     const int mesh[3],
                     const MatINT * rot_reciprocal,
                     const int swappable);
static int get_BZ_triplets_at_q(int triplets[][3],
                                const int grid_point,
                                TPLCONST int bz_grid_address[][3],
                                const int bz_map[],
                                const int map_triplets[],
                                const int num_map_triplets,
                                const int mesh[3]);
static int get_third_q_of_triplets_at_q(int bz_address[3][3],
                                        const int q_index,
                                        const int bz_map[],
                                        const int mesh[3],
                                        const int bzmesh[3]);
static void modulo_i3(int v[3], const int m[3]);

/* Public wrapper: build the reciprocal point group from `rotations`
 * (optionally including time reversal), delegate to get_ir_triplets_at_q,
 * and return the number of irreducible triplets. */
int tpk_get_ir_triplets_at_q(int map_triplets[],
                             int map_q[],
                             int grid_address[][3],
                             const int grid_point,
                             const int mesh[3],
                             const int is_time_reversal,
                             const MatINT * rotations,
                             const int swappable)
{
  int num_ir;
  MatINT *rot_reciprocal;

  rot_reciprocal = kpt_get_point_group_reciprocal(rotations, is_time_reversal);
  num_ir = get_ir_triplets_at_q(map_triplets,
                                map_q,
                                grid_address,
                                grid_point,
                                mesh,
                                rot_reciprocal,
                                swappable);
  mat_free_MatINT(rot_reciprocal);
  return num_ir;
}

/* Public wrapper around get_BZ_triplets_at_q (no extra work). */
int tpk_get_BZ_triplets_at_q(int triplets[][3],
                             const int grid_point,
                             TPLCONST int bz_grid_address[][3],
                             const int bz_map[],
                             const int map_triplets[],
                             const int num_map_triplets,
                             const int mesh[3])
{
  return get_BZ_triplets_at_q(triplets,
                              grid_point,
                              bz_grid_address,
                              bz_map,
                              map_triplets,
                              num_map_triplets,
                              mesh);
}

/* For a fixed q (= grid_point), fill map_q with the irreducible-q mapping
 * under the stabilizer of q, and map_triplets with the irreducible triplet
 * representative for every grid point.  Returns the number of irreducible
 * triplets.  When `swappable`, triplets related by q' <-> q'' are merged. */
static int get_ir_triplets_at_q(int map_triplets[],
                                int map_q[],
                                int grid_address[][3],
                                const int grid_point,
                                const int mesh[3],
                                const MatINT * rot_reciprocal,
                                const int swappable)
{
  int i, j, num_grid, q_2, num_ir_q, num_ir_triplets, ir_grid_point;
  int mesh_double[3], is_shift[3];
  int address_double0[3], address_double1[3], address_double2[3];
  int *ir_grid_points, *third_q;
  double tolerance;
  double stabilizer_q[1][3];
  MatINT *rot_reciprocal_q;

  tolerance = 0.01 / (mesh[0] + mesh[1] + mesh[2]);
  num_grid = mesh[0] * mesh[1] * mesh[2];

  for (i = 0; i < 3; i++) {
    /* Only consider the gamma-point */
    is_shift[i] = 0;
    mesh_double[i] = mesh[i] * 2;
  }

  /* Search irreducible q-points (map_q) with a stabilizer */
  /* q */
  grid_point_to_address_double(address_double0, grid_point, mesh, is_shift);
  for (i = 0; i < 3; i++) {
    stabilizer_q[0][i] =
      (double)address_double0[i] / mesh_double[i] - (address_double0[i] > mesh[i]);
  }

  rot_reciprocal_q = kpt_get_point_group_reciprocal_with_q(rot_reciprocal,
                                                           tolerance,
                                                           1,
                                                           stabilizer_q);
  num_ir_q = kpt_get_irreducible_reciprocal_mesh(grid_address,
                                                 map_q,
                                                 mesh,
                                                 is_shift,
                                                 rot_reciprocal_q);
  mat_free_MatINT(rot_reciprocal_q);

  /* NOTE(review): malloc results are not checked here (pre-existing). */
  third_q = (int*) malloc(sizeof(int) * num_ir_q);
  ir_grid_points = (int*) malloc(sizeof(int) * num_ir_q);
  num_ir_q = 0;
  for (i = 0; i < num_grid; i++) {
    if (map_q[i] == i) {
      ir_grid_points[num_ir_q] = i;
      num_ir_q++;
    }
    map_triplets[i] = -1;  /* sentinel: not yet assigned a representative */
  }

  /* For each irreducible q', compute q'' = -q - q' (in doubled coordinates)
   * and record its grid index.  Iterations are independent. */
#pragma omp parallel for private(j, address_double1, address_double2)
  for (i = 0; i < num_ir_q; i++) {
    grid_point_to_address_double(address_double1,
                                 ir_grid_points[i],
                                 mesh,
                                 is_shift); /* q' */
    for (j = 0; j < 3; j++) { /* q'' */
      address_double2[j] = - address_double0[j] - address_double1[j];
    }
    third_q[i] = kgd_get_grid_point_double_mesh(address_double2, mesh);
  }

  num_ir_triplets = 0;
  if (swappable) { /* search q1 <-> q2 */
    /* If the triplet with q' and q'' exchanged was already chosen as a
     * representative, reuse it instead of counting a new one. */
    for (i = 0; i < num_ir_q; i++) {
      ir_grid_point = ir_grid_points[i];
      q_2 = third_q[i];
      if (map_triplets[map_q[q_2]] > -1) {
        map_triplets[ir_grid_point] = map_q[q_2];
      } else {
        map_triplets[ir_grid_point] = ir_grid_point;
        num_ir_triplets++;
      }
    }
  } else {
    for (i = 0; i < num_ir_q; i++) {
      ir_grid_point = ir_grid_points[i];
      map_triplets[ir_grid_point] = ir_grid_point;
      num_ir_triplets++;
    }
  }

  /* Propagate the representative to every (non-irreducible) grid point. */
#pragma omp parallel for
  for (i = 0; i < num_grid; i++) {
    map_triplets[i] = map_triplets[map_q[i]];
  }

  free(third_q);
  third_q = NULL;
  free(ir_grid_points);
  ir_grid_points = NULL;

  return num_ir_triplets;
}

/* For each irreducible triplet representative, pick BZ images of
 * (q, q', q'') and store their bz_map indices in `triplets`.
 * Returns the number of triplets written. */
static int get_BZ_triplets_at_q(int triplets[][3],
                                const int grid_point,
                                TPLCONST int bz_grid_address[][3],
                                const int bz_map[],
                                const int map_triplets[],
                                const int num_map_triplets,
                                const int mesh[3])
{
  int i, j, k, num_ir;
  int bz_address[3][3], bz_address_double[3], bzmesh[3];
  int *ir_grid_points;

  for (i = 0; i < 3; i++) {
    bzmesh[i] = mesh[i] * 2;
  }

  num_ir = 0;
  ir_grid_points = (int*) malloc(sizeof(int) * num_map_triplets);
  for (i = 0; i < num_map_triplets; i++) {
    if (map_triplets[i] == i) {
      ir_grid_points[num_ir] = i;
      num_ir++;
    }
  }

#pragma omp parallel for private(j, k, bz_address, bz_address_double)
  for (i = 0; i < num_ir; i++) {
    for (j = 0; j < 3; j++) {
      bz_address[0][j] = bz_grid_address[grid_point][j];
      bz_address[1][j] = bz_grid_address[ir_grid_points[i]][j];
      bz_address[2][j] = - bz_address[0][j] - bz_address[1][j];
    }
    /* Try to relocate q'', then q', then q into the BZ; stop at the first
     * member that lands exactly on the BZ boundary search result 0. */
    for (j = 2; j > -1; j--) {
      if (get_third_q_of_triplets_at_q(bz_address,
                                       j,
                                       bz_map,
                                       mesh,
                                       bzmesh) == 0) {
        break;
      }
    }
    for (j = 0; j < 3; j++) {
      for (k = 0; k < 3; k++) {
        bz_address_double[k] = bz_address[j][k] * 2;
      }
      triplets[i][j] =
        bz_map[kgd_get_grid_point_double_mesh(bz_address_double, bzmesh)];
    }
  }

  free(ir_grid_points);

  return num_ir;
}

/* Shift bz_address[q_index] by the lattice translation (from
 * bz_search_space) that lands inside the BZ with the smallest total
 * |G| sum; returns that smallest sum. */
static int get_third_q_of_triplets_at_q(int bz_address[3][3],
                                        const int q_index,
                                        const int bz_map[],
                                        const int mesh[3],
                                        const int bzmesh[3])
{
  int i, j, smallest_g, smallest_index, sum_g, delta_g[3];
  int bzgp[KPT_NUM_BZ_SEARCH_SPACE], bz_address_double[3];

  modulo_i3(bz_address[q_index], mesh);
  for (i = 0; i < 3; i++) {
    delta_g[i] = 0;
    for (j = 0; j < 3; j++) {
      delta_g[i] += bz_address[j][i];
    }
    delta_g[i] /= mesh[i];
  }

  /* bzgp[i] is the bz_map entry for this q shifted by search-space
   * translation i; -1 marks "outside the BZ". */
  for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
    for (j = 0; j < 3; j++) {
      bz_address_double[j] =
        (bz_address[q_index][j] + bz_search_space[i][j] * mesh[j]) * 2;
    }
    bzgp[i] = bz_map[kgd_get_grid_point_double_mesh(bz_address_double, bzmesh)];
  }

  /* NOTE(review): this scan jumps to `escape` on the first in-BZ entry, but
   * control reaches `escape` either way; it looks vestigial — confirm before
   * removing. */
  for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
    if (bzgp[i] != -1) {
      goto escape;
    }
  }

escape:
  smallest_g = 4;  /* any valid sum_g found below will be <= 3 */
  smallest_index = 0;
  for (i = 0; i < KPT_NUM_BZ_SEARCH_SPACE; i++) {
    if (bzgp[i] > -1) { /* q'' is in BZ */
      sum_g = (abs(delta_g[0] + bz_search_space[i][0]) +
               abs(delta_g[1] + bz_search_space[i][1]) +
               abs(delta_g[2] + bz_search_space[i][2]));
      if (sum_g < smallest_g) {
        smallest_index = i;
        smallest_g = sum_g;
      }
    }
  }

  for (i = 0; i < 3; i++) {
    bz_address[q_index][i] += bz_search_space[smallest_index][i] * mesh[i];
  }

  return smallest_g;
}

/* Convert a linear grid index into a doubled-coordinate 3D address
 * (address*2 + shift); axis ordering depends on GRID_ORDER_XYZ. */
static void grid_point_to_address_double(int address_double[3],
                                         const int grid_point,
                                         const int mesh[3],
                                         const int is_shift[3])
{
  int i;
  int address[3];

#ifndef GRID_ORDER_XYZ
  address[2] = grid_point / (mesh[0] * mesh[1]);
  address[1] = (grid_point - address[2] * mesh[0] * mesh[1]) / mesh[0];
  address[0] = grid_point % mesh[0];
#else
  address[0] = grid_point / (mesh[1] * mesh[2]);
  address[1] = (grid_point - address[0] * mesh[1] * mesh[2]) / mesh[2];
  address[2] = grid_point % mesh[2];
#endif

  for (i = 0; i < 3; i++) {
    address_double[i] = address[i] * 2 + is_shift[i];
  }
}

/* Component-wise non-negative modulo: v[i] <- v[i] mod m[i] in [0, m[i]). */
static void modulo_i3(int v[3], const int m[3])
{
  int i;

  for (i = 0; i < 3; i++) {
    v[i] = v[i] % m[i];
    if (v[i] < 0) {
      v[i] += m[i];
    }
  }
}
GB_unop__acos_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__acos_fp32_fp32
// op(A') function:  GB_unop_tran__acos_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = acosf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = acosf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = acosf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOS || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = acosf(x) elementwise over anz entries, full or bitmap form.
GrB_Info GB_unop_apply__acos_fp32_fp32
(
    float *Cx,                      // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a straight memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = acosf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = acosf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The shared transpose kernel (GB_unop_transpose.c) is specialized via the
// GB_* macros defined above.
GrB_Info GB_unop_tran__acos_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_uint64_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_uint8) // op(A') function: GB (_unop_tran__identity_uint64_uint8) // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_uint8) ( uint64_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { 
#pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
scheduler-clause.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/*
 * OpenMP scheduling-clause demo: sums a[0..n-1] into `suma` with a parallel
 * for using firstprivate/lastprivate and schedule(runtime), printing each
 * partial sum together with the id of the thread that produced it.  The
 * iteration count comes from argv[1] and is capped at the array capacity.
 */
int main(int argc, char **argv)
{
  int a[20];
  int suma = 0;
  int n = 20;
  int i;

  /* An iteration count on the command line is mandatory. */
  if (argc < 2) {
    fprintf(stderr, "\nFalta iteraciones \n");
    exit(-1);
  }

  n = atoi(argv[1]);
  if (n > 20) {
    n = 20; /* never exceed the capacity of a[] */
  }

  for (i = 0; i < n; i++) {
    a[i] = i;
  }

#pragma omp parallel for firstprivate(suma) \
                         lastprivate(suma) schedule(runtime)
  for (i = 0; i < n; i++) {
    suma = suma + a[i];
    printf(" thread %d suma a[%d]=%d suma=%d \n",
           omp_get_thread_num(), i, a[i], suma);
  }

  /* lastprivate: `suma` holds the value from the last iteration's thread. */
  printf("Fuera de 'parallel for' suma=%d\n", suma);
  return 0;
}
target_exit_data.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target enter data map(alloc: i) #pragma omp target exit data map(present, release: i) // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target exit data map(present, release: i) // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
tzvjsvd.c
#include "aalloc.h"
#include "mtxio.h"
#include "pjs.h"
#include "timer.h"
#include "zmerge.h"
#include "zsplit.h"
#include "zvjsvd.h"

/*
 * Driver for the complex (Z) vectorized Jacobi SVD (zvjsvd_).
 *
 * Usage: tzvjsvd J M N BaseName
 *   J        - Jacobi strategy id (must be PJS_ME or PJS_MM)
 *   M, N     - matrix dimensions (N <= M, N even, (N/2) aligned per VDL_1)
 *   BaseName - base name used by open_ro_/open_wo_ for the data files
 *
 * Pipeline: read G -> split into real/imag planes -> run zvjsvd_ ->
 * write singular values, V, and the transformed G back out, timing
 * each stage with the TSC helpers.  Non-zero returns encode the
 * failing stage (1..14).
 */
int main(int argc, char *argv[])
{
  (void)set_cbwr();
  if (argc != 5) {
    (void)fprintf(stderr, "%s J M N BaseName\n", *argv);
    return 1;
  }
  /* M: number of rows; also the leading dimensions of G and its planes. */
  const fnat m = (fnat)atoz(argv[2u]);
  if (!m)
    return 3;
  fnat ldG = m, ldGr = m, ldGi = m;
  /* N: number of columns; must be even, not exceed M, and have its half
     aligned to the vector length (VDL_1 mask check). */
  const fnat n = (fnat)atoz(argv[3u]);
  if (!n)
    return 4;
  if (n > m)
    return 4;
  if (n & 1u)
    return 4;
  if ((n >> 1u) & VDL_1)
    return 4;
  fnat ldV = n, ldVr = n, ldVi = n;
  /* J selects the parallel Jacobi strategy; pjs() builds the step table. */
  const long j = atol(argv[1u]);
  switch (j) {
  case PJS_ME:
  case PJS_MM:
    break;
  default:
    return 2;
  }
  unsigned stp = 0u;
  const unsigned *const js = pjs(j, (unsigned)n, &stp);
  if (!js)
    return 2;
  const char *const bn = argv[4u];
  if (!*bn)
    return 5;
  /* Read the input matrix G (file selected by BaseName and tag "G"). */
  const int gd = open_ro_(bn, "G");
  if (gd < 0)
    return 5;
  double complex *G = (double complex*)NULL;
  double *Gr = (double*)NULL;
  double *Gi = (double*)NULL;
  if (zalloc2_(&m, &n, &G, &ldG, &Gr, &ldGr, &Gi, &ldGi) < 0)
    return 6;
  if (zread2_(&m, &n, G, &ldG, &gd))
    return 5;
  if (close(gd))
    return 5;
  /* TSC-based timers: hz is the calibrated frequency, b/e bracket stages. */
  unsigned rd[2u] = { 0u, 0u };
  const uint64_t hz = tsc_get_freq_hz_(rd);
  uint64_t b = rdtsc_beg(rd);
  /* Split interleaved complex G into separate real (Gr) / imag (Gi) planes. */
  if (zsplit_(&m, &n, G, &ldG, Gr, &ldGr, Gi, &ldGi) < 0)
    return 6;
  uint64_t e = rdtsc_end(rd);
  const long double ts = tsc_lap(hz, b, e);
  double complex *V = (double complex*)NULL;
  double *Vr = (double*)NULL;
  double *Vi = (double*)NULL;
  if (zalloc2_(&n, &n, &V, &ldV, &Vr, &ldVr, &Vi, &ldVi) < 0)
    return 7;
  /* Workspace layout: w = [eS | fS | work(5n doubles)]; the work region
     is also reinterpreted below as an array of 'wide' (ws). */
  double *const w = (double*)aligned_alloc(VA, (7u * (n * sizeof(double))));
  if (!w)
    return 8;
  double *const eS = w;
  double *const fS = eS + n;
  double *const work = fS + n;
  wide *const ws = (wide*)work;
#ifdef JTRACE
  /* Trace tag: BaseName plus the (possibly shifted) strategy id. */
  (void)sprintf((char*)work, "%s.%ld", bn,
#ifdef _OPENMP
                j
#else /* !_OPENMP */
                (j - 1l)
#endif /* ?_OPENMP */
                );
#endif /* JTRACE */
  unsigned *const iwork = (unsigned*)aligned_alloc(VA, ((n >> VDLlg) * sizeof(unsigned)));
  if (!iwork)
    return 9;
  const unsigned swp = 999u;   /* sweep limit for the Jacobi iteration */
  b = rdtsc_beg(rd);
  /* The SVD proper; o is the strategy's return (sweeps or error code —
     see zvjsvd_ for the exact meaning). */
  const fint o = zvjsvd_(&m, &n, Gr, &ldGr, Gi, &ldGi, Vr, &ldVr, Vi, &ldVi, eS, fS, js, &stp, &swp, work, iwork);
  e = rdtsc_end(rd);
  const long double tj = tsc_lap(hz, b, e);
  free(iwork);
  /* Reconstitute the singular values: sigma_i = fS[i] * 2^eS[i],
     computed in 'wide' precision into ws (which aliases work). */
#ifdef _OPENMP
#pragma omp parallel for default(none) shared(eS,fS,ws,n)
#endif /* _OPENMP */
  for (fnat i = 0u; i < n; ++i) {
    if (eS[i] != 0.0)
      ws[i] = scalbw(fS[i], eS[i]);
    else // 2^0 == 1
      ws[i] = fS[i];
  }
  /* Output file tags encode strategy and OpenMP build (S1..S4 etc.). */
  const int sd = open_wo_(bn,
#ifdef _OPENMP
                          ((j == PJS_ME) ? "S2" : "S4")
#else /* !_OPENMP */
                          ((j == PJS_ME) ? "S1" : "S3")
#endif /* ?_OPENMP */
                          );
  if (sd < 0)
    return 10;
  /* NOTE(review): the js buffer is reused (const cast away) as a size_t
     slot to pass the desired file size to resizef_ — confirm pjs() really
     returns writable, suitably aligned storage. */
  *(size_t*)js = (n * sizeof(wide));
  if (resizef_(&sd, (const size_t*)js))
    return 10;
  const fnat n2 = (n << 1u), n1 = 1u;
  if (dwrite2_(&n2, &n1, work, &n2, &sd))
    return 10;
  if (close(sd))
    return 10;
  free(w);
  b = rdtsc_beg(rd);
  /* Merge the V planes back into interleaved complex form and write it. */
  if (zmerge_(&n, &n, Vr, &ldVr, Vi, &ldVi, V, &ldV) < 0)
    return 11;
  e = rdtsc_end(rd);
  const long double tv = tsc_lap(hz, b, e);
  free(Vi);
  free(Vr);
  const int vd = open_wo_(bn,
#ifdef _OPENMP
                          ((j == PJS_ME) ? "V2" : "V4")
#else /* !_OPENMP */
                          ((j == PJS_ME) ? "V1" : "V3")
#endif /* ?_OPENMP */
                          );
  if (vd < 0)
    return 12;
  *(size_t*)js = (n * (n * sizeof(double complex)));
  if (resizef_(&vd, (const size_t*)js))
    return 12;
  if (zwrite2_(&n, &n, V, &ldV, &vd))
    return 12;
  if (close(vd))
    return 12;
  free(V);
  b = rdtsc_beg(rd);
  /* Same merge/write for the transformed G (left vectors, presumably U —
     the output tag is "U*"; confirm against zvjsvd_'s contract). */
  if (zmerge_(&m, &n, Gr, &ldGr, Gi, &ldGi, G, &ldG) < 0)
    return 13;
  e = rdtsc_end(rd);
  const long double tg = tsc_lap(hz, b, e);
  free(Gi);
  free(Gr);
  const int ud = open_wo_(bn,
#ifdef _OPENMP
                          ((j == PJS_ME) ? "U2" : "U4")
#else /* !_OPENMP */
                          ((j == PJS_ME) ? "U1" : "U3")
#endif /* ?_OPENMP */
                          );
  if (ud < 0)
    return 14;
  *(size_t*)js = (m * (n * sizeof(double complex)));
  if (resizef_(&ud, (const size_t*)js))
    return 14;
  if (zwrite2_(&m, &n, G, &ldG, &ud))
    return 14;
  if (close(ud))
    return 14;
  free(G);
  /* CSV summary: name, strategy, dims, split/Jacobi times, return code,
     V-merge and G-merge times. */
  (void)fprintf(stdout, "\"%s\",%1ld,%4llu,%4llu,%15.9Lf,%15.9Lf,%3lld,%15.9Lf,%15.9Lf\n", bn,
#ifdef _OPENMP
                j
#else /* !_OPENMP */
                (j - 1l)
#endif /* ?_OPENMP */
                , (unsigned long long)m, (unsigned long long)n, ts, tj, (long long)o, tv, tg);
  (void)fflush(stdout);
  free((void*)js);
  return EXIT_SUCCESS;
}
IJMatrix_parcsr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * IJMatrix_ParCSR interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "_hypre_parcsr_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJMatrixCreateParCSR
 *
 * Creates the underlying ParCSR object for an IJMatrix from its row/col
 * partitionings, shifting the partitioning to be zero-based when a global
 * first row/col offset is present.  Stores the new object in the
 * IJMatrix's object slot.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixCreateParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_Int num_procs;
   HYPRE_Int i;

   hypre_MPI_Comm_size(comm,&num_procs);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* Assumed partition: only the local [start, end) pair is stored. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   /* Shift so row_starts is relative to the global first row. */
   if (hypre_IJMatrixGlobalFirstRow(matrix))
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i] - hypre_IJMatrixGlobalFirstRow(matrix);
      }
   }
   else
   {
      for (i = 0; i < 2; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }

   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
      if (hypre_IJMatrixGlobalFirstCol(matrix))
      {
         for (i = 0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i]-hypre_IJMatrixGlobalFirstCol(matrix);
         }
      }
      else
      {
         for (i = 0; i < 2; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      /* Square case: alias the same array rather than allocate twice. */
      col_starts = row_starts;
   }

   par_matrix = hypre_ParCSRMatrixCreate(comm, hypre_IJMatrixGlobalNumRows(matrix),
                                         hypre_IJMatrixGlobalNumCols(matrix),
                                         row_starts, col_starts, 0, 0, 0);
#else
   /* Global partition: all num_procs+1 boundaries are stored. */
   row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);

   /* Shift so the partitioning starts at zero. */
   if (row_partitioning[0])
   {
      for (i = 0; i < num_procs+1; i++)
      {
         row_starts[i] = row_partitioning[i]-row_partitioning[0];
      }
   }
   else
   {
      for (i = 0; i < num_procs+1; i++)
      {
         row_starts[i] = row_partitioning[i];
      }
   }

   if (row_partitioning != col_partitioning)
   {
      col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
      if (col_partitioning[0])
      {
         for (i = 0; i < num_procs+1; i++)
         {
            col_starts[i] = col_partitioning[i]-col_partitioning[0];
         }
      }
      else
      {
         for (i = 0; i < num_procs+1; i++)
         {
            col_starts[i] = col_partitioning[i];
         }
      }
   }
   else
   {
      col_starts = row_starts;
   }

   /* Global sizes are read off the (zero-based) last boundary. */
   par_matrix = hypre_ParCSRMatrixCreate(comm, row_starts[num_procs],
                                         col_starts[num_procs],
                                         row_starts, col_starts, 0, 0, 0);
#endif

   hypre_IJMatrixObject(matrix) = par_matrix;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetRowSizesParCSR
 *
 * Records, per local row, the expected number of entries ('sizes') in
 * the auxiliary matrix's row-space array, creating the auxiliary matrix
 * if it does not exist yet.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetRowSizesParCSR(hypre_IJMatrix *matrix,
                                const HYPRE_Int *sizes)
{
   HYPRE_Int local_num_rows, local_num_cols, i, *row_space = NULL;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
   HYPRE_Int my_id;
   hypre_MPI_Comm_rank(hypre_IJMatrixComm(matrix), &my_id);
   local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
   local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

   /* Reuse an existing row-space array when present; otherwise allocate. */
   if (aux_matrix)
   {
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
   }
   if (!row_space)
   {
      row_space = hypre_CTAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < local_num_rows; i++)
   {
      row_space[i] = sizes[i];
   }
   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, row_space);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixRowSpace(aux_matrix) = row_space;

#if defined(HYPRE_USING_CUDA)
   /* For the device path, the user-declared on-process element count is
      the sum of all row sizes. */
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = 0;
   for (i = 0; i < local_num_rows; i++)
   {
      hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) += sizes[i];
   }
#endif

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetDiagOffdSizesParCSR
 * sets diag_i inside the diag part of the ParCSRMatrix
 * and offd_i inside the offd part,
 * requires exact row sizes for diag and offd
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetDiagOffdSizesParCSR(hypre_IJMatrix *matrix,
                                     const HYPRE_Int *diag_sizes,
                                     const HYPRE_Int *offd_sizes)
{
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
   local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
   HYPRE_Int my_id;
   hypre_MPI_Comm_rank(hypre_IJMatrixComm(matrix), &my_id);
   local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
   local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif

   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *)hypre_IJMatrixTranslator(matrix);

   if (!aux_matrix)
   {
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* Lazily allocate the per-row size arrays, then copy the user data in. */
   if ( hypre_AuxParCSRMatrixDiagSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixDiagSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   if ( hypre_AuxParCSRMatrixOffdSizes(aux_matrix) == NULL)
   {
      hypre_AuxParCSRMatrixOffdSizes(aux_matrix) = hypre_TAlloc(HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST);
   }
   hypre_TMemcpy(hypre_AuxParCSRMatrixDiagSizes(aux_matrix), diag_sizes, HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   hypre_TMemcpy(hypre_AuxParCSRMatrixOffdSizes(aux_matrix), offd_sizes, HYPRE_Int, local_num_rows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   /* Exact sizes are known, so the aux (big-int) workspace is not needed. */
   hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixSetMaxOnProcElmtsParCSR
 *
 * Records the user's bound on on-process elements (CUDA builds only;
 * a no-op otherwise), creating the auxiliary matrix if needed.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetMaxOnProcElmtsParCSR(hypre_IJMatrix *matrix,
                                      HYPRE_Int max_on_proc_elmts)
{
#if defined(HYPRE_USING_CUDA)
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_Int local_num_rows, local_num_cols, my_id;
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   MPI_Comm comm = hypre_IJMatrixComm(matrix);

   hypre_MPI_Comm_rank(comm,&my_id);
   aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
   if (!aux_matrix)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]);
      local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]);
#else
      local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]);
      local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]);
#endif
      hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL);
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }
   hypre_AuxParCSRMatrixUsrOnProcElmts(aux_matrix) = max_on_proc_elmts;
#endif

   return hypre_error_flag;
}
/****************************************************************************** * * hypre_IJMatrixSetMaxOffProcElmtsParCSR * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetMaxOffProcElmtsParCSR(hypre_IJMatrix *matrix, HYPRE_Int max_off_proc_elmts) { hypre_AuxParCSRMatrix *aux_matrix; HYPRE_Int local_num_rows, local_num_cols, my_id; HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix); MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_MPI_Comm_rank(comm,&my_id); aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); if (!aux_matrix) { #ifdef HYPRE_NO_GLOBAL_PARTITION local_num_rows = (HYPRE_Int)(row_partitioning[1]-row_partitioning[0]); local_num_cols = (HYPRE_Int)(col_partitioning[1]-col_partitioning[0]); #else local_num_rows = (HYPRE_Int)(row_partitioning[my_id+1]-row_partitioning[my_id]); local_num_cols = (HYPRE_Int)(col_partitioning[my_id+1]-col_partitioning[my_id]); #endif hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, local_num_cols, NULL); hypre_IJMatrixTranslator(matrix) = aux_matrix; } hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts; #if defined(HYPRE_USING_CUDA) hypre_AuxParCSRMatrixUsrOffProcElmts(aux_matrix) = max_off_proc_elmts; #endif return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixInitializeParCSR * * initializes AuxParCSRMatrix and ParCSRMatrix as necessary * *****************************************************************************/ HYPRE_Int hypre_IJMatrixInitializeParCSR(hypre_IJMatrix *matrix) { return hypre_IJMatrixInitializeParCSR_v2(matrix, hypre_HandleMemoryLocation(hypre_handle())); } HYPRE_Int hypre_IJMatrixInitializeParCSR_v2(hypre_IJMatrix *matrix, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) 
hypre_IJMatrixObject(matrix); hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); HYPRE_Int local_num_rows; HYPRE_MemoryLocation memory_location_aux = hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE; if (hypre_IJMatrixAssembleFlag(matrix) == 0) { if (!par_matrix) { hypre_IJMatrixCreateParCSR(matrix); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); } local_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix)); if (!aux_matrix) { hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL); hypre_IJMatrixTranslator(matrix) = aux_matrix; } hypre_ParCSRMatrixInitialize_v2(par_matrix, memory_location); hypre_AuxParCSRMatrixInitialize_v2(aux_matrix, memory_location_aux); if (!hypre_AuxParCSRMatrixNeedAux(aux_matrix)) { HYPRE_Int i, *indx_diag, *indx_offd, *diag_i, *offd_i, *diag_sizes, *offd_sizes; hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); diag_i = hypre_CSRMatrixI(diag); offd_i = hypre_CSRMatrixI(offd); diag_sizes = hypre_AuxParCSRMatrixDiagSizes(aux_matrix); offd_sizes = hypre_AuxParCSRMatrixOffdSizes(aux_matrix); indx_diag = hypre_AuxParCSRMatrixIndxDiag(aux_matrix); indx_offd = hypre_AuxParCSRMatrixIndxOffd(aux_matrix); for (i = 0; i < local_num_rows; i++) { diag_i[i+1] = diag_i[i] + diag_sizes[i]; } hypre_CSRMatrixNumNonzeros(diag) = diag_i[local_num_rows]; hypre_CSRMatrixInitialize(diag); for (i = 0; i < local_num_rows; i++) { offd_i[i+1] = offd_i[i] + offd_sizes[i]; } hypre_CSRMatrixNumNonzeros(offd) = offd_i[local_num_rows]; hypre_CSRMatrixInitialize(offd); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_num_rows; i++) { indx_diag[i] = diag_i[i]; indx_offd[i] = offd_i[i]; } } } else if ( memory_location_aux == HYPRE_MEMORY_HOST ) { /* AB 
4/06 - the assemble routine destroys the aux matrix - so we need to recreate if initialize is called again */ if (!aux_matrix) { local_num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(par_matrix)); hypre_AuxParCSRMatrixCreate(&aux_matrix, local_num_rows, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(par_matrix)), NULL); hypre_AuxParCSRMatrixMemoryLocation(aux_matrix) = HYPRE_MEMORY_HOST; hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0; hypre_IJMatrixTranslator(matrix) = aux_matrix; } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetRowCountsParCSR * * gets the number of columns for rows specified by the user * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetRowCountsParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_BigInt *rows, HYPRE_Int *ncols) { HYPRE_BigInt row_index; MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int i, my_id, pstart, index; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION pstart = 0; #else pstart = my_id; #endif #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i, row_index) HYPRE_SMP_SCHEDULE #endif for (i=0; i < nrows; i++) { row_index = rows[i]; if (row_index >= row_partitioning[pstart] && row_index < row_partitioning[pstart+1]) { /* compute local row number */ index = (HYPRE_Int)(row_index - row_partitioning[pstart]); ncols[i] = diag_i[index+1]-diag_i[index]+offd_i[index+1]-offd_i[index]; } else { ncols[i] = 0; if (print_level) { hypre_printf 
("Warning! Row %b is not on Proc. %d!\n", row_index, my_id); } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixGetValuesParCSR * * gets values of an IJMatrix * *****************************************************************************/ HYPRE_Int hypre_IJMatrixGetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, HYPRE_BigInt *rows, HYPRE_BigInt *cols, HYPRE_Complex *values) { MPI_Comm comm = hypre_IJMatrixComm(matrix); hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject(matrix); HYPRE_Int assemble_flag = hypre_IJMatrixAssembleFlag(matrix); hypre_CSRMatrix *diag; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; hypre_CSRMatrix *offd; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(par_matrix); HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix); #ifndef HYPRE_NO_GLOBAL_PARTITION HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix); #endif HYPRE_Int i, j, n, ii, indx, pstart; HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, row, col_indx, first; HYPRE_Int row_local, row_size; HYPRE_Int warning = 0; HYPRE_Int *counter; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (assemble_flag == 0) { hypre_error_in_arg(1); if (print_level) { hypre_printf("Error! Matrix not assembled yet! 
HYPRE_IJMatrixGetValues\n"); } } #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_starts[0]; col_n = col_starts[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_starts[my_id]; col_n = col_starts[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); } if (nrows < 0) { nrows = -nrows; counter = hypre_CTAlloc(HYPRE_Int, nrows+1, HYPRE_MEMORY_HOST); counter[0] = 0; for (i=0; i < nrows; i++) { counter[i+1] = counter[i]+ncols[i]; } indx = 0; for (i=0; i < nrows; i++) { row = rows[i]; if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); row_size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (counter[i]+row_size > counter[nrows]) { hypre_error_in_arg(1); if (print_level) { hypre_printf ("Error! Not enough memory! HYPRE_IJMatrixGetValues\n"); } } if (ncols[i] < row_size) { warning = 1; } for (j = diag_i[row_local]; j < diag_i[row_local+1]; j++) { cols[indx] = (HYPRE_BigInt)diag_j[j] + col_0; values[indx++] = diag_data[j]; } for (j = offd_i[row_local]; j < offd_i[row_local+1]; j++) { cols[indx] = col_map_offd[offd_j[j]]; values[indx++] = offd_data[j]; } counter[i+1] = indx; } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } if (warning) { for (i=0; i < nrows; i++) { ncols[i] = counter[i+1] - counter[i]; } if (print_level) { hypre_printf ("Warning! 
ncols has been changed!\n"); } } hypre_TFree(counter, HYPRE_MEMORY_HOST); } else { indx = 0; for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols[ii]; if (n == 0) /* empty row */ { continue; } if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ for (i=0; i < n; i++) { col_indx = cols[indx] - first; values[indx] = 0.0; if (col_indx < col_0 || col_indx > col_n) /* search in offd */ { for (j=offd_i[row_local]; j < offd_i[row_local+1]; j++) { if (col_map_offd[offd_j[j]] == col_indx) { values[indx] = offd_data[j]; break; } } } else /* search in diag */ { col_indx = col_indx - col_0; for (j=diag_i[row_local]; j < diag_i[row_local+1]; j++) { if (diag_j[j] == (HYPRE_Int)col_indx) { values[indx] = diag_data[j]; break; } } } indx++; } } else { if (print_level) { hypre_printf ("Warning! Row %b is not on Proc. %d!\n", row, my_id); } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetValuesParCSR * * sets values in an IJMatrix before assembly, * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetValuesParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int *row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_Int row_local; //HYPRE_Int row_len; HYPRE_BigInt col_0, col_n, row; HYPRE_Int i, ii, j, n, not_found; //HYPRE_Int col_indx, cnt1; HYPRE_BigInt **aux_j; HYPRE_BigInt *local_j; HYPRE_BigInt *tmp_j; HYPRE_Complex **aux_data; HYPRE_Complex *local_data; HYPRE_Complex *tmp_data; HYPRE_Int diag_space, offd_space; HYPRE_Int 
*row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int tmp_indx, indx; HYPRE_Int space, size, old_size; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; HYPRE_Int offd_indx, diag_indx; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Complex *offd_data; HYPRE_BigInt first; HYPRE_Int pstart; /*HYPRE_Int current_num_elmts;*/ /*HYPRE_Int max_off_proc_elmts;*/ //HYPRE_Int off_proc_i_indx; //HYPRE_BigInt *off_proc_i; //HYPRE_BigInt *off_proc_j; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); /*HYPRE_Complex *off_proc_data;*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_partitioning[my_id]; col_n = col_partitioning[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif if (nrows < 0) { hypre_error_in_arg(2); if (print_level) { hypre_printf("Error! nrows negative! HYPRE_IJMatrixSetValues\n"); } } if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/ { HYPRE_BigInt *col_map_offd; HYPRE_Int num_cols_offd; HYPRE_Int j_offd; for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) /* Should we change this and allow this? This could be same column index, i.e. only last value is set, previous ones overwritten. */ { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" row %b too long! 
\n", row); } return hypre_error_flag; } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } return hypre_error_flag; } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1;*/ return hypre_error_flag; } diag_data[pos_diag] = values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } /* return -1; */ return hypre_error_flag; } } indx++; } } } } else { aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix); row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); for (ii=0; ii < nrows; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } else { tmp_j = NULL; } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_BigInt *big_offd_j; HYPRE_Int col_j; offd_indx =hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx =hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = 
hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if (num_procs > 1) { big_offd_j = hypre_CSRMatrixBigJ(offd); offd_data = hypre_CSRMatrixData(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } else /* insert into diag */ { col_j = (HYPRE_Int)(cols[indx]-col_0); for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == col_j) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = col_j; diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); if (print_level) { hypre_printf("Error in row %b ! Too many elements !\n", row); } /* return 1; */ return hypre_error_flag; } } not_found = 1; } indx++; } hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag; hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd; } } } } return hypre_error_flag; } /****************************************************************************** * * hypre_IJMatrixSetConstantValuesParCSR * * sets all values in an already assembled IJMatrix to a constant value. 
 * *****************************************************************************/

HYPRE_Int
hypre_IJMatrixSetConstantValuesParCSR( hypre_IJMatrix *matrix,
                                       HYPRE_Complex value )
{
   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/
   {
      hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
      hypre_CSRMatrix    *diag       = hypre_ParCSRMatrixDiag(par_matrix);
      hypre_CSRMatrix    *offd       = hypre_ParCSRMatrixOffd(par_matrix);
      HYPRE_Complex      *diag_data  = hypre_CSRMatrixData(diag);
      HYPRE_Complex      *offd_data  = hypre_CSRMatrixData(offd);
      HYPRE_Int           nnz_diag   = hypre_CSRMatrixNumNonzeros(diag);
      HYPRE_Int           nnz_offd   = hypre_CSRMatrixNumNonzeros(offd);

#if defined(HYPRE_USING_CUDA)
      /* When the matrix lives on the device, fill both data arrays with thrust;
         otherwise fall through to the (possibly OpenMP-parallel) host loops. */
      if (hypre_GetExecPolicy1(hypre_IJMatrixMemoryLocation(matrix)) == HYPRE_EXEC_DEVICE)
      {
         HYPRE_THRUST_CALL( fill_n, diag_data, nnz_diag, value );
         HYPRE_THRUST_CALL( fill_n, offd_data, nnz_offd, value );
      }
      else
#endif
      {
         HYPRE_Int ii;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
         for (ii = 0; ii < nnz_diag; ii++)
         {
            diag_data[ii] = value;
         }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii) HYPRE_SMP_SCHEDULE
#endif
         for (ii = 0; ii < nnz_offd; ii++)
         {
            offd_data[ii] = value;
         }
      }
   }
   else
   {
      /* the constant fill only makes sense once the sparsity pattern exists */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Matrix not assembled! Required to set constant values!");
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesParCSR
 *
 * adds row values to an IJMatrix
 *
 * Three cases are handled below:
 *   1. matrix already assembled: entries may only be added to positions that
 *      already exist in the diag/offd sparsity pattern;
 *   2. not assembled, aux rows in use (need_aux): values accumulate into the
 *      per-row auxiliary arrays, growing them as needed;
 *   3. not assembled, direct insertion: values go straight into the diag/offd
 *      CSR arrays using per-row insertion cursors kept in the aux matrix.
 * Rows owned by another process are queued in the aux matrix off_proc arrays
 * (as [row, n] pairs plus n column/value entries) for later exchange.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAddToValuesParCSR( hypre_IJMatrix       *matrix,
                                 HYPRE_Int             nrows,
                                 HYPRE_Int            *ncols,
                                 const HYPRE_BigInt   *rows,
                                 const HYPRE_Int      *row_indexes,
                                 const HYPRE_BigInt   *cols,
                                 const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_Int row_local;
   HYPRE_BigInt row;
   HYPRE_BigInt col_0, col_n;          /* first/last global column owned locally */
   HYPRE_Int i, ii, j, n, not_found;
   HYPRE_BigInt **aux_j;
   HYPRE_BigInt *local_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex **aux_data;
   HYPRE_Complex *local_data;
   HYPRE_Complex *tmp_data;
   HYPRE_Int diag_space, offd_space;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int tmp_indx, indx;
   HYPRE_Int space, size, old_size;
   HYPRE_Int cnt, cnt_diag, cnt_offd;
   HYPRE_Int pos_diag, pos_offd;
   HYPRE_Int len_diag, len_offd;
   HYPRE_Int offd_indx, diag_indx;
   HYPRE_BigInt first;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* 2-entry local partitioning: [0]/[1] bound this proc's columns */
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   /* global partitioning: indexed by processor id */
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (hypre_IJMatrixAssembleFlag(matrix))
   {
      /* Case 1: assembled matrix -- can only add to already existing entries */
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;
      HYPRE_Int j_offd;

      /* AB - 4/06 - need to get this object*/
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);

      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            diag = hypre_ParCSRMatrixDiag(par_matrix);
            diag_i = hypre_CSRMatrixI(diag);
            diag_j = hypre_CSRMatrixJ(diag);
            diag_data = hypre_CSRMatrixData(diag);
            offd = hypre_ParCSRMatrixOffd(par_matrix);
            offd_i = hypre_CSRMatrixI(offd);
            num_cols_offd = hypre_CSRMatrixNumCols(offd);
            if (num_cols_offd)
            {
               col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
               offd_j = hypre_CSRMatrixJ(offd);
               offd_data = hypre_CSRMatrixData(offd);
            }
            /* total number of stored entries in this row (diag + offd) */
            size = diag_i[row_local+1] - diag_i[row_local]
                   + offd_i[row_local+1] - offd_i[row_local];

            if (n > size)
            /* Should we change this and allow this?
               This could be same column index, i.e. only last value is set,
               previous ones overwritten. */
            {
               hypre_error(HYPRE_ERROR_GENERIC);
               if (print_level)
               {
                  hypre_printf (" row %b too long! \n", row);
               }
               return hypre_error_flag;
            }

            pos_diag = diag_i[row_local];
            pos_offd = offd_i[row_local];
            len_diag = diag_i[row_local+1];
            len_offd = offd_i[row_local+1];
            not_found = 1;

            for (i=0; i < n; i++)
            {
               if (cols[indx] < col_0 || cols[indx] > col_n)
               /* insert into offd */
               {
                  /* map global column to local offd column via col_map_offd */
                  j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd);
                  if (j_offd == -1)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                     }
                     return hypre_error_flag;
                     /* return -1; */
                  }
                  for (j=pos_offd; j < len_offd; j++)
                  {
                     if (offd_j[j] == j_offd)
                     {
                        offd_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  not_found = 1;
               }
               /* diagonal element */
               else if (cols[indx] == row)
               {
                  /* diag stores the diagonal entry first in each row;
                     presumably guaranteed by assembly -- verified here */
                  if (diag_j[pos_diag] != row_local)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
                  diag_data[pos_diag] += values[indx];
               }
               else  /* insert into diag */
               {
                  for (j=pos_diag; j < len_diag; j++)
                  {
                     if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                     {
                        diag_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     hypre_error(HYPRE_ERROR_GENERIC);
                     if (print_level)
                     {
                        hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                     }
                     return hypre_error_flag;
                  }
               }
               indx++;
            }
         }
         /* not my row -- queue the chunk in the aux off-proc arrays */
         else
         {
            if (!aux_matrix)
            {
               size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
               hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
               hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
               hypre_IJMatrixTranslator(matrix) = aux_matrix;
            }
            current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               /* first off-proc element: allocate the queue
                  (off_proc_i holds [row, n] pairs, hence 2x) */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) =
                  hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) =
                  hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) =
                  hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the queue; refresh the aux matrix pointers afterwards */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* AB - 4/6 - the row should be negative to indicate an add */
            /* UMY - 12/28/09 - now positive since we eliminated the feature
               of setting on other processors */
            /* off_proc_i[off_proc_i_indx++] = row; */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
      }
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix *) hypre_IJMatrixTranslator(matrix);
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      for (ii=0; ii < nrows; ii++)
      {
         row = rows[ii];
         n = ncols ? ncols[ii] : 1;
         if (n == 0) /* empty row */
         {
            continue;
         }
         indx = row_indexes[ii];
         if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
         {
            row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
            /* compute local row number */
            if (need_aux)
            {
               /* Case 2: accumulate into the per-row auxiliary arrays;
                  entries that do not fit in the current row space are parked
                  in tmp_j/tmp_data and appended after a realloc */
               aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
               aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
               local_j = aux_j[row_local];
               local_data = aux_data[row_local];
               space = row_space[row_local];
               old_size = row_length[row_local];
               size = space - old_size;   /* free slots left in this row */
               if (size < n)
               {
                  /* worst case: all n new entries are new columns */
                  size = n - size;
                  tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                  tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
               }
               else
               {
                  tmp_j = NULL;
               }
               tmp_indx = 0;
               not_found = 1;
               size = old_size;
               for (i=0; i < n; i++)
               {
                  /* add to an existing column entry if present */
                  for (j=0; j < old_size; j++)
                  {
                     if (local_j[j] == cols[indx])
                     {
                        local_data[j] += values[indx];
                        not_found = 0;
                        break;
                     }
                  }
                  if (not_found)
                  {
                     if (size < space)
                     {
                        local_j[size] = cols[indx];
                        local_data[size++] = values[indx];
                     }
                     else
                     {
                        /* overflow goes to tmp arrays, merged below */
                        tmp_j[tmp_indx] = cols[indx];
                        tmp_data[tmp_indx++] = values[indx];
                     }
                  }
                  not_found = 1;
                  indx++;
               }
               row_length[row_local] = size+tmp_indx;

               if (tmp_indx)
               {
                  /* grow the row and refresh the local pointers */
                  aux_j[row_local] = hypre_TReAlloc(aux_j[row_local], HYPRE_BigInt,
                                                    size+tmp_indx, HYPRE_MEMORY_HOST);
                  aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex,
                                                       size+tmp_indx, HYPRE_MEMORY_HOST);
                  row_space[row_local] = size+tmp_indx;
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
               }

               cnt = size;

               for (i=0; i < tmp_indx; i++)
               {
                  local_j[cnt] = tmp_j[i];
                  local_data[cnt++] = tmp_data[i];
               }

               if (tmp_j)
               {
                  hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                  hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
               }
            }
            else /* insert immediately into data in ParCSRMatrix structure */
            {
               /* Case 3: write straight into diag/offd; the per-row insertion
                  cursors (IndxDiag/IndxOffd) track how far each row is filled */
               HYPRE_BigInt *big_offd_j;
               offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
               diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
               diag = hypre_ParCSRMatrixDiag(par_matrix);
               diag_i = hypre_CSRMatrixI(diag);
               diag_j = hypre_CSRMatrixJ(diag);
               diag_data = hypre_CSRMatrixData(diag);
               offd = hypre_ParCSRMatrixOffd(par_matrix);
               offd_i = hypre_CSRMatrixI(offd);
               if (num_procs > 1)
               {
                  /* offd columns are stored as global ids in big_offd_j until
                     assembly; allocate it lazily */
                  big_offd_j = hypre_CSRMatrixBigJ(offd);
                  offd_data = hypre_CSRMatrixData(offd);
                  if (!big_offd_j)
                  {
                     big_offd_j = hypre_CTAlloc(HYPRE_BigInt,
                                                offd_i[hypre_CSRMatrixNumRows(offd)],
                                                hypre_CSRMatrixMemoryLocation(offd));
                     hypre_CSRMatrixBigJ(offd) = big_offd_j;
                  }
               }
               cnt_diag = diag_indx;
               cnt_offd = offd_indx;
               diag_space = diag_i[row_local+1];
               offd_space = offd_i[row_local+1];
               not_found = 1;
               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     for (j=offd_i[row_local]; j < offd_indx; j++)
                     {
                        if (big_offd_j[j] == cols[indx])
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_offd < offd_space)
                        {
                           big_offd_j[cnt_offd] = cols[indx];
                           offd_data[cnt_offd++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements!\n", row);
                           }
                           /* return 1;*/
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  else /* insert into diag */
                  {
                     HYPRE_Int col_j = (HYPRE_Int)( cols[indx] - col_0);
                     for (j=diag_i[row_local]; j < diag_indx; j++)
                     {
                        if (diag_j[j] == col_j)
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (cnt_diag < diag_space)
                        {
                           diag_j[cnt_diag] = col_j;
                           diag_data[cnt_diag++] = values[indx];
                        }
                        else
                        {
                           hypre_error(HYPRE_ERROR_GENERIC);
                           if (print_level)
                           {
                              hypre_printf("Error in row %b ! Too many elements !\n", row);
                           }
                           /* return 1; */
                           return hypre_error_flag;
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               }
               /* remember how far this row has been filled */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* not my row -- queue the chunk in the aux off-proc arrays
            (same scheme as in the assembled branch above) */
         else
         {
            current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);

            if (!max_off_proc_elmts)
            {
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) =
                  hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) =
                  hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) =
                  hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
      }
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixDestroyParCSR
 *
 * frees an IJMatrix
 *
 * Destroys both the underlying ParCSR matrix object and the auxiliary
 * (translator) matrix that holds pre-assembly bookkeeping.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixDestroyParCSR(hypre_IJMatrix *matrix)
{
   hypre_ParCSRMatrixDestroy((hypre_ParCSRMatrix *)hypre_IJMatrixObject(matrix));
   hypre_AuxParCSRMatrixDestroy((hypre_AuxParCSRMatrix*)hypre_IJMatrixTranslator(matrix));

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleOffProcValsParCSR
 *
 * This is for handling set and get values calls to off-proc. entries -
 * it is called from matrix assemble. There is an alternate version for
 * when the assumed partition is being used.
 * *****************************************************************************/

#ifndef HYPRE_NO_GLOBAL_PARTITION

HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix       *matrix,
                                         HYPRE_Int             off_proc_i_indx,
                                         HYPRE_Int             max_off_proc_elmts,
                                         HYPRE_Int             current_num_elmts,
                                         HYPRE_MemoryLocation  memory_location,
                                         HYPRE_BigInt         *off_proc_i,
                                         HYPRE_BigInt         *off_proc_j,
                                         HYPRE_Complex        *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_MPI_Request *requests = NULL;
   hypre_MPI_Status *status = NULL;
   HYPRE_Int i, ii, j, j2, jj, n, row_index = 0;
   HYPRE_BigInt row;
   HYPRE_Int iii, iid, indx, ip;
   HYPRE_Int proc_id, num_procs, my_id;
   HYPRE_Int num_sends, num_sends3;
   HYPRE_Int num_recvs;
   HYPRE_Int num_requests;
   HYPRE_Int vec_start, vec_len;
   HYPRE_Int *send_procs;
   HYPRE_Int *chunks;                /* number of (row,n) chunks per proc */
   HYPRE_BigInt *send_i;
   HYPRE_Int *send_map_starts;       /* offsets into send_i, per send proc */
   HYPRE_Int *dbl_send_map_starts;   /* offsets into send_data, per send proc */
   HYPRE_Int *recv_procs;
   HYPRE_Int *recv_chunks;
   HYPRE_BigInt *recv_i;
   HYPRE_Int *recv_vec_starts;
   HYPRE_Int *dbl_recv_vec_starts;
   HYPRE_Int *info;                  /* number of elements destined per proc */
   HYPRE_Int *int_buffer;
   HYPRE_Int *proc_id_mem;           /* cached owner proc per chunk */
   HYPRE_BigInt *partitioning;
   HYPRE_Int *displs;
   HYPRE_Int *recv_buf;
   HYPRE_Complex *send_data;
   HYPRE_Complex *recv_data;

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   partitioning = hypre_IJMatrixRowPartitioning(matrix);

   /* First pass over the queue: find the owner of each queued row and count,
      per destination proc, the number of elements and chunks.
      off_proc_i is a sequence of [row, n] pairs (see AddToValues). */
   info = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   chunks = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   proc_id_mem = hypre_CTAlloc(HYPRE_Int, off_proc_i_indx/2, HYPRE_MEMORY_HOST);
   j=0;
   for (i=0; i < off_proc_i_indx; i++)
   {
      row = off_proc_i[i++];
      //if (row < 0) row = -row-1;
      n = (HYPRE_Int)off_proc_i[i];
      proc_id = hypre_FindProc(partitioning,row,num_procs);
      proc_id_mem[j++] = proc_id;
      info[proc_id] += n;
      chunks[proc_id]++;
   }

   /* determine send_procs and amount of data to be sent */
   num_sends = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         num_sends++;
      }
   }
   send_procs = hypre_CTAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
   send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   dbl_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
   num_sends3 = 3*num_sends;
   int_buffer = hypre_CTAlloc(HYPRE_Int, 3*num_sends, HYPRE_MEMORY_HOST);
   j = 0;
   j2 = 0;
   send_map_starts[0] = 0;
   dbl_send_map_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      if (info[i])
      {
         send_procs[j++] = i;
         /* per chunk: row id + count (2 slots) plus its column indices */
         send_map_starts[j] = send_map_starts[j-1]+2*chunks[i]+info[i];
         dbl_send_map_starts[j] = dbl_send_map_starts[j-1]+info[i];
         /* triple published to all procs: (dest, #chunks, #elements) */
         int_buffer[j2++] = i;
         int_buffer[j2++] = chunks[i];
         int_buffer[j2++] = info[i];
      }
   }
   hypre_TFree(chunks, HYPRE_MEMORY_HOST);

   /* exchange the (dest, #chunks, #elements) triples so every proc can
      discover who will be sending to it */
   hypre_MPI_Allgather(&num_sends3,1,HYPRE_MPI_INT,info,1,HYPRE_MPI_INT,comm);

   displs = hypre_CTAlloc(HYPRE_Int, num_procs+1, HYPRE_MEMORY_HOST);
   displs[0] = 0;
   for (i=1; i < num_procs+1; i++)
   {
      displs[i] = displs[i-1]+info[i-1];
   }
   recv_buf = hypre_CTAlloc(HYPRE_Int, displs[num_procs], HYPRE_MEMORY_HOST);

   hypre_MPI_Allgatherv(int_buffer,num_sends3,HYPRE_MPI_INT,recv_buf,info,displs, HYPRE_MPI_INT,comm);

   hypre_TFree(int_buffer, HYPRE_MEMORY_HOST);
   hypre_TFree(info, HYPRE_MEMORY_HOST);

   /* determine recv procs and amount of data to be received */
   num_recvs = 0;
   for (j=0; j < displs[num_procs]; j+=3)
   {
      if (recv_buf[j] == my_id)
      {
         num_recvs++;
      }
   }

   recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   recv_chunks = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST);
   recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
   dbl_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);

   j2 = 0;
   recv_vec_starts[0] = 0;
   dbl_recv_vec_starts[0] = 0;
   for (i=0; i < num_procs; i++)
   {
      for (j=displs[i]; j < displs[i+1]; j+=3)
      {
         if (recv_buf[j] == my_id)
         {
            recv_procs[j2] = i;
            recv_chunks[j2++] = recv_buf[j+1];
            recv_vec_starts[j2] = recv_vec_starts[j2-1]+2*recv_buf[j+1] +recv_buf[j+2];
            dbl_recv_vec_starts[j2] = dbl_recv_vec_starts[j2-1]+recv_buf[j+2];
         }
         if (j2 == num_recvs)
         {
            break;
         }
      }
   }
   hypre_TFree(recv_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(displs, HYPRE_MEMORY_HOST);

   /* set up data to be sent to send procs */
   /* send_i contains for each send proc : row no., no. of elmts and column
      indices, send_data contains corresponding values */

   send_i = hypre_CTAlloc(HYPRE_BigInt, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   send_data = hypre_CTAlloc(HYPRE_Complex, dbl_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
   recv_i = hypre_CTAlloc(HYPRE_BigInt, recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);
   recv_data = hypre_CTAlloc(HYPRE_Complex, dbl_recv_vec_starts[num_recvs], HYPRE_MEMORY_HOST);

   /* pack the queued chunks; the *_map_starts entries are used as running
      write cursors here and restored (shifted back) right after the loop */
   j=0;
   jj=0;
   for (i=0; i < off_proc_i_indx; i++)
   {
      row = off_proc_i[i++];
      n = (HYPRE_Int)off_proc_i[i];
      proc_id = proc_id_mem[i/2];
      indx = hypre_BinarySearch(send_procs,proc_id,num_sends);
      iii = send_map_starts[indx];
      iid = dbl_send_map_starts[indx];
      send_i[iii++] = row;
      send_i[iii++] = (HYPRE_BigInt) n;
      for (ii = 0; ii < n; ii++)
      {
         send_i[iii++] = off_proc_j[jj];
         send_data[iid++] = off_proc_data[jj++];
      }
      send_map_starts[indx] = iii;
      dbl_send_map_starts[indx] = iid;
   }

   hypre_TFree(proc_id_mem, HYPRE_MEMORY_HOST);

   /* shift the cursors back into proper prefix-sum form */
   for (i=num_sends; i > 0; i--)
   {
      send_map_starts[i] = send_map_starts[i-1];
      dbl_send_map_starts[i] = dbl_send_map_starts[i-1];
   }
   send_map_starts[0] = 0;
   dbl_send_map_starts[0] = 0;

   num_requests = num_recvs+num_sends;
   if (num_requests)
   {
      requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
   }

   /* round 1: exchange the integer (row/count/column) data */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = recv_vec_starts[i];
      vec_len = recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
                      ip, 0, comm, &requests[j++]);
   }
   for (i=0; i < num_sends; i++)
   {
      vec_start = send_map_starts[i];
      vec_len = send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_i[vec_start], vec_len, HYPRE_MPI_BIG_INT,
                      ip, 0, comm, &requests[j++]);
   }

   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
   }

   /* round 2: exchange the matching coefficient values */
   j=0;
   for (i=0; i < num_recvs; i++)
   {
      vec_start = dbl_recv_vec_starts[i];
      vec_len = dbl_recv_vec_starts[i+1] - vec_start;
      ip = recv_procs[i];
      hypre_MPI_Irecv(&recv_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }
   for (i=0; i < num_sends; i++)
   {
      vec_start = dbl_send_map_starts[i];
      vec_len = dbl_send_map_starts[i+1] - vec_start;
      ip = send_procs[i];
      hypre_MPI_Isend(&send_data[vec_start], vec_len, HYPRE_MPI_COMPLEX,
                      ip, 0, comm, &requests[j++]);
   }
   if (num_requests)
   {
      hypre_MPI_Waitall(num_requests, requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }

   hypre_TFree(send_i, HYPRE_MEMORY_HOST);
   hypre_TFree(send_data, HYPRE_MEMORY_HOST);
   hypre_TFree(send_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(dbl_send_map_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(dbl_recv_vec_starts, HYPRE_MEMORY_HOST);

   /* unpack the received chunks and add them locally, one row at a time */
   j = 0;
   j2 = 0;
   for (i=0; i < num_recvs; i++)
   {
      for (ii=0; ii < recv_chunks[i]; ii++)
      {
         row = recv_i[j];
         HYPRE_Int rcvi = (HYPRE_Int) recv_i[j+1];
         hypre_IJMatrixAddToValuesParCSR(matrix,1,&rcvi,&row,&row_index,
                                         &recv_i[j+2],&recv_data[j2]);
         j2 += recv_i[j+1];
         j += recv_i[j+1]+2;
      }
   }
   hypre_TFree(recv_chunks, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_i, HYPRE_MEMORY_HOST);
   hypre_TFree(recv_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

#else

/* assumed partition version */

HYPRE_Int
hypre_IJMatrixAssembleOffProcValsParCSR( hypre_IJMatrix       *matrix,
                                         HYPRE_Int             off_proc_i_indx,
                                         HYPRE_Int             max_off_proc_elmts,
                                         HYPRE_Int             current_num_elmts,
                                         HYPRE_MemoryLocation  memory_location,
                                         HYPRE_BigInt         *off_proc_i,
                                         HYPRE_BigInt         *off_proc_j,
                                         HYPRE_Complex        *off_proc_data )
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int i, j, k, in_i;
   HYPRE_Int myid;
   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt global_first_col;
   HYPRE_BigInt
global_first_row; HYPRE_Int ex_num_contacts = 0, num_rows = 0; HYPRE_BigInt range_start, range_end; HYPRE_Int num_elements; HYPRE_Int storage; HYPRE_Int indx; HYPRE_BigInt row; HYPRE_Int num_ranges, row_index = 0; HYPRE_Int num_recvs; HYPRE_BigInt upper_bound; HYPRE_Int counter; HYPRE_Int num_real_procs; HYPRE_Int /*current_proc,*/ original_proc_indx; HYPRE_BigInt *row_list=NULL; HYPRE_Int *row_list_num_elements=NULL; HYPRE_Int *a_proc_id=NULL, *orig_order=NULL; HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL; HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL; HYPRE_BigInt *ex_contact_buf = NULL; HYPRE_Int *recv_starts=NULL; HYPRE_BigInt *response_buf = NULL; HYPRE_Int *response_buf_starts=NULL; HYPRE_Int *num_rows_per_proc = NULL, *num_elements_total = NULL; HYPRE_Int *argsort_contact_procs = NULL; HYPRE_Int obj_size_bytes, complex_size; HYPRE_BigInt big_int_size; HYPRE_Int tmp_int; HYPRE_BigInt tmp_big_int; HYPRE_BigInt *col_ptr; HYPRE_BigInt *big_int_data = NULL; HYPRE_Int big_int_data_size = 0, complex_data_size = 0; void *void_contact_buf = NULL; void *index_ptr; void *recv_data_ptr; HYPRE_Complex tmp_complex; HYPRE_Complex *col_data_ptr; HYPRE_Complex *complex_data = NULL; hypre_DataExchangeResponse response_obj1, response_obj2; hypre_ProcListElements send_proc_obj; hypre_IJAssumedPart *apart; hypre_MPI_Comm_rank(comm, &myid); global_num_cols = hypre_IJMatrixGlobalNumCols(matrix); global_first_col = hypre_IJMatrixGlobalFirstCol(matrix); global_first_row = hypre_IJMatrixGlobalFirstRow(matrix); if (memory_location == HYPRE_MEMORY_DEVICE) { HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, 2*current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_BigInt *off_proc_j_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST); HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST); hypre_TMemcpy(tmp, 
off_proc_i, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_j_h, off_proc_j, HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); for (i = 0; i < current_num_elmts; i++) { off_proc_i_h[2*i] = tmp[i]; off_proc_i_h[2*i+1] = 1; } off_proc_i_indx = current_num_elmts * 2; off_proc_i = off_proc_i_h; off_proc_j = off_proc_j_h; off_proc_data = off_proc_data_h; hypre_TFree(tmp, HYPRE_MEMORY_HOST); } /* call hypre_IJMatrixAddToValuesParCSR directly inside this function * with one chunk of data */ HYPRE_Int off_proc_nelm_recv_cur = 0; HYPRE_Int off_proc_nelm_recv_max = 0; HYPRE_BigInt *off_proc_i_recv = NULL; HYPRE_BigInt *off_proc_j_recv = NULL; HYPRE_Complex *off_proc_data_recv = NULL; HYPRE_BigInt *off_proc_i_recv_d = NULL; HYPRE_BigInt *off_proc_j_recv_d = NULL; HYPRE_Complex *off_proc_data_recv_d = NULL; num_rows = off_proc_i_indx/2; /* verify that we have created the assumed partition */ if (hypre_IJMatrixAssumedPart(matrix) == NULL) { hypre_IJMatrixCreateAssumedPartition(matrix); } apart = (hypre_IJAssumedPart*) hypre_IJMatrixAssumedPart(matrix); /*if (hypre_ParCSRMatrixAssumedPartition(par_matrix) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(par_matrix); } apart = hypre_ParCSRMatrixAssumedPartition(par_matrix);*/ row_list = hypre_CTAlloc(HYPRE_BigInt, num_rows, HYPRE_MEMORY_HOST); row_list_num_elements = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); a_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); orig_order = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); /* get the assumed processor id for each row */ if (num_rows > 0 ) { for (i=0; i < num_rows; i++) { row = off_proc_i[i*2]; //if (row < 0) row = -row - 1; row_list[i] = row; row_list_num_elements[i] = off_proc_i[i*2+1]; 
hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row, global_num_cols, &proc_id); a_proc_id[i] = proc_id; orig_order[i] = i; } /* now we need to find the actual order of each row - sort on row - this will result in proc ids sorted also...*/ hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, num_rows -1); /* calculate the number of contacts */ ex_num_contacts = 1; last_proc = a_proc_id[0]; for (i=1; i < num_rows; i++) { if (a_proc_id[i] > last_proc) { ex_num_contacts++; last_proc = a_proc_id[i]; } } } /* now we will go through a create a contact list - need to contact assumed processors and find out who the actual row owner is - we will contact with a range (2 numbers) */ ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST); ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST); ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST); counter = 0; range_end = -1; for (i=0; i< num_rows; i++) { if (row_list[i] > range_end) { /* assumed proc */ proc_id = a_proc_id[i]; /* end of prev. 
range */ if (counter > 0) { ex_contact_buf[counter*2 - 1] = row_list[i-1]; } /*start new range*/ ex_contact_procs[counter] = proc_id; ex_contact_vec_starts[counter] = counter*2; ex_contact_buf[counter*2] = row_list[i]; counter++; hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_col, global_num_cols, &range_start, &range_end); } } /* finish the starts */ ex_contact_vec_starts[counter] = counter*2; /* finish the last range */ if (counter > 0) { ex_contact_buf[counter*2 - 1] = row_list[num_rows - 1]; } /* don't allocate space for responses */ /* create response object - can use same fill response as used in the commpkg routine */ response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs; response_obj1.data1 = apart; /* this is necessary so we can fill responses*/ response_obj1.data2 = NULL; max_response_size = 6; /* 6 means we can fit 3 ranges*/ hypre_DataExchangeList(ex_num_contacts, ex_contact_procs, ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt), sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 1, comm, (void**) &response_buf, &response_buf_starts); /* now response_buf contains a proc_id followed by a range upper bound */ hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST); /*how many ranges were returned?*/ num_ranges = response_buf_starts[ex_num_contacts]; num_ranges = num_ranges/2; prev_id = -1; j = 0; counter = 0; num_real_procs = 0; /* loop through ranges - create a list of actual processor ids*/ for (i=0; i<num_ranges; i++) { upper_bound = response_buf[i*2+1]; counter = 0; tmp_id = response_buf[i*2]; /* loop through row_list entries - counting how many are in the range */ while (j < num_rows && row_list[j] <= upper_bound) { real_proc_id[j] = tmp_id; j++; counter++; } if (counter > 0 && tmp_id != prev_id) { num_real_procs++; } prev_id = tmp_id; } /* now we have the list 
of real processor ids (real_proc_id) - and the number of distinct ones - so now we can set up data to be sent - we have HYPRE_Int data and HYPRE_Complex data. that we will need to pack together */ /* first find out how many rows and elements we need to send per proc - so we can do storage */ ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); num_elements_total = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST); counter = 0; if (num_real_procs > 0 ) { ex_contact_procs[0] = real_proc_id[0]; num_rows_per_proc[0] = 1; num_elements_total[0] = row_list_num_elements[orig_order[0]]; /* loop through real procs - these are sorted (row_list is sorted also)*/ for (i=1; i < num_rows; i++) { if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */ { num_rows_per_proc[counter] += 1; /*another row */ num_elements_total[counter] += row_list_num_elements[orig_order[i]]; } else /* new processor */ { counter++; ex_contact_procs[counter] = real_proc_id[i]; num_rows_per_proc[counter] = 1; num_elements_total[counter] = row_list_num_elements[orig_order[i]]; } } } /* to pack together, we need to use the largest obj. size of (HYPRE_Int) and (HYPRE_Complex) - if these are much different, then we are wasting some storage, but I do not think that it will be a large amount since this function should not be used on really large amounts of data anyway*/ big_int_size = sizeof(HYPRE_BigInt); complex_size = sizeof(HYPRE_Complex); obj_size_bytes = hypre_max(big_int_size, complex_size); /* set up data to be sent to send procs */ /* for each proc, ex_contact_buf contains #rows, row #, no. elements, col indicies, col data, row #, no. elements, col indicies, col data, etc. 
*/ /* first calculate total storage and make vec_starts arrays */ storage = 0; ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST); ex_contact_vec_starts[0] = -1; for (i=0; i < num_real_procs; i++) { storage += 1 + 2 * num_rows_per_proc[i] + 2* num_elements_total[i]; ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */ } hypre_TFree(num_elements_total, HYPRE_MEMORY_HOST); /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/ void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST); index_ptr = void_contact_buf; /* step through with this index */ /* for each proc: #rows, row #, no. elements, col indicies, col data, row #, no. elements, col indicies, col data, etc. */ /* un-sort real_proc_id - we want to access data arrays in order, so cheaper to do this*/ us_real_proc_id = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST); for (i=0; i < num_rows; i++) { us_real_proc_id[orig_order[i]] = real_proc_id[i]; } hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST); counter = 0; /* index into data arrays */ prev_id = -1; for (i=0; i < num_rows; i++) { proc_id = us_real_proc_id[i]; /* can't use row list[i] - you loose the negative signs that differentiate add/set values */ row = off_proc_i[i*2]; num_elements = row_list_num_elements[i]; /* find position of this processor */ indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs); in_i = ex_contact_vec_starts[indx]; index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes); /* first time for this processor - add the number of rows to the buffer */ if (in_i < 0) { in_i = -in_i - 1; /* re-calc. 
index_ptr since in_i was negative */ index_ptr = (void *) ((char *) void_contact_buf + in_i*obj_size_bytes); tmp_int = num_rows_per_proc[indx]; hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; } /* add row # */ hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; /* add number of elements */ hypre_TMemcpy( index_ptr, &num_elements, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; /* now add col indices */ for (j=0; j< num_elements; j++) { tmp_big_int = off_proc_j[counter+j]; /* col number */ hypre_TMemcpy( index_ptr, &tmp_big_int, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i ++; } /* now add data */ for (j=0; j< num_elements; j++) { tmp_complex = off_proc_data[counter++]; /* value */ hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); index_ptr = (void *) ((char *) index_ptr + obj_size_bytes); in_i++; } /* increment the indexes to keep track of where we are - we * adjust below to be actual starts*/ ex_contact_vec_starts[indx] = in_i; } /* some clean up */ hypre_TFree(response_buf, HYPRE_MEMORY_HOST); hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST); hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST); hypre_TFree(orig_order, HYPRE_MEMORY_HOST); hypre_TFree(row_list, HYPRE_MEMORY_HOST); hypre_TFree(row_list_num_elements, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST); for (i=num_real_procs; i > 0; i--) { ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1]; } ex_contact_vec_starts[0] = 0; /* now send the data */ /***********************************/ /* first get the integer info in send_proc_obj */ /* the response we expect is just a 
confirmation*/ response_buf = NULL; response_buf_starts = NULL; /*build the response object*/ /* use the send_proc_obj for the info kept from contacts */ /*estimate inital storage allocation */ send_proc_obj.length = 0; send_proc_obj.storage_length = num_real_procs + 5; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = storage + 20; send_proc_obj.v_elements = hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); response_obj2.fill_response = hypre_FillResponseIJOffProcVals; response_obj2.data1 = NULL; response_obj2.data2 = &send_proc_obj; max_response_size = 0; hypre_DataExchangeList(num_real_procs, ex_contact_procs, void_contact_buf, ex_contact_vec_starts, obj_size_bytes, 0, &response_obj2, max_response_size, 2, comm, (void **) &response_buf, &response_buf_starts); hypre_TFree(response_buf, HYPRE_MEMORY_HOST); hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST); hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST); hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST); /* Now we can unpack the send_proc_objects and call set and add to values functions. 
We unpack messages in a deterministic order, using processor rank */ num_recvs = send_proc_obj.length; argsort_contact_procs = hypre_CTAlloc(HYPRE_Int, num_recvs, HYPRE_MEMORY_HOST); for(i=0; i < num_recvs; i++) { argsort_contact_procs[i] = i; } /* This sort's the id array, but the original indices are stored in * argsort_contact_procs */ hypre_qsort2i( send_proc_obj.id, argsort_contact_procs, 0, num_recvs-1 ); /* alias */ recv_data_ptr = send_proc_obj.v_elements; recv_starts = send_proc_obj.vec_starts; for (i=0; i < num_recvs; i++) { /* Find the current processor in order, and reset recv_data_ptr to that processor's message */ original_proc_indx = argsort_contact_procs[i]; /*current_proc = send_proc_obj.id[i];*/ indx = recv_starts[original_proc_indx]; recv_data_ptr = (void *) ((char *) send_proc_obj.v_elements + indx*obj_size_bytes); /* get the number of rows for this recv */ hypre_TMemcpy( &num_rows, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; for (j=0; j < num_rows; j++) /* for each row: unpack info */ { /* row # */ hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; /* num elements for this row */ hypre_TMemcpy( &num_elements, recv_data_ptr, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); indx++; /* col indices */ /* Need to check this again !!!! 
*/ if (big_int_size == obj_size_bytes) { col_ptr = (HYPRE_BigInt *) recv_data_ptr; recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes); } else /* copy data */ { if (big_int_data_size < num_elements) { big_int_data = hypre_TReAlloc(big_int_data, HYPRE_BigInt, num_elements + 10, HYPRE_MEMORY_HOST); } for (k=0; k< num_elements; k++) { hypre_TMemcpy( &big_int_data[k], recv_data_ptr, HYPRE_BigInt, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); } col_ptr = big_int_data; } /* col data */ if (complex_size == obj_size_bytes) { col_data_ptr = (HYPRE_Complex *) recv_data_ptr; recv_data_ptr = (void *) ((char *)recv_data_ptr + num_elements*obj_size_bytes); } else /* copy data */ { if (complex_data_size < num_elements) { complex_data = hypre_TReAlloc(complex_data, HYPRE_Complex, num_elements + 10, HYPRE_MEMORY_HOST); } for (k=0; k< num_elements; k++) { hypre_TMemcpy( &complex_data[k], recv_data_ptr, HYPRE_Complex, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes); } col_data_ptr = complex_data; } if (memory_location == HYPRE_MEMORY_HOST) { hypre_IJMatrixAddToValuesParCSR(matrix, 1, &num_elements, &row, &row_index, col_ptr, col_data_ptr); } else { HYPRE_Int nelm_new = off_proc_nelm_recv_cur + num_elements; if (nelm_new > off_proc_nelm_recv_max) { off_proc_nelm_recv_max = nelm_new * 2; off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); off_proc_j_recv = hypre_TReAlloc(off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_max, HYPRE_MEMORY_HOST); } HYPRE_Int i; for (i = 0; i < num_elements; i++) { off_proc_i_recv[off_proc_nelm_recv_cur + i] = row; } hypre_TMemcpy(off_proc_j_recv + off_proc_nelm_recv_cur, col_ptr, HYPRE_BigInt, num_elements, HYPRE_MEMORY_HOST, 
HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_data_recv + off_proc_nelm_recv_cur, col_data_ptr, HYPRE_Complex, num_elements, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); off_proc_nelm_recv_cur = nelm_new; } indx += (num_elements*2); } } if (memory_location == HYPRE_MEMORY_DEVICE) { off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); off_proc_j_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_j_recv_d, off_proc_j_recv, HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); #if defined(HYPRE_USING_CUDA) hypre_IJMatrixSetAddValuesParCSRDevice(matrix, off_proc_nelm_recv_cur, NULL, off_proc_i_recv_d, NULL, off_proc_j_recv_d, off_proc_data_recv_d, "add"); #endif } hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(argsort_contact_procs, HYPRE_MEMORY_HOST); if (big_int_data) { hypre_TFree(big_int_data, HYPRE_MEMORY_HOST); } if (complex_data) { hypre_TFree(complex_data, HYPRE_MEMORY_HOST); } if (memory_location == HYPRE_MEMORY_DEVICE) { hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_j, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST); } hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_j_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST); hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE); hypre_TFree(off_proc_j_recv_d, HYPRE_MEMORY_DEVICE); hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE); return 
hypre_error_flag;
}
#endif

/*--------------------------------------------------------------------
 * hypre_FillResponseIJOffProcVals
 * Fill response function for the previous function (2nd data exchange)
 *--------------------------------------------------------------------*/

/* Callback invoked by hypre_DataExchangeList for each incoming contact
 * message.  Appends the raw (packed) contact buffer to the accumulating
 * send_proc_obj (carried in response_obj->data2), growing its id/vec_starts
 * arrays and its byte buffer v_elements as needed.  The response sent back
 * to the contacting processor is empty (size 0) - it is a confirmation only.
 *
 * Note: elements are stored in units of object_size =
 * max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex)), matching the packing
 * convention used by the sender. */
HYPRE_Int
hypre_FillResponseIJOffProcVals(void      *p_recv_contact_buf,
                                HYPRE_Int  contact_size,
                                HYPRE_Int  contact_proc,
                                void      *ro,
                                MPI_Comm   comm,
                                void     **p_send_response_buf,
                                HYPRE_Int *response_message_size )
{
   HYPRE_Int    myid;
   HYPRE_Int    index, count, elength;
   HYPRE_Int    object_size;
   void        *index_ptr;

   hypre_DataExchangeResponse  *response_obj = (hypre_DataExchangeResponse*) ro;
   hypre_ProcListElements      *send_proc_obj = (hypre_ProcListElements*) response_obj->data2;

   /* one storage slot per packed object - large enough for either type */
   object_size = hypre_max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex));

   hypre_MPI_Comm_rank(comm, &myid );

   /*check to see if we need to allocate more space in send_proc_obj for vec starts
    * and id */
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length +=20; /*add space for 20 more contact*/
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts,HYPRE_Int,
                        send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      if( send_proc_obj->id != NULL)
      {
         send_proc_obj->id =
            hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                           send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
      }
   }

   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /* current number of elements */
   if( send_proc_obj->id != NULL)
   {
      send_proc_obj->id[count] = contact_proc;
   }

   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      elength = hypre_max(contact_size, 100);
      elength += index;
      send_proc_obj->v_elements =
         hypre_TReAlloc((char*)send_proc_obj->v_elements, char,
                        elength*object_size, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }

   /*populate send_proc_obj*/
   /* raw byte copy of the packed contact message into the accumulator */
   index_ptr = (void *) ((char *) send_proc_obj->v_elements + index*object_size);
   hypre_TMemcpy(index_ptr, p_recv_contact_buf , char, object_size*contact_size,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   send_proc_obj->vec_starts[count+1] = index + contact_size;
   send_proc_obj->length++;

   /* output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------*/

/* Binary search over a monotone partition array: returns the index i with
 * list[i] <= value < list[i+1], or -1 if value lies outside
 * [list[0], list[list_length]).  Note list must have at least
 * list_length+1 entries, since list[list_length] is read as the upper
 * bound of the final interval. */
HYPRE_Int hypre_FindProc(HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length)
{
   HYPRE_Int low, high, m;

   low = 0;
   high = list_length;
   if (value >= list[high] || value < list[low])
   {
      return -1;
   }
   else
   {
      /* invariant: list[low] <= value < list[high] */
      while (low+1 < high)
      {
         m = (low + high) / 2;
         if (value < list[m])
         {
            high = m;
         }
         else if (value >= list[m])
         {
            low = m;
         }
      }
      return low;
   }
}

/******************************************************************************
 *
 * hypre_IJMatrixAssembleParCSR
 *
 * assembles IJMatrix from AuxParCSRMatrix auxiliary structure
 *****************************************************************************/

HYPRE_Int
hypre_IJMatrixAssembleParCSR(hypre_IJMatrix *matrix)
{
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   hypre_ParCSRMatrix *par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject(matrix);
   hypre_AuxParCSRMatrix *aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
   HYPRE_BigInt *row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   HYPRE_BigInt *col_partitioning = hypre_IJMatrixColPartitioning(matrix);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *diag_j;
   HYPRE_Int *offd_j = NULL;
   HYPRE_Complex *diag_data;
   HYPRE_Complex *offd_data = NULL;
   HYPRE_Int i, j, j0;
   HYPRE_Int num_cols_offd;
   HYPRE_Int *diag_pos;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int *row_length;
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int my_id, num_procs;
   HYPRE_Int num_rows;
   HYPRE_Int i_diag, i_offd;
   HYPRE_BigInt col_0, col_n;
   HYPRE_Int nnz_offd;
   HYPRE_BigInt *big_offd_j;
   HYPRE_BigInt *tmp_j;
   HYPRE_Complex temp;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt base = hypre_IJMatrixGlobalFirstCol(matrix);
#else
   HYPRE_BigInt base = col_partitioning[0];
#endif
   HYPRE_Int off_proc_i_indx;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int current_num_elmts;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int offd_proc_elmts;
   //HYPRE_Int new_off_proc_i_indx;
   //HYPRE_Int cancel_indx;
   //HYPRE_Int col_indx;
   //HYPRE_Int current_indx;
   //HYPRE_Int current_i;
   //HYPRE_Int row_len;
   HYPRE_Int max_num_threads;
   HYPRE_Int aux_flag, aux_flag_global;

   max_num_threads = hypre_NumThreads();

   /* first find out if anyone has an aux_matrix, and create one if you don't
    * have one, but other procs do */
   /* collective: the Allreduce sums a 0/1 flag across all ranks, so every
    * rank learns whether at least one rank holds an aux_matrix */
   aux_flag = 0;
   aux_flag_global = 0;
   if (aux_matrix)
   {
      aux_flag = 1;
   }
   hypre_MPI_Allreduce(&aux_flag, &aux_flag_global, 1, HYPRE_MPI_INT,
                       hypre_MPI_SUM, comm);
   if (aux_flag_global && (!aux_flag))
   {
      /* other ranks have one; create an empty (non-aux-storage) translator
       * locally so the collective assembly below stays in lockstep */
      hypre_MPI_Comm_rank(comm, &my_id);
      num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, num_rows, num_rows, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   if (aux_matrix)
   {
      /* first delete all cancelled elements */
      /*cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix);
      if (cancel_indx)
      {
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         col_indx = 0;
         current_i = 0;
         current_indx = 0;
         new_off_proc_i_indx = off_proc_i_indx;
         for (i=0; i < off_proc_i_indx; i= i+2)
         {
            row_len = off_proc_i[i+1];
            for (j=0; j < off_proc_i[i+1]; j++)
            {
               if (off_proc_j[col_indx] == -1)
               {
                  col_indx++;
                  row_len--;
                  current_num_elmts--;
               }
               else
               {
                  off_proc_j[current_indx] = off_proc_j[col_indx];
                  off_proc_data[current_indx++] = off_proc_data[col_indx++];
               }
            }
            if (row_len)
            {
               off_proc_i[current_i] = off_proc_i[i];
               off_proc_i[current_i+1] = row_len;
               current_i += 2;
            }
            else
            {
               new_off_proc_i_indx -= 2;
            }
         }
         hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = new_off_proc_i_indx;
         hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
      }*/

      /* collective check: does ANY rank hold off-processor entries that must
       * be communicated before local assembly? */
      off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
      hypre_MPI_Allreduce(&off_proc_i_indx, &offd_proc_elmts, 1, HYPRE_MPI_INT,
                          hypre_MPI_SUM, comm);
      if (offd_proc_elmts)
      {
         max_off_proc_elmts=hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
         current_num_elmts=hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i=hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j=hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
         off_proc_data=hypre_AuxParCSRMatrixOffProcData(aux_matrix);
         hypre_IJMatrixAssembleOffProcValsParCSR( matrix,off_proc_i_indx,
                                                  max_off_proc_elmts,
                                                  current_num_elmts,
                                                  HYPRE_MEMORY_HOST,
                                                  off_proc_i, off_proc_j,
                                                  off_proc_data);
      }
   }

   if (hypre_IJMatrixAssembleFlag(matrix) == 0)
   {
      hypre_MPI_Comm_size(comm, &num_procs);
      hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION
      num_rows = (HYPRE_Int)(row_partitioning[1] - row_partitioning[0]);
      col_0 = col_partitioning[0];
      col_n = col_partitioning[1]-1;
#else
      num_rows = (HYPRE_Int)(row_partitioning[my_id+1] - row_partitioning[my_id]);
      col_0 = col_partitioning[my_id];
      col_n = col_partitioning[my_id+1]-1;
#endif

      /* move data into ParCSRMatrix if not there already */
      if (hypre_AuxParCSRMatrixNeedAux(aux_matrix))
      {
         /* Two-pass, thread-parallel compression of the per-row aux arrays
          * into CSR diag/offd.  Pass 1 counts diag/offd entries per thread
          * range; thread 0 then prefix-sums the counts and allocates; pass 2
          * fills, placing the diagonal entry (diag_pos) first in its row. */
         HYPRE_Int *diag_array, *offd_array;
         diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
         row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
         diag_pos = hypre_CTAlloc(HYPRE_Int, num_rows, HYPRE_MEMORY_HOST);
         i_diag = 0;
         i_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i, j, i_diag, i_offd)
#endif
         {
            HYPRE_BigInt *local_j;
            HYPRE_Complex *local_data;
            HYPRE_Int rest, size, ns, ne;
            HYPRE_Int num_threads, my_thread_num;
            num_threads = hypre_NumActiveThreads();
            my_thread_num = hypre_GetThreadNum();

            /* split [0,num_rows) into contiguous per-thread ranges [ns,ne),
             * giving the first `rest` threads one extra row */
            size = num_rows/num_threads;
            rest = num_rows - size*num_threads;
            if (my_thread_num < rest)
            {
               ns = my_thread_num*(size + 1);
               ne = (my_thread_num+1)*(size + 1);
            }
            else
            {
               ns = my_thread_num*size + rest;
               ne = (my_thread_num+1)*size + rest;
            }

            /* pass 1: count diag/offd entries in this thread's rows and
             * remember where each row's diagonal entry sits (diag_pos) */
            i_diag = 0;
            i_offd = 0;
            for (i=ns; i < ne; i++)
            {
               local_j = aux_j[i];
               local_data = aux_data[i];
               diag_pos[i] = -1;
               for (j=0; j < row_length[i]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     i_offd++;
                  }
                  else
                  {
                     i_diag++;
                     if ((HYPRE_Int)(local_j[j]-col_0) == i)
                     {
                        diag_pos[i] = j;
                     }
                  }
               }
            }
            diag_array[my_thread_num] = i_diag;
            offd_array[my_thread_num] = i_offd;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            if (my_thread_num == 0)
            {
               /* inclusive prefix sums of per-thread counts; totals land in
                * i_diag/i_offd and in the row-pointer sentinels */
               i_diag = 0;
               i_offd = 0;
               for (i = 0; i < num_threads; i++)
               {
                  i_diag += diag_array[i];
                  i_offd += offd_array[i];
                  diag_array[i] = i_diag;
                  offd_array[i] = i_offd;
               }
               diag_i[num_rows] = i_diag;
               offd_i[num_rows] = i_offd;
               /* discard any stale CSR arrays before (re)allocating */
               hypre_TFree(hypre_CSRMatrixJ(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixData(diag), hypre_CSRMatrixMemoryLocation(diag));
               hypre_TFree(hypre_CSRMatrixJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixData(offd), hypre_CSRMatrixMemoryLocation(offd));
               hypre_TFree(hypre_CSRMatrixBigJ(offd), hypre_CSRMatrixMemoryLocation(offd));
               diag_j = hypre_CTAlloc(HYPRE_Int, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               diag_data = hypre_CTAlloc(HYPRE_Complex, i_diag, hypre_CSRMatrixMemoryLocation(diag));
               offd_j = hypre_CTAlloc(HYPRE_Int, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               offd_data = hypre_CTAlloc(HYPRE_Complex, i_offd, hypre_CSRMatrixMemoryLocation(offd));
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, i_offd, hypre_CSRMatrixMemoryLocation(offd));
            }
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
            /* pass 2: each thread starts writing at the end of the previous
             * thread's range (exclusive prefix from the scanned arrays) */
            if (my_thread_num)
            {
               i_diag = diag_array[my_thread_num-1];
               i_offd = offd_array[my_thread_num-1];
            }
            else
            {
               i_diag = 0;
               i_offd = 0;
            }
            for (i=ns; i < ne; i++)
            {
               diag_i[i] = i_diag;
               offd_i[i] = i_offd;
               local_j = aux_j[i];
               local_data = aux_data[i];
               /* diagonal entry (if present) goes first in the diag row */
               if (diag_pos[i] > -1)
               {
                  diag_j[i_diag] = (HYPRE_Int)(local_j[diag_pos[i]] - col_0);
                  diag_data[i_diag++] = local_data[diag_pos[i]];
               }
               for (j=0; j < row_length[i]; j++)
               {
                  if (local_j[j] < col_0 || local_j[j] > col_n)
                  {
                     /* off-diagonal block keeps global (big) column ids for
                      * now; compressed to local ids later via col_map_offd */
                     big_offd_j[i_offd] = local_j[j];
                     offd_data[i_offd++] = local_data[j];
                  }
                  else if (j != diag_pos[i])
                  {
                     diag_j[i_diag] = (HYPRE_Int)(local_j[j] - col_0);
                     diag_data[i_diag++] = local_data[j];
                  }
               }
            }
         } /* end parallel region */

         hypre_TFree(diag_array, HYPRE_MEMORY_HOST);
         hypre_TFree(offd_array, HYPRE_MEMORY_HOST);

         hypre_CSRMatrixJ(diag) = diag_j;
         hypre_CSRMatrixData(diag) = diag_data;
         hypre_CSRMatrixNumNonzeros(diag) = diag_i[num_rows];
         if (offd_i[num_rows] > 0)
         {
            hypre_CSRMatrixJ(offd) = offd_j;
            hypre_CSRMatrixBigJ(offd) = big_offd_j;
            hypre_CSRMatrixData(offd) = offd_data;
         }
         hypre_CSRMatrixNumNonzeros(offd) = offd_i[num_rows];
         hypre_TFree(diag_pos, HYPRE_MEMORY_HOST);
      }
      else
      {
         /* move diagonal element into first space */
         big_offd_j = hypre_CSRMatrixBigJ(offd);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private (i,j,j0,temp)
#endif
         for (i = 0; i < num_rows; i++)
         {
            j0 = diag_i[i];
            for (j=j0; j < diag_i[i+1]; j++)
            {
               if (diag_j[j] == i)
               {
                  /* swap the diagonal entry into position j0 of its row */
                  temp = diag_data[j0];
                  diag_data[j0] = diag_data[j];
                  diag_data[j] = temp;
                  diag_j[j] = diag_j[j0];
                  diag_j[j0] = i;
                  break;
               }
            }
         }

         offd_j = hypre_CSRMatrixJ(offd);
         if (!offd_j && offd_i[num_rows])
         {
            offd_j = hypre_CTAlloc(HYPRE_Int, offd_i[num_rows], hypre_CSRMatrixMemoryLocation(offd));
            hypre_CSRMatrixJ(offd) = offd_j;
         }
      }

      /* generate the nonzero rows inside offd and diag by calling */
hypre_CSRMatrixSetRownnz(diag); hypre_CSRMatrixSetRownnz(offd); /* generate col_map_offd */ nnz_offd = offd_i[num_rows]; if (nnz_offd) { tmp_j = hypre_CTAlloc(HYPRE_BigInt, nnz_offd, HYPRE_MEMORY_HOST); for (i=0; i < nnz_offd; i++) { tmp_j[i] = big_offd_j[i]; } hypre_BigQsort0(tmp_j,0,nnz_offd-1); num_cols_offd = 1; for (i=0; i < nnz_offd-1; i++) { if (tmp_j[i+1] > tmp_j[i]) { tmp_j[num_cols_offd++] = tmp_j[i+1]; } } col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) { col_map_offd[i] = tmp_j[i]; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) #endif for (i=0; i < nnz_offd; i++) { offd_j[i]=hypre_BigBinarySearch(col_map_offd,big_offd_j[i],num_cols_offd); } if (base) { for (i=0; i < num_cols_offd; i++) { col_map_offd[i] -= base; } } hypre_ParCSRMatrixColMapOffd(par_matrix) = col_map_offd; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(big_offd_j, hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = NULL; } hypre_IJMatrixAssembleFlag(matrix) = 1; } hypre_AuxParCSRMatrixDestroy(aux_matrix); hypre_IJMatrixTranslator(matrix) = NULL; return hypre_error_flag; } /****************************************************************************** * * IJMatrix_ParCSR interface * *****************************************************************************/ #include "_hypre_IJ_mv.h" #include "../HYPRE.h" /****************************************************************************** * * hypre_IJMatrixSetValuesOMPParCSR * * sets values in an IJMatrix before assembly, * use of this routine requires that the values in rows are different from each * other, i.e rows[i] != rows[j] for i != j * to ensure accurate threading * *****************************************************************************/ HYPRE_Int hypre_IJMatrixSetValuesOMPParCSR( hypre_IJMatrix *matrix, HYPRE_Int nrows, HYPRE_Int *ncols, const HYPRE_BigInt *rows, const HYPRE_Int 
*row_indexes, const HYPRE_BigInt *cols, const HYPRE_Complex *values ) { hypre_ParCSRMatrix *par_matrix; hypre_CSRMatrix *diag, *offd; hypre_AuxParCSRMatrix *aux_matrix; HYPRE_BigInt *row_partitioning; HYPRE_BigInt *col_partitioning; MPI_Comm comm = hypre_IJMatrixComm(matrix); HYPRE_Int num_procs, my_id; HYPRE_BigInt col_0, col_n, first; //HYPRE_Int cancel_indx; HYPRE_BigInt **aux_j; HYPRE_Complex **aux_data; HYPRE_Int *row_length, *row_space; HYPRE_Int need_aux; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *diag_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *big_offd_j; HYPRE_Complex *offd_data; HYPRE_Int pstart; /*HYPRE_Int current_num_elmts;*/ /*HYPRE_Int max_off_proc_elmts;*/ //HYPRE_Int off_proc_i_indx; //HYPRE_BigInt *off_proc_i; //HYPRE_BigInt *off_proc_j; //HYPRE_Int *offproc_cnt; HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix); //HYPRE_Int max_num_threads; HYPRE_Int error_flag = 0; /*HYPRE_Complex *off_proc_data;*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //max_num_threads = hypre_NumThreads(); par_matrix = (hypre_ParCSRMatrix *) hypre_IJMatrixObject( matrix ); row_partitioning = hypre_IJMatrixRowPartitioning(matrix); col_partitioning = hypre_IJMatrixColPartitioning(matrix); //offproc_cnt = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST); #ifdef HYPRE_NO_GLOBAL_PARTITION col_0 = col_partitioning[0]; col_n = col_partitioning[1]-1; first = hypre_IJMatrixGlobalFirstCol(matrix); pstart = 0; #else col_0 = col_partitioning[my_id]; col_n = col_partitioning[my_id+1]-1; first = col_partitioning[0]; pstart = my_id; #endif if (nrows < 0) { hypre_error_in_arg(2); if (print_level) { hypre_printf("Error! nrows negative! 
HYPRE_IJMatrixSetValues\n"); } return hypre_error_flag; } if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled*/ { HYPRE_BigInt *col_map_offd; HYPRE_Int num_cols_offd; diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); num_cols_offd = hypre_CSRMatrixNumCols(offd); if (num_cols_offd) { col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); offd_j = hypre_CSRMatrixJ(offd); offd_data = hypre_CSRMatrixData(offd); } aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); /*if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix); }*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int j_offd; HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_Int pos_diag, pos_offd; HYPRE_Int len_diag, len_offd; //HYPRE_Int row_len; HYPRE_Int row_local; HYPRE_Int i, j, ii, n; HYPRE_BigInt row; HYPRE_Int not_found, size, indx; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ size = diag_i[row_local+1] - diag_i[row_local] + offd_i[row_local+1] - offd_i[row_local]; if (n > size) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" row %b too long! \n", row); } break; /*return hypre_error_flag; */ } pos_diag = diag_i[row_local]; pos_offd = offd_i[row_local]; len_diag = diag_i[row_local+1]; len_offd = offd_i[row_local+1]; not_found = 1; for (i=0; i < n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd); if (j_offd == -1) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag; */ } for (j=pos_offd; j < len_offd; j++) { if (offd_j[j] == j_offd) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } not_found = 1; } /* diagonal element */ else if (cols[indx] == row) { if (diag_j[pos_diag] != row_local) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag; */ } diag_data[pos_diag] = values[indx]; } else /* insert into diag */ { for (j=pos_diag; j < len_diag; j++) { if (diag_j[j] == 
(HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]); } break; /*return hypre_error_flag;*/ } } indx++; } } /* processor does not own the row */ //else /*search for previous occurrences and cancel them */ /*{ if (aux_matrix) { col_indx = 0; for (i=0; i < off_proc_i_indx; i=i+2) { row_len = off_proc_i[i+1]; if (off_proc_i[i] == row) { for (j=0; j < n; j++) { cnt1 = col_indx; for (k=0; k < row_len; k++) { if (off_proc_j[cnt1] == cols[j]) { off_proc_j[cnt1++] = -1; offproc_cnt[my_thread_num]++; */ /*cancel_indx++;*/ /* if no repetition allowed */ /* off_proc_j[col_indx] = -1; col_indx -= k; break; */ /*} else { cnt1++; } } } col_indx += row_len; } else { col_indx += row_len; } }*/ /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/ //} //} } } /*end parallel region */ } else /* matrix not assembled */ { aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix); /*if (aux_matrix) { current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix); off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix); off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix); off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix); cancel_indx = hypre_AuxParCSRMatrixCancelIndx(aux_matrix); }*/ row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix); row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix); need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix); if (need_aux) { aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix); aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix); } else { diag = hypre_ParCSRMatrixDiag(par_matrix); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); diag_data = hypre_CSRMatrixData(diag); offd = hypre_ParCSRMatrixOffd(par_matrix); offd_i = hypre_CSRMatrixI(offd); if 
(num_procs > 1) { offd_data = hypre_CSRMatrixData(offd); big_offd_j = hypre_CSRMatrixBigJ(offd); if (!big_offd_j) { big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd)); hypre_CSRMatrixBigJ(offd) = big_offd_j; } } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads, my_thread_num; HYPRE_Int len, rest, ns, ne; HYPRE_BigInt *tmp_j = NULL; HYPRE_BigInt *local_j = NULL; HYPRE_Complex *tmp_data = NULL; HYPRE_Complex *local_data = NULL; HYPRE_Int tmp_indx; //HYPRE_Int row_len; HYPRE_Int row_local; HYPRE_Int i, j, ii, n; HYPRE_BigInt row; HYPRE_Int not_found, size, indx; HYPRE_Int old_size, space, cnt; num_threads = hypre_NumActiveThreads(); my_thread_num = hypre_GetThreadNum(); len = nrows/num_threads; rest = nrows - len*num_threads; if (my_thread_num < rest) { ns = my_thread_num*(len+1); ne = (my_thread_num+1)*(len+1); } else { ns = my_thread_num*len+rest; ne = (my_thread_num+1)*len+rest; } for (ii=ns; ii < ne; ii++) { row = rows[ii]; n = ncols ? 
ncols[ii] : 1; if (n == 0) /* empty row */ { continue; } indx = row_indexes[ii]; /* processor owns the row */ if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1]) { row_local = (HYPRE_Int)(row - row_partitioning[pstart]); /* compute local row number */ if (need_aux) { local_j = aux_j[row_local]; local_data = aux_data[row_local]; space = row_space[row_local]; old_size = row_length[row_local]; size = space - old_size; if (size < n) { size = n - size; tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST); tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); } tmp_indx = 0; not_found = 1; size = old_size; for (i=0; i < n; i++) { for (j=0; j < old_size; j++) { if (local_j[j] == cols[indx]) { local_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (size < space) { local_j[size] = cols[indx]; local_data[size++] = values[indx]; } else { tmp_j[tmp_indx] = cols[indx]; tmp_data[tmp_indx++] = values[indx]; } } not_found = 1; indx++; } row_length[row_local] = size+tmp_indx; if (tmp_indx) { aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST); aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST); row_space[row_local] = size+tmp_indx; local_j = aux_j[row_local]; local_data = aux_data[row_local]; } cnt = size; for (i=0; i < tmp_indx; i++) { local_j[cnt] = tmp_j[i]; local_data[cnt++] = tmp_data[i]; } if (tmp_j) { hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_data, HYPRE_MEMORY_HOST); } } else /* insert immediately into data in ParCSRMatrix structure */ { HYPRE_Int offd_indx, diag_indx; HYPRE_Int offd_space, diag_space; HYPRE_Int cnt_diag, cnt_offd; offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local]; diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local]; cnt_diag = diag_indx; cnt_offd = offd_indx; diag_space = diag_i[row_local+1]; offd_space = offd_i[row_local+1]; not_found = 1; for (i=0; i < 
n; i++) { if (cols[indx] < col_0 || cols[indx] > col_n) /* insert into offd */ { for (j=offd_i[row_local]; j < offd_indx; j++) { if (big_offd_j[j] == cols[indx]) { offd_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_offd < offd_space) { big_offd_j[cnt_offd] = cols[indx]; offd_data[cnt_offd++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! Too many elements!\n", row); } break; /*return hypre_error_flag;*/ } } not_found = 1; } else /* insert into diag */ { for (j=diag_i[row_local]; j < diag_indx; j++) { if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0)) { diag_data[j] = values[indx]; not_found = 0; break; } } if (not_found) { if (cnt_diag < diag_space) { diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0); diag_data[cnt_diag++] = values[indx]; } else { hypre_error(HYPRE_ERROR_GENERIC); #ifdef HYPRE_USING_OPENMP #pragma omp atomic #endif error_flag++; if (print_level) { hypre_printf("Error in row %b ! 
Too many elements !\n", row);
                           }
                           break;
                           /*return hypre_error_flag;*/
                        }
                     }
                     not_found = 1;
                  }
                  indx++;
               } /* end loop over the n entries supplied for this row */
               /* remember how far the diag/offd parts of this row are now filled */
               hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
               hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
            }
         }
         /* processor does not own the row */
         /*else
         {
            if (aux_matrix)
            {
               col_indx = 0;
               for (i=0; i < off_proc_i_indx; i=i+2)
               {
                  row_len = off_proc_i[i+1];
                  if (off_proc_i[i] == row)
                  {
                     for (j=0; j < n; j++)
                     {
                        cnt1 = col_indx;
                        for (k=0; k < row_len; k++)
                        {
                           if (off_proc_j[cnt1] == cols[j])
                           {
                              off_proc_j[cnt1++] = -1;
         */
         /*cancel_indx++;*/
         /*offproc_cnt[my_thread_num]++;*/
         /* if no repetition allowed */
         /* off_proc_j[col_indx] = -1;
            col_indx -= k;
            break; */
         /*
                           }
                           else
                           {
                              cnt1++;
                           }
                        }
                     }
                     col_indx += row_len;
                  }
                  else
                  {
                     col_indx += row_len;
                  }
               }*/
         /*hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;*/
         /*}
         }*/
      }
   } /* end parallel region */
}
/*if (error_flag)
{
   return hypre_error_flag;
}
if (aux_matrix)
{
   for (i1=0; i1 < max_num_threads; i1++)
   {
      cancel_indx += offproc_cnt[i1];
   }
   hypre_AuxParCSRMatrixCancelIndx(aux_matrix) = cancel_indx;
}*/
/*hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);*/
return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJMatrixAddToValuesOMPParCSR
 *
 * Adds (accumulates, "+=") values into the rows of an IJMatrix, using an
 * OpenMP-parallel sweep over the caller-supplied rows.
 *
 * Two regimes are handled:
 *   - matrix already assembled: entries must already exist in the ParCSR
 *     diag/offd structure; a missing entry is an error.
 *   - matrix not yet assembled: entries are accumulated either into the
 *     auxiliary row storage (need_aux) or directly into the pre-sized
 *     diag/offd arrays.
 * Rows owned by other processors are recorded per thread and queued into
 * the auxiliary matrix's off-processor buffers after the parallel region.
 *
 * Parameters:
 *   matrix      - the IJ matrix being modified
 *   nrows       - number of rows addressed by this call
 *   ncols       - number of entries per row (NULL means 1 per row)
 *   rows        - global row indices
 *   row_indexes - offset of each row's first entry in cols/values
 *   cols        - global column indices of the entries
 *   values      - values to add
 *
 * Returns hypre_error_flag (errors are recorded via hypre_error()).
 *****************************************************************************/
HYPRE_Int
hypre_IJMatrixAddToValuesOMPParCSR( hypre_IJMatrix       *matrix,
                                    HYPRE_Int             nrows,
                                    HYPRE_Int            *ncols,
                                    const HYPRE_BigInt   *rows,
                                    const HYPRE_Int      *row_indexes,
                                    const HYPRE_BigInt   *cols,
                                    const HYPRE_Complex  *values )
{
   hypre_ParCSRMatrix *par_matrix;
   hypre_CSRMatrix *diag, *offd;
   hypre_AuxParCSRMatrix *aux_matrix;
   HYPRE_BigInt *row_partitioning;
   HYPRE_BigInt *col_partitioning;
   MPI_Comm comm = hypre_IJMatrixComm(matrix);
   HYPRE_Int num_procs, my_id;
   HYPRE_BigInt col_0, col_n, first;   /* local column range [col_0, col_n] and global first column */
   HYPRE_BigInt **aux_j;
   HYPRE_Complex **aux_data;
   HYPRE_Int *row_length, *row_space;
   HYPRE_Int need_aux;
   HYPRE_Int pstart;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *diag_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_BigInt *big_offd_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int current_num_elmts;
   HYPRE_Int max_off_proc_elmts;
   HYPRE_Int off_proc_i_indx;
   HYPRE_BigInt *off_proc_i;
   HYPRE_BigInt *off_proc_j;
   HYPRE_Complex *off_proc_data;
   HYPRE_Int **offproc_cnt;   /* per-thread lists of (row index, value offset) pairs for foreign rows */
   HYPRE_Int print_level = hypre_IJMatrixPrintLevel(matrix);
   HYPRE_Int max_num_threads;
   HYPRE_Int error_flag = 0;
   HYPRE_Int i1;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   max_num_threads = hypre_NumThreads();
   par_matrix = (hypre_ParCSRMatrix*) hypre_IJMatrixObject( matrix );
   row_partitioning = hypre_IJMatrixRowPartitioning(matrix);
   col_partitioning = hypre_IJMatrixColPartitioning(matrix);

   offproc_cnt = hypre_CTAlloc(HYPRE_Int *, max_num_threads, HYPRE_MEMORY_HOST);
   for (i1=0; i1 < max_num_threads; i1++)
      offproc_cnt[i1] = NULL;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   col_0 = col_partitioning[0];
   col_n = col_partitioning[1]-1;
   first = hypre_IJMatrixGlobalFirstCol(matrix);
   pstart = 0;
#else
   col_0 = col_partitioning[my_id];
   col_n = col_partitioning[my_id+1]-1;
   first = col_partitioning[0];
   pstart = my_id;
#endif

   if (hypre_IJMatrixAssembleFlag(matrix)) /* matrix already assembled */
   {
      HYPRE_Int num_cols_offd;
      HYPRE_BigInt *col_map_offd;

      diag = hypre_ParCSRMatrixDiag(par_matrix);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      diag_data = hypre_CSRMatrixData(diag);
      offd = hypre_ParCSRMatrixOffd(par_matrix);
      offd_i = hypre_CSRMatrixI(offd);
      num_cols_offd = hypre_CSRMatrixNumCols(offd);
      if (num_cols_offd)
      {
         col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
         offd_j = hypre_CSRMatrixJ(offd);
         offd_data = hypre_CSRMatrixData(offd);
      }
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int j_offd;
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;   /* static partition [ns,ne) of the nrows rows over threads */
         HYPRE_Int pos_diag, pos_offd;
         HYPRE_Int len_diag, len_offd;
         HYPRE_Int row_local;
         HYPRE_Int i, j, ii, n;
         HYPRE_BigInt row;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int *my_offproc_cnt = NULL;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows/num_threads;
         rest = nrows - len*num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num*(len+1);
            ne = (my_thread_num+1)*(len+1);
         }
         else
         {
            ns = my_thread_num*len+rest;
            ne = (my_thread_num+1)*len+rest;
         }

         for (ii=ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
               /* compute local row number */
               size = diag_i[row_local+1] - diag_i[row_local]
                      + offd_i[row_local+1] - offd_i[row_local];
               if (n > size)
               {
                  /* assembled matrix cannot accept more entries than it already stores */
                  hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                  error_flag++;
                  if (print_level)
                  {
                     hypre_printf (" row %b too long! \n", row);
                  }
                  break;
                  /*return hypre_error_flag; */
               }

               pos_diag = diag_i[row_local];
               pos_offd = offd_i[row_local];
               len_diag = diag_i[row_local+1];
               len_offd = offd_i[row_local+1];
               not_found = 1;

               for (i=0; i < n; i++)
               {
                  if (cols[indx] < col_0 || cols[indx] > col_n)
                  /* insert into offd */
                  {
                     j_offd = hypre_BigBinarySearch(col_map_offd,cols[indx]-first, num_cols_offd);
                     if (j_offd == -1)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     for (j=pos_offd; j < len_offd; j++)
                     {
                        if (offd_j[j] == j_offd)
                        {
                           offd_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     not_found = 1;
                  }
                  /* diagonal element */
                  else if (cols[indx] == row)
                  {
                     /* relies on the diagonal entry being stored first in the row */
                     if (diag_j[pos_diag] != row_local)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                     diag_data[pos_diag] += values[indx];
                  }
                  else  /* insert into diag */
                  {
                     for (j=pos_diag; j < len_diag; j++)
                     {
                        if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                        {
                           diag_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                        error_flag++;
                        if (print_level)
                        {
                           hypre_printf (" Error, element %b %b does not exist\n", row, cols[indx]);
                        }
                        break;
                        /*return hypre_error_flag;*/
                     }
                  }
                  indx++;
               }
            }
            /* not my row */
            /* need to find solution for threaded version!!!! */
            /* could save row number and process later .... */
            else
            {
               /* per-thread growable list: [0]=capacity, [1]=next free slot,
                  then (ii, indx) pairs for rows owned by other processors */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i+2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  size = my_offproc_cnt[0];
                  /* NOTE(review): offproc_cnt[my_thread_num] is not refreshed after
                     hypre_TReAlloc — if the buffer moves, the table keeps a stale
                     pointer that is read after the parallel region; verify upstream */
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
            }
         }
      } /* end parallel region */
   }
   /* not assembled */
   else
   {
      aux_matrix = (hypre_AuxParCSRMatrix*) hypre_IJMatrixTranslator(matrix);
      if (aux_matrix)
      {
         current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
         off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
         off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
         off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
      }
      row_space = hypre_AuxParCSRMatrixRowSpace(aux_matrix);
      row_length = hypre_AuxParCSRMatrixRowLength(aux_matrix);
      need_aux = hypre_AuxParCSRMatrixNeedAux(aux_matrix);
      if (need_aux)
      {
         aux_j = hypre_AuxParCSRMatrixAuxJ(aux_matrix);
         aux_data = hypre_AuxParCSRMatrixAuxData(aux_matrix);
      }
      else
      {
         diag = hypre_ParCSRMatrixDiag(par_matrix);
         diag_i = hypre_CSRMatrixI(diag);
         diag_j = hypre_CSRMatrixJ(diag);
         diag_data = hypre_CSRMatrixData(diag);
         offd = hypre_ParCSRMatrixOffd(par_matrix);
         offd_i = hypre_CSRMatrixI(offd);
         if (num_procs > 1)
         {
            big_offd_j = hypre_CSRMatrixBigJ(offd);
            offd_data = hypre_CSRMatrixData(offd);
            if (!big_offd_j)
            {
               /* offd columns are kept as global (big) indices until assembly */
               big_offd_j = hypre_CTAlloc(HYPRE_BigInt, offd_i[hypre_CSRMatrixNumRows(offd)], hypre_CSRMatrixMemoryLocation(offd));
               hypre_CSRMatrixBigJ(offd) = big_offd_j;
            }
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
      {
         HYPRE_Int num_threads, my_thread_num;
         HYPRE_Int len, rest, ns, ne;
         HYPRE_BigInt *tmp_j = NULL;       /* overflow buffer when a row outgrows its space */
         HYPRE_BigInt *local_j = NULL;
         HYPRE_Complex *tmp_data = NULL;
         HYPRE_Complex *local_data = NULL;
         HYPRE_Int tmp_indx;
         HYPRE_Int row_local;
         HYPRE_BigInt row;
         HYPRE_Int i, j, ii, n;
         HYPRE_Int not_found, size, indx;
         HYPRE_Int old_size, space, cnt;
         HYPRE_Int *my_offproc_cnt = NULL;

         num_threads = hypre_NumActiveThreads();
         my_thread_num = hypre_GetThreadNum();
         len = nrows/num_threads;
         rest = nrows - len*num_threads;
         if (my_thread_num < rest)
         {
            ns = my_thread_num*(len+1);
            ne = (my_thread_num+1)*(len+1);
         }
         else
         {
            ns = my_thread_num*len+rest;
            ne = (my_thread_num+1)*len+rest;
         }

         for (ii=ns; ii < ne; ii++)
         {
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = row_indexes[ii];
            if (row >= row_partitioning[pstart] && row < row_partitioning[pstart+1])
            {
               row_local = (HYPRE_Int)(row - row_partitioning[pstart]);
               /* compute local row number */
               if (need_aux)
               {
                  local_j = aux_j[row_local];
                  local_data = aux_data[row_local];
                  space = row_space[row_local];
                  old_size = row_length[row_local];
                  size = space - old_size;
                  if (size < n)
                  {
                     /* worst case: all n entries are new and overflow into tmp buffers */
                     size = n - size;
                     tmp_j = hypre_CTAlloc(HYPRE_BigInt, size, HYPRE_MEMORY_HOST);
                     tmp_data = hypre_CTAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST);
                  }
                  tmp_indx = 0;
                  not_found = 1;
                  size = old_size;
                  for (i=0; i < n; i++)
                  {
                     /* accumulate into an existing column if present */
                     for (j=0; j < old_size; j++)
                     {
                        if (local_j[j] == cols[indx])
                        {
                           local_data[j] += values[indx];
                           not_found = 0;
                           break;
                        }
                     }
                     if (not_found)
                     {
                        if (size < space)
                        {
                           local_j[size] = cols[indx];
                           local_data[size++] = values[indx];
                        }
                        else
                        {
                           tmp_j[tmp_indx] = cols[indx];
                           tmp_data[tmp_indx++] = values[indx];
                        }
                     }
                     not_found = 1;
                     indx++;
                  }
                  row_length[row_local] = size+tmp_indx;
                  if (tmp_indx)
                  {
                     /* grow the row and append the overflowed entries */
                     aux_j[row_local] = hypre_TReAlloc(aux_j[row_local],HYPRE_BigInt, size+tmp_indx, HYPRE_MEMORY_HOST);
                     aux_data[row_local] = hypre_TReAlloc(aux_data[row_local], HYPRE_Complex, size+tmp_indx, HYPRE_MEMORY_HOST);
                     row_space[row_local] = size+tmp_indx;
                     local_j = aux_j[row_local];
                     local_data = aux_data[row_local];
                  }
                  cnt = size;
                  for (i=0; i < tmp_indx; i++)
                  {
                     local_j[cnt] = tmp_j[i];
                     local_data[cnt++] = tmp_data[i];
                  }
                  if (tmp_j)
                  {
                     hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
                     hypre_TFree(tmp_data, HYPRE_MEMORY_HOST);
                  }
               }
               else /* insert immediately into data in ParCSRMatrix structure */
               {
                  HYPRE_Int offd_indx, diag_indx;
                  HYPRE_Int offd_space, diag_space;
                  HYPRE_Int cnt_diag, cnt_offd;

                  offd_indx = hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local];
                  diag_indx = hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local];
                  cnt_diag = diag_indx;
                  cnt_offd = offd_indx;
                  diag_space = diag_i[row_local+1];
                  offd_space = offd_i[row_local+1];
                  not_found = 1;

                  for (i=0; i < n; i++)
                  {
                     if (cols[indx] < col_0 || cols[indx] > col_n)
                     /* insert into offd */
                     {
                        for (j=offd_i[row_local]; j < offd_indx; j++)
                        {
                           if (big_offd_j[j] == cols[indx])
                           {
                              offd_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_offd < offd_space)
                           {
                              big_offd_j[cnt_offd] = cols[indx];
                              offd_data[cnt_offd++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements!\n", row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     else /* insert into diag */
                     {
                        for (j=diag_i[row_local]; j < diag_indx; j++)
                        {
                           if (diag_j[j] == (HYPRE_Int)(cols[indx]-col_0))
                           {
                              diag_data[j] += values[indx];
                              not_found = 0;
                              break;
                           }
                        }
                        if (not_found)
                        {
                           if (cnt_diag < diag_space)
                           {
                              diag_j[cnt_diag] = (HYPRE_Int)(cols[indx]-col_0);
                              diag_data[cnt_diag++] = values[indx];
                           }
                           else
                           {
                              hypre_error(HYPRE_ERROR_GENERIC);
#ifdef HYPRE_USING_OPENMP
#pragma omp atomic
#endif
                              error_flag++;
                              if (print_level)
                              {
                                 hypre_printf("Error in row %b ! Too many elements !\n", row);
                              }
                              break;
                              /*return hypre_error_flag;*/
                           }
                        }
                        not_found = 1;
                     }
                     indx++;
                  }
                  hypre_AuxParCSRMatrixIndxDiag(aux_matrix)[row_local] = cnt_diag;
                  hypre_AuxParCSRMatrixIndxOffd(aux_matrix)[row_local] = cnt_offd;
               }
            }
            /* not my row */
            else
            {
               /* same per-thread (ii, indx) bookkeeping as in the assembled branch */
               if (!my_offproc_cnt)
               {
                  my_offproc_cnt = hypre_CTAlloc(HYPRE_Int, 200, HYPRE_MEMORY_HOST);
                  offproc_cnt[my_thread_num] = my_offproc_cnt;
                  my_offproc_cnt[0] = 200;
                  my_offproc_cnt[1] = 2;
               }
               i = my_offproc_cnt[1];
               if (i+2 < my_offproc_cnt[0])
               {
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
               else
               {
                  size = my_offproc_cnt[0];
                  /* NOTE(review): same stale-pointer concern after hypre_TReAlloc
                     as in the assembled branch above — verify upstream */
                  my_offproc_cnt = hypre_TReAlloc(my_offproc_cnt, HYPRE_Int, size+200, HYPRE_MEMORY_HOST);
                  my_offproc_cnt[0] += 200;
                  my_offproc_cnt[i] = ii;
                  my_offproc_cnt[i+1] = indx;
                  my_offproc_cnt[1] += 2;
               }
            }
         }
      } /*end parallel region */
   }

   if (error_flag)
   {
      return hypre_error_flag;
   }

   /* make sure an auxiliary matrix exists to receive the off-processor entries */
   if (!aux_matrix)
   {
      HYPRE_Int size = (HYPRE_Int)(row_partitioning[pstart+1]-row_partitioning[pstart]);
      hypre_AuxParCSRMatrixCreate(&aux_matrix, size, size, NULL);
      hypre_AuxParCSRMatrixNeedAux(aux_matrix) = 0;
      hypre_IJMatrixTranslator(matrix) = aux_matrix;
   }

   /* serial post-pass: move each thread's recorded foreign-row entries into
      the auxiliary matrix's off-processor buffers, growing them as needed */
   for (i1 = 0; i1 < max_num_threads; i1++)
   {
      if (offproc_cnt[i1])
      {
         HYPRE_Int *my_offproc_cnt = offproc_cnt[i1];
         HYPRE_Int i, i2, ii, n, indx;
         HYPRE_BigInt row;
         for (i2 = 2; i2 < my_offproc_cnt[1]; i2+=2)
         {
            ii = my_offproc_cnt[i2];
            row = rows[ii];
            n = ncols ? ncols[ii] : 1;
            if (n == 0) /* empty row */
            {
               continue;
            }
            indx = my_offproc_cnt[i2+1];
            current_num_elmts = hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix);
            max_off_proc_elmts = hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix);
            off_proc_i_indx = hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix);
            off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
            off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
            off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            if (!max_off_proc_elmts)
            {
               /* first off-processor entry: allocate the buffers */
               max_off_proc_elmts = hypre_max(n,1000);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParCSRMatrixOffProcI(aux_matrix);
               off_proc_j = hypre_AuxParCSRMatrixOffProcJ(aux_matrix);
               off_proc_data = hypre_AuxParCSRMatrixOffProcData(aux_matrix);
            }
            else if (current_num_elmts + n > max_off_proc_elmts)
            {
               /* grow the buffers to accommodate this row's entries */
               max_off_proc_elmts += 3*n;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt, 2*max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_j = hypre_TReAlloc(off_proc_j, HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data,HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParCSRMatrixMaxOffProcElmts(aux_matrix) = max_off_proc_elmts;
               hypre_AuxParCSRMatrixOffProcI(aux_matrix) = off_proc_i;
               hypre_AuxParCSRMatrixOffProcJ(aux_matrix) = off_proc_j;
               hypre_AuxParCSRMatrixOffProcData(aux_matrix) = off_proc_data;
            }
            /* off_proc_i stores (row, count) pairs; j/data store the entries */
            off_proc_i[off_proc_i_indx++] = row;
            off_proc_i[off_proc_i_indx++] = n;
            for (i=0; i < n; i++)
            {
               off_proc_j[current_num_elmts] = cols[indx];
               off_proc_data[current_num_elmts++] = values[indx++];
            }
            hypre_AuxParCSRMatrixOffProcIIndx(aux_matrix) = off_proc_i_indx;
            hypre_AuxParCSRMatrixCurrentOffProcElmts(aux_matrix) = current_num_elmts;
         }
         hypre_TFree(offproc_cnt[i1], HYPRE_MEMORY_HOST);
      }
   }
   hypre_TFree(offproc_cnt, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
test_nvector_openmpdev.c
/* ----------------------------------------------------------------- * Programmer(s): David J. Gardner @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the testing routine to check the OpenMP 4.5 NVECTOR * module implementation. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <sundials/sundials_types.h> #include <nvector/nvector_openmpdev.h> #include <sundials/sundials_math.h> #include "test_nvector.h" #include <omp.h> /* OpenMPDEV vector specific tests */ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid); /* ---------------------------------------------------------------------- * Main NVector Testing Routine * --------------------------------------------------------------------*/ int main(int argc, char *argv[]) { int fails = 0; /* counter for test failures */ int retval; /* function return value */ sunindextype length; /* vector length */ N_Vector U, V, W, X, Y, Z; /* test vectors */ int print_timing; /* turn timing on/off */ /* check input and set vector length */ if (argc < 3){ printf("ERROR: TWO (2) Inputs required: vector length and print timing \n"); return(-1); } length = atol(argv[1]); if (length <= 0) { printf("ERROR: length of vector must be a positive integer \n"); return(-1); } print_timing = atoi(argv[2]); SetTiming(print_timing, 0); printf("Testing the OpenMP DEV N_Vector \n"); printf("Vector length %ld \n", (long int) length); printf("\n omp_get_default_device = %d \n", omp_get_default_device()); printf("\n omp_get_num_devices = %d \n", omp_get_num_devices()); printf("\n 
omp_get_initial_device = %d \n", omp_get_initial_device()); printf("\n omp_is_initial_device = %d \n", omp_is_initial_device()); /* Create new vectors */ W = N_VNewEmpty_OpenMPDEV(length); if (W == NULL) { printf("FAIL: Unable to create a new empty vector \n\n"); return(1); } X = N_VNew_OpenMPDEV(length); if (X == NULL) { N_VDestroy(W); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* Check vector ID */ fails += Test_N_VGetVectorID(X, SUNDIALS_NVEC_OPENMPDEV, 0); /* Test clone functions */ fails += Test_N_VCloneEmpty(X, 0); fails += Test_N_VClone(X, length, 0); fails += Test_N_VCloneEmptyVectorArray(5, X, 0); fails += Test_N_VCloneVectorArray(5, X, length, 0); /* Clone additional vectors for testing */ Y = N_VClone(X); if (Y == NULL) { N_VDestroy(W); N_VDestroy(X); printf("FAIL: Unable to create a new vector \n\n"); return(1); } Z = N_VClone(X); if (Z == NULL) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* Standard vector operation tests */ printf("\nTesting standard vector operations:\n\n"); fails += Test_N_VConst(X, length, 0); fails += Test_N_VLinearSum(X, Y, Z, length, 0); fails += Test_N_VProd(X, Y, Z, length, 0); fails += Test_N_VDiv(X, Y, Z, length, 0); fails += Test_N_VScale(X, Z, length, 0); fails += Test_N_VAbs(X, Z, length, 0); fails += Test_N_VInv(X, Z, length, 0); fails += Test_N_VAddConst(X, Z, length, 0); fails += Test_N_VDotProd(X, Y, length, length, 0); fails += Test_N_VMaxNorm(X, length, 0); fails += Test_N_VWrmsNorm(X, Y, length, 0); fails += Test_N_VWrmsNormMask(X, Y, Z, length, length, 0); fails += Test_N_VMin(X, length, 0); fails += Test_N_VWL2Norm(X, Y, length, length, 0); fails += Test_N_VL1Norm(X, length, length, 0); fails += Test_N_VCompare(X, Z, length, 0); fails += Test_N_VInvTest(X, Z, length, 0); fails += Test_N_VConstrMask(X, Y, Z, length, 0); fails += Test_N_VMinQuotient(X, Y, length, 0); /* Fused and vector array operations tests (disabled) */ 
printf("\nTesting fused and vector array operations (disabled):\n\n"); /* create vector and disable all fused and vector array operations */ U = N_VNew_OpenMPDEV(length); retval = N_VEnableFusedOps_OpenMPDEV(U, SUNFALSE); if (U == NULL || retval != 0) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(U, length, 0); fails += Test_N_VScaleAddMulti(U, length, 0); fails += Test_N_VDotProdMulti(U, length, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(U, length, 0); fails += Test_N_VScaleVectorArray(U, length, 0); fails += Test_N_VConstVectorArray(U, length, 0); fails += Test_N_VWrmsNormVectorArray(U, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(U, length, length, 0); fails += Test_N_VScaleAddMultiVectorArray(U, length, 0); fails += Test_N_VLinearCombinationVectorArray(U, length, 0); /* Fused and vector array operations tests (enabled) */ printf("\nTesting fused and vector array operations (enabled):\n\n"); /* create vector and enable all fused and vector array operations */ V = N_VNew_OpenMPDEV(length); retval = N_VEnableFusedOps_OpenMPDEV(V, SUNTRUE); if (V == NULL || retval != 0) { N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); N_VDestroy(U); printf("FAIL: Unable to create a new vector \n\n"); return(1); } /* fused operations */ fails += Test_N_VLinearCombination(V, length, 0); fails += Test_N_VScaleAddMulti(V, length, 0); fails += Test_N_VDotProdMulti(V, length, length, 0); /* vector array operations */ fails += Test_N_VLinearSumVectorArray(V, length, 0); fails += Test_N_VScaleVectorArray(V, length, 0); fails += Test_N_VConstVectorArray(V, length, 0); fails += Test_N_VWrmsNormVectorArray(V, length, 0); fails += Test_N_VWrmsNormMaskVectorArray(V, length, length, 0); fails += Test_N_VScaleAddMultiVectorArray(V, length, 0); fails += Test_N_VLinearCombinationVectorArray(V, length, 0); 
/* Free vectors */ N_VDestroy(U); N_VDestroy(V); N_VDestroy(W); N_VDestroy(X); N_VDestroy(Y); N_VDestroy(Z); /* Print result */ if (fails) { printf("FAIL: NVector module failed %i tests \n\n", fails); } else { printf("SUCCESS: NVector module passed all tests \n\n"); } return(fails); } /* ---------------------------------------------------------------------- * OpenMPDEV specific tests * --------------------------------------------------------------------*/ /* -------------------------------------------------------------------- * Test for the CUDA N_Vector N_VMake_OpenMPDEV function. Requires N_VConst * to check data. */ int Test_N_VMake_OpenMPDEV(N_Vector X, sunindextype length, int myid) { int failure = 0; realtype *h_data, *d_data; N_Vector Y; N_VConst(NEG_HALF, X); N_VCopyFromDevice_OpenMPDEV(X); h_data = N_VGetHostArrayPointer_OpenMPDEV(X); d_data = N_VGetDeviceArrayPointer_OpenMPDEV(X); /* Case 1: h_data and d_data are not null */ Y = N_VMake_OpenMPDEV(length, h_data, d_data); if (Y == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector is NULL \n \n"); return(1); } if (N_VGetHostArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector host data == NULL \n \n"); N_VDestroy(Y); return(1); } if (N_VGetDeviceArrayPointer_OpenMPDEV(Y) == NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV, Proc %d \n", myid); printf(" Vector device data -= NULL \n \n"); N_VDestroy(Y); return(1); } failure += check_ans(NEG_HALF, Y, length); if (failure) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 1, Proc %d \n", myid); printf(" Failed N_VConst check \n \n"); N_VDestroy(Y); return(1); } if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 1 \n"); } N_VDestroy(Y); /* Case 2: data is null */ Y = N_VMake_OpenMPDEV(length, NULL, NULL); if (Y != NULL) { printf(">>> FAILED test -- N_VMake_OpenMPDEV Case 2, Proc %d \n", myid); printf(" Vector is not NULL \n \n"); return(1); 
} if (myid == 0) { printf("PASSED test -- N_VMake_OpenMPDEV Case 2 \n"); } N_VDestroy(Y); return(failure); } /* ---------------------------------------------------------------------- * Implementation specific utility functions for vector tests * --------------------------------------------------------------------*/ int check_ans(realtype ans, N_Vector X, sunindextype local_length) { int failure = 0; sunindextype i; realtype *Xdata; N_VCopyFromDevice_OpenMPDEV(X); Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); /* check vector data */ for (i = 0; i < local_length; i++) { failure += FNEQ(Xdata[i], ans); } return (failure > ZERO) ? (1) : (0); } booleantype has_data(N_Vector X) { realtype *Xdata = N_VGetHostArrayPointer_OpenMPDEV(X); if (Xdata == NULL) return SUNFALSE; else return SUNTRUE; } void set_element(N_Vector X, sunindextype i, realtype val) { realtype *xdev; int dev; xdev = N_VGetDeviceArrayPointer_OpenMPDEV(X); dev = omp_get_default_device(); #pragma omp target map(to:val) is_device_ptr(xdev) device(dev) { xdev[i] = val; } } realtype get_element(N_Vector X, sunindextype i) { realtype *data; N_VCopyFromDevice_OpenMPDEV(X); data = N_VGetHostArrayPointer_OpenMPDEV(X); return data[i]; } double max_time(N_Vector X, double time) { /* not running in parallel, just return input time */ return(time); } void sync_device() { /* not running on DEV, just return */ return; }
util.c
/** @file
    @brief Utility functions for APPFS ex10.
    @author Tri-Peter Shrive
*/

#include "ex10.h"

/** sets entry at index of prime numbers to 1
    @returns number of primes strictly below ceiling */
int get_primes(
   unsigned int* is_prime,   /**< pointer to array of size ceiling, with memory set to zero */
   unsigned int ceiling      /**< only numbers below this value will be assessed for primality */
   )
{
   unsigned int rest;
   unsigned int count;
   unsigned int last_div;
   unsigned int i;
   unsigned int j;

   /* BUGFIX: previously is_prime[2] was written unconditionally, which is an
      out-of-bounds store (and a wrong count of 1) whenever ceiling <= 2 */
   if( ceiling <= 2 )
      return 0;

   is_prime[2] = 1;
   count = 1;

#ifdef THREADS
#pragma omp parallel for default(none) shared(is_prime, ceiling, count) private(i, j, rest, last_div) num_threads(THREADS)
#endif
   for( i = 3; i < ceiling; i += 2 )   /* even numbers > 2 are never prime */
   {
      /* trial division only up to ceil(sqrt(i)) */
      last_div = ( unsigned int ) ceil( sqrt( i ) );
      for( j = 2; j <= last_div; j++ )
      {
         rest = i % j;
         if( 0 == rest )
            break;   /* divisor found: composite */
         else if( j == last_div )
         {
            /* survived every trial divisor: prime */
#ifdef THREADS
#pragma omp atomic
#endif
            count++;
            is_prime[i] = 1;
         }
      }
   }
   return count;
}

/** sifts entry up in binary heap (min-heap keyed on distance) */
int sift_up(
   unsigned int* heap,         /**< heap entries */
   unsigned int* heap_index,   /**< nodes index in heap */
   const unsigned long long int* const distance,   /**< nodes distance from source */
   unsigned int current        /**< heap position of the entry to be sifted */
   )
{
   unsigned int a = heap[current];
   unsigned long long int dist = distance[a];
   unsigned int parent;
   unsigned int b;

   /* move parents down until the parent's key is no larger than ours */
   while( 1 < current )
   {
      parent = current / 2;
      b = heap[parent];
      if( distance[b] <= dist )
         break;
      heap[current] = b;
      heap_index[b] = current;
      current = parent;
   }
   heap[current] = a;
   heap_index[a] = current;
   return 0;
}

/** sifts entry down in binary heap (min-heap keyed on distance) */
int sift_down(
   unsigned int* heap,         /**< heap entries */
   unsigned int* heap_index,   /**< nodes index in heap */
   const unsigned long long int* const distance,   /**< nodes distance from source */
   unsigned int current,       /**< heap position of the entry to be sifted */
   const unsigned int heap_size   /**< size of heap */
   )
{
   unsigned int child = current + current;
   unsigned int a = heap[current];
   unsigned long long int dist = distance[a];
   unsigned int b;

   while( child <= heap_size )
   {
      b = heap[child];
      /* pick the smaller of the two children */
      if( child + 1 <= heap_size )
      {
         if( distance[ heap[ child + 1 ] ] < distance[b] )
         {
            child++;
            b = heap[child];
         }
      }
      if( distance[b] >= dist )
         break;
      heap[current] = b;
      heap_index[b] = current;
      current = child;
      child += child;
   }
   heap[current] = a;
   heap_index[a] = current;
   return 0;
}

/** calculates steiner tree using dijkstra algorithm and additional heuristic
    @returns number of terminals connected to steiner tree */
int get_steiner_tree(
   const unsigned int* const is_prime,     /**< if index is prime entry is one else zero */
   const unsigned int* const terminals,    /**< terminal nodes */
   const unsigned int num_terminals,       /**< number of terminal nodes */
   const unsigned int num_nodes,           /**< number of nodes */
   const unsigned int num_edges,           /**< number of edges */
   const unsigned int* const first_neighbours_index,   /**< index of tails first neighbour in sorted arrays */
   const unsigned int* const num_neighbours,           /**< number of neighbours for each node */
   const unsigned long long int* const sorted_weights, /**< array of weights sorted by tail */
   const unsigned int* const sorted_heads,             /**< array of heads sorted by tail */
   const unsigned int* const sorted_tails,             /**< array of tails with an entry for each of their edges */
   unsigned int* tree_edges,       /**< entries will be set to 1 if sorted lists entries are part of steiner tree else 0 */
   unsigned int* prev_edge_index,  /**< nodes previous edge in dijkstra */
   const unsigned int source       /**< source terminal */
   )
{
   assert( is_prime );
   assert( terminals );
   assert( first_neighbours_index );
   assert( sorted_weights );
   assert( sorted_heads );
   assert( tree_edges );
   assert( prev_edge_index );
   assert( source < num_nodes );

   unsigned int i;
   unsigned int* heap = NULL;
   unsigned int* heap_index = NULL;
   unsigned int* tree_nodes = NULL;
   unsigned long long int* distance = NULL;

   // heap[0] reserved for indicating empty heap
   heap = malloc( ( num_nodes + 1 ) * sizeof( unsigned int ) );
   heap_index = malloc( ( num_nodes + 1) * sizeof( unsigned int ) );
   tree_nodes = calloc( num_nodes, sizeof( unsigned int ) );
   distance = malloc( num_nodes * sizeof( unsigned long long int ) );
   assert( heap );
   assert( heap_index );
   assert( tree_nodes );
   assert( distance );

   /* INT_MAX marks "not on heap" / "no predecessor" */
   for( i = 0; i < num_nodes; i++ )
   {
      heap[i] = INT_MAX;
      heap_index[i] = INT_MAX;
      prev_edge_index[i] = INT_MAX;
      distance[i] = LLONG_MAX;
   }
   heap[num_nodes] = INT_MAX;
   heap_index[num_nodes] = INT_MAX;

   unsigned int heap_size = 0;

   /* seed the queue with the source terminal at distance zero */
   distance[source] = 0;
   heap_size++;
   heap[heap_size] = source;
   heap_index[source] = heap_size;
   tree_nodes[source] = 1;

   unsigned int count = 1;
   unsigned int current;
   unsigned int tail;
   unsigned int head;
   unsigned int root;
   unsigned int edge_index;
   unsigned long long int dist;

   // loop while heap if not empty
   while( heap_size != 0 )
   {
      // pop priority one node from heap
      tail = heap[1];
      assert( tail < num_nodes );
      heap_index[tail] = INT_MAX;
      root = heap[heap_size];
      assert( root < num_nodes );
      heap[1] = root;
      heap_index[root] = 1;
      heap_size--;
      sift_down( heap, heap_index, distance, 1, heap_size );

      // steiner heuristic
      // is tail a terminal?
      if( 1 == is_prime[tail] )
      {
         current = tail;
         /* splice the shortest path from this terminal back into the subtree,
            re-queueing path nodes with distance zero */
         // is this node aleady part of subtree?
         while( 0 == tree_nodes[current] )
         {
            edge_index = prev_edge_index[current];
            // add edge to steiner tree
            tree_edges[edge_index] = 1;
            tree_nodes[current] = 1;
            if( current == tail )
            {
               count++;
            }
            // is current node on heap?
            if( INT_MAX == heap_index[current] )
            {
               // add node with distance zero
               distance[current] = 0;
               heap_size++;
               assert( heap_size != num_nodes + 1 );
               heap[heap_size] = current;
               heap_index[current] = heap_size;
               sift_up( heap, heap_index, distance, heap_size );
            }
            else
            {
               // node already on heap
               // set distance to zero
               // and sift up from current position
               sift_up( heap, heap_index, distance, heap_index[current] );
            }
            current = sorted_tails[edge_index];
         }
         if( count == num_terminals )
            break;
      }

      // dijkstra algorithm
      for( i = 0; i < num_neighbours[tail]; i++ )
      {
         // get neighbour
         edge_index = first_neighbours_index[tail] + i;
         assert( edge_index < num_edges );
         head = sorted_heads[edge_index];
         assert( head < num_nodes );
         // is node already part of steiner tree?
         if( 0 == tree_nodes[head] )
         {
            dist = distance[tail] + sorted_weights[edge_index];
            assert( dist < LLONG_MAX );
            // have we beaten the current best distance?
            if( dist < distance[head] )
            {
               distance[head] = dist;
               prev_edge_index[head] = edge_index;
               // is head already on heap?
               if( heap_index[head] == INT_MAX )
               {
                  // add head at bottom of heap and sift up
                  // until at correct place in priority queue
                  heap_size++;
                  assert( heap_size != num_nodes + 1 );
                  heap[heap_size] = head;
                  heap_index[head] = heap_size;
                  sift_up( heap, heap_index, distance, heap_size );
               }
               else
               {
                  // find heap entry for head using heap_index
                  // and sift up
                  sift_up( heap, heap_index, distance, heap_index[head] );
               }
            }
         }
      }
   }

   free( heap );
   free( heap_index );
   free( tree_nodes );
   free( distance );
   return count;
}

/** verifies steiner tree
    @returns 1 if steiner tree is connected else 0 */
int is_tree_valid(
   const unsigned int* const sorted_heads,      /**< array of heads sorted by tail */
   const unsigned int* const sorted_tails,      /**< array of tails one for each edge */
   const unsigned int* const prev_edge_index,   /**< nodes previous edge in dijkstra */
   const unsigned int* const terminals,         /**< terminal nodes */
   const unsigned int num_terminals,            /**< number of terminals */
   const unsigned int num_nodes,                /**< number of nodes */
   const unsigned int num_edges                 /**< number of edges */
   )
{
   unsigned int i;
   unsigned int t;
   unsigned int prev;
   unsigned int current;
   unsigned int* visited = NULL;

   /* BUGFIX: visited is indexed by node id (current in [0, num_nodes)), but was
      allocated with num_edges entries — a heap overflow whenever
      num_nodes > num_edges */
   visited = calloc( num_nodes, sizeof( unsigned int ) );
   assert( visited );

   /* walk each terminal's predecessor chain; every chain must reach either the
      source (terminal 0's chain end) or a node already proven connected */
   for( i = 0; i < num_terminals; i++ )
   {
      t = terminals[i];
      current = t;
      while( 0 == visited[current] )
      {
         visited[current] = 1;
         prev = prev_edge_index[current];
         if( INT_MAX == prev )
         {
            if( 0 == i )
               break;   /* the source terminal has no predecessor by design */
            else
            {
               printf("ERROR: terminal %u not connected\n", t);
               /* BUGFIX: visited was leaked on this early-return path */
               free( visited );
               return 0;
            }
         }
         current = sorted_tails[prev];
      }
   }
   free( visited );
   return 1;
}
target_data_messages.c
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s void foo() { } int main(int argc, char **argv) { int a; #pragma omp target data // expected-error {{expected at least one map clause for '#pragma omp target data'}} {} L1: foo(); #pragma omp target data map(a) { foo(); goto L1; // expected-error {{use of undeclared label 'L1'}} } goto L2; // expected-error {{use of undeclared label 'L2'}} #pragma omp target data map(a) L2: foo(); #pragma omp target data map(a)(i) // expected-warning {{extra tokens at the end of '#pragma omp target data' are ignored}} { foo(); } #pragma omp target unknown // expected-warning {{extra tokens at the end of '#pragma omp target' are ignored}} { foo(); } return 0; }
text_parser.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file text_parser.h
 * \brief iterator parser to parse text format
 * \author Tianqi Chen
 */
#ifndef DMLC_DATA_TEXT_PARSER_H_
#define DMLC_DATA_TEXT_PARSER_H_

#include <dmlc/data.h>
#include <dmlc/omp.h>
#include <vector>
#include <cstring>
#include <algorithm>
#include "./row_block.h"
#include "./parser.h"

namespace dmlc {
namespace data {
/*!
 * \brief Text parser that parses the input lines
 * and returns rows in input data
 */
template <typename IndexType>
class TextParserBase : public ParserImpl<IndexType> {
 public:
  explicit TextParserBase(InputSplit *source,
                          int nthread)
      : bytes_read_(0), source_(source) {
    int maxthread;
    // NOTE(review): every thread in the region writes the same value into
    // maxthread, so the result is deterministic, but it reads as a data
    // race to analyzers -- presumably only used to probe the runtime's
    // available parallelism; confirm before changing.
    #pragma omp parallel
    {
      maxthread = std::max(omp_get_num_procs() / 2 - 4, 1);
    }
    nthread_ = std::min(maxthread, nthread);
  }
  virtual ~TextParserBase() {
    delete source_;
  }
  virtual void BeforeFirst(void) {
    source_->BeforeFirst();
  }
  virtual size_t BytesRead(void) const {
    return bytes_read_;
  }
  virtual bool ParseNext(std::vector<RowBlockContainer<IndexType> > *data) {
    return FillData(data);
  }

 protected:
  /*!
   * \brief parse data into out
   * \param begin beginning of buffer
   * \param end end of buffer
   * \param out container the parsed rows are appended to
   */
  virtual void ParseBlock(char *begin,
                          char *end,
                          RowBlockContainer<IndexType> *out) = 0;
  /*!
   * \brief read in next several blocks of data
   * \param data vector of data to be returned
   * \return true if the data is loaded, false if reach end
   */
  inline bool FillData(std::vector<RowBlockContainer<IndexType> > *data);
  /*!
   * \brief start from bptr, go backward and find first endof line
   * \param bptr end position to go backward
   * \param begin the beginning position of buffer
   * \return position of first endof line going backward, or begin if no
   *         line break exists in the scanned range
   */
  inline char* BackFindEndLine(char *bptr,
                               char *begin) {
    for (; bptr != begin; --bptr) {
      if (*bptr == '\n' || *bptr == '\r') return bptr;
    }
    return begin;
  }

 private:
  // number of threads used when parsing one chunk
  int nthread_;
  // number of bytes read so far
  size_t bytes_read_;
  // source split that provides the data
  InputSplit *source_;
};

// implementation
template <typename IndexType>
inline bool TextParserBase<IndexType>::
FillData(std::vector<RowBlockContainer<IndexType> > *data) {
  InputSplit::Blob chunk;
  if (!source_->NextChunk(&chunk)) return false;
  const int nthread = omp_get_max_threads();
  // reserve space for data
  data->resize(nthread);
  bytes_read_ += chunk.size;
  CHECK_NE(chunk.size, 0U);
  char *head = reinterpret_cast<char*>(chunk.dptr);
  // Split the chunk into nthread byte ranges, then snap each boundary
  // backward to the previous line break so no line is cut in half.
  #pragma omp parallel num_threads(nthread)
  {
    // threadid
    int tid = omp_get_thread_num();
    size_t nstep = (chunk.size + nthread - 1) / nthread;
    size_t sbegin = std::min(tid * nstep, chunk.size);
    size_t send = std::min((tid + 1) * nstep, chunk.size);
    char *pbegin = BackFindEndLine(head + sbegin, head);
    char *pend;
    if (tid + 1 == nthread) {
      // last thread takes the chunk tail even without a trailing newline
      pend = head + send;
    } else {
      pend = BackFindEndLine(head + send, head);
    }
    ParseBlock(pbegin, pend, &(*data)[tid]);
  }
  this->data_ptr_ = 0;
  return true;
}
}  // namespace data
}  // namespace dmlc
#endif  // DMLC_DATA_TEXT_PARSER_H_
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/distort.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/list.h" #include "magick/matrix.h" #include "magick/memory_.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/shear.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" /* Numerous internal routines for image distortions. 
*/

/* Map the six user-visible affine arguments (sx,ry,rx,sy,tx,ty) onto the
   internal coefficient order (c0,c2,c4,c1,c3,c5).  Slots 0 and 5 already
   agree, so only the middle four entries move. */
static inline void AffineArgsToCoefficients(double *affine)
{
  const double
    arg_ry=affine[1],
    arg_rx=affine[2],
    arg_sy=affine[3],
    arg_tx=affine[4];

  affine[1]=arg_rx;
  affine[2]=arg_tx;
  affine[3]=arg_ry;
  affine[4]=arg_sy;
}

/* Inverse of AffineArgsToCoefficients(): map the internal coefficient
   order (c0,c1,c2,c3,c4,c5) back to the external argument order
   sx,ry,rx,sy,tx,ty.  Slots 0 and 5 again remain in place. */
static inline void CoefficientsToAffineArgs(double *coeff)
{
  const double
    coef_1=coeff[1],
    coef_2=coeff[2],
    coef_3=coeff[3],
    coef_4=coeff[4];

  coeff[1]=coef_3;
  coeff[2]=coef_1;
  coeff[3]=coef_4;
  coeff[4]=coef_2;
}

static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* Closed-form inverse of a 2x3 affine transform; see "Digital Image
     Warping" by George Wolberg, page 50. */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=det*coeff[4];
  inverse[3]=det*(-coeff[3]);
  inverse[1]=det*(-coeff[1]);
  inverse[4]=det*coeff[0];
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}

static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* Adjugate-over-determinant inverse of the 3x3 perspective matrix;
     see "Digital Image Warping" by George Wolberg, page 53. */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=det*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=det*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=det*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=det*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=det*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=det*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=det*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}

/*
 * Polynomial Term Defining Functions
 *
 * Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
* affine 1 (3) u = c0 + c1*x + c2*y * bilinear 1.5 (4) u = '' + c3*x*y * quadratic 2 (6) u = '' + c4*x*x + c5*y*y * cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3 * quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4 * quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5 * number in parenthesis minimum number of points needed. * Anything beyond quintic, has not been implemented until * a more automated way of determining terms is found. * Note the slight re-ordering of the terms for a quadratic polynomial * which is to allow the use of a bi-linear (order=1.5) polynomial. * All the later polynomials are ordered simply from x^N to y^N */ static size_t poly_number_terms(double order) { /* Return the number of terms for a 2d polynomial */ if ( order < 1 || order > 5 || ( order != floor(order) && (order-1.5) > MagickEpsilon) ) return 0; /* invalid polynomial order */ return((size_t) floor((order+1)*(order+2)/2)); } static double poly_basis_fn(ssize_t n, double x, double y) { /* Return the result for this polynomial term */ switch(n) { case 0: return( 1.0 ); /* constant */ case 1: return( x ); case 2: return( y ); /* affine order = 1 terms = 3 */ case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x*x ); case 5: return( y*y ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x*x ); case 7: return( x*x*y ); case 8: return( x*y*y ); case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x*x ); case 11: return( x*x*x*y ); case 12: return( x*x*y*y ); case 13: return( x*y*y*y ); case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x*x ); case 16: return( x*x*x*x*y ); case 17: return( x*x*x*y*y ); case 18: return( x*x*y*y*y ); case 19: return( x*y*y*y*y ); case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */ } return( 0 ); /* should never happen */ } static const char *poly_basis_str(ssize_t n) { /* return the result for this polynomial term */ switch(n) { case 0: return(""); 
/* constant */
    case  1:  return("*ii");
    case  2:  return("*jj");        /* affine    order = 1   terms = 3  */
    case  3:  return("*ii*jj");     /* bilinear  order = 1.5 terms = 4  */
    case  4:  return("*ii*ii");
    case  5:  return("*jj*jj");     /* quadratic order = 2   terms = 6  */
    case  6:  return("*ii*ii*ii");
    case  7:  return("*ii*ii*jj");
    case  8:  return("*ii*jj*jj");
    case  9:  return("*jj*jj*jj");  /* cubic     order = 3   terms = 10 */
    case 10:  return("*ii*ii*ii*ii");
    case 11:  return("*ii*ii*ii*jj");
    case 12:  return("*ii*ii*jj*jj");
    case 13:  return("*ii*jj*jj*jj");
    case 14:  return("*jj*jj*jj*jj");    /* quartic order = 4 terms = 15 */
    case 15:  return("*ii*ii*ii*ii*ii");
    case 16:  return("*ii*ii*ii*ii*jj");
    case 17:  return("*ii*ii*ii*jj*jj");
    case 18:  return("*ii*ii*jj*jj*jj");
    case 19:  return("*ii*jj*jj*jj*jj");
    case 20:  return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
  }
  return( "UNKNOWN" ); /* should never happen */
}

static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* polynomial term for x derivative */
  /* NOTE(review): constant factors are dropped (e.g. term x*x yields x
     here, not 2x) -- presumably only the relative slope matters to the
     caller; confirm before "fixing". */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */
    case  1:  return( 1.0 );
    case  2:  return( 0.0 ); /* affine    order = 1   terms = 3  */
    case  3:  return( y );   /* bilinear  order = 1.5 terms = 4  */
    case  4:  return( x );
    case  5:  return( 0.0 ); /* quadratic order = 2   terms = 6  */
    case  6:  return( x*x );
    case  7:  return( x*y );
    case  8:  return( y*y );
    case  9:  return( 0.0 );     /* cubic   order = 3   terms = 10 */
    case 10:  return( x*x*x );
    case 11:  return( x*x*y );
    case 12:  return( x*y*y );
    case 13:  return( y*y*y );
    case 14:  return( 0.0 );     /* quartic order = 4   terms = 15 */
    case 15:  return( x*x*x*x );
    case 16:  return( x*x*x*y );
    case 17:  return( x*x*y*y );
    case 18:  return( x*y*y*y );
    case 19:  return( y*y*y*y );
    case 20:  return( 0.0 );     /* quintic order = 5   terms = 21 */
  }
  return( 0.0 ); /* should never happen */
}

static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* polynomial term for y derivative */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */
    case  1:  return( 0.0 );
    case  2:  return( 1.0 ); /*
affine    order = 1   terms = 3  */
    case  3:  return( x );   /* bilinear  order = 1.5 terms = 4  */
    case  4:  return( 0.0 );
    case  5:  return( y );   /* quadratic order = 2   terms = 6  */
    /* from here on the y-derivative of term n is the x-derivative of
       the previous term, so delegate */
    default:  return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  }
  /* NOTE: the only reason that last is not true for 'quadratic'
     is due to the re-arrangement of terms to allow for 'bilinear'
  */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A f f i n e T r a n s f o r m I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AffineTransformImage() transforms an image as dictated by the affine matrix.
%  It allocates the memory necessary for the new Image structure and returns
%  a pointer to the new image.
%
%  The format of the AffineTransformImage method is:
%
%      Image *AffineTransformImage(const Image *image,
%        AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o affine_matrix: the affine matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image.
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* pack the matrix into the forward-mapping argument order
     sx, rx, ry, sy, tx, ty expected by AffineProjectionDistortion */
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e n e r a t e C o e f f i c i e n t s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GenerateCoefficients() takes user provided input arguments and generates
%  the coefficients, needed to apply the specific distortion for either
%  distorting images (generally using control points) or generating a color
%  gradient from sparsely separated color points.
%
%  The format of the GenerateCoefficients() method is:
%
%    Image *GenerateCoefficients(const Image *image,DistortImageMethod method,
%        const size_t number_arguments,const double *arguments,
%        size_t number_values, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion/ sparse gradient
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: the arguments for this distortion method.
%
%    o number_values: the style and format of given control points, (caller type)
%         0: 2 dimensional mapping of control points (Distort)
%            Format: u,v,x,y  where u,v is the 'source' of the
%            the color to be plotted, for DistortImage()
%         N: Interpolation of control points with N values (usally r,g,b)
%            Format: x,y,r,g,b  mapping x,y to color values r,g,b
%   IN future, variable number of values may be given (1 to N)
%
%    o exception: return any errors or warnings in this structure
%
%  Note that the returned array of double values must be freed by the
%  calling method using RelinquishMagickMemory().  This however may change in
%  the future to require a more 'method' specific method.
%
%  Because of this this method should not be classed as stable or used
%  outside other MagickCore library methods.
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

static double *GenerateCoefficients(const Image *image,
  DistortImageMethod *method,const size_t number_arguments,
  const double *arguments,size_t number_values,ExceptionInfo *exception)
{
  double
    *coeff;

  register size_t
    i;

  size_t
    number_coeff, /* number of coefficients to return (array size) */
    cp_size,      /* number floating point numbers per control point */
    cp_x, cp_y,   /* the x,y indexes for control point */
    cp_values;    /* index of values for this control point */
    /* number_values   Number of values given per control point */

  if ( number_values == 0 ) {
    /* Image distortion using control points (or other distortion)
       That is generate a mapping so that   x,y->u,v   given  u,v,x,y
    */
    number_values = 2;   /* special case: two values of u,v */
    cp_values = 0;       /* the values i,j are BEFORE the destination CP x,y */
    cp_x = 2;            /* location of x,y in input control values */
    cp_y = 3;
    /* NOTE: cp_values, also used for later 'reverse map distort' tests */
  }
  else {
    cp_x = 0;            /* location of x,y in input control values */
    cp_y = 1;
    cp_values = 2;       /* and
the other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; 
    case PerspectiveDistortion:
    case PerspectiveProjectionDistortion:
      number_coeff=9;
      break;
    case BarrelDistortion:
    case BarrelInverseDistortion:
      number_coeff=10;
      break;
    default:
      /* NOTE(review): perror() reports errno, which is unrelated here, and
         execution falls through with number_coeff == 0 to a zero-length
         allocation -- presumably intended as a soft assertion; confirm
         before changing. */
      perror("unknown method given"); /* just fail assertion */
  }

  /* allocate the array of coefficients needed */
  coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff));
  if (coeff == (double *) NULL) {
    (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "GenerateCoefficients");
    return((double *) NULL);
  }

  /* zero out coefficients array */
  for (i=0; i < number_coeff; i++)
    coeff[i] = 0.0;

  /* Second pass: fill in the coefficients for the selected method. */
  switch (*method) {
    case AffineDistortion:
    {
      /* Affine Distortion
           v =  c0*x + c1*y + c2
         for each 'value' given

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
               "InvalidArgument", "%s : 'require at least %.20g CPs'",
               "Affine", 1.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* handle special cases of not enough arguments */
      if ( number_arguments == cp_size ) {
        /* Only 1 CP Set Given */
        if ( cp_values == 0 ) {
          /* image distortion - translate the image */
          coeff[0] = 1.0;
          coeff[2] = arguments[0] - arguments[2];
          coeff[4] = 1.0;
          coeff[5] = arguments[1] - arguments[3];
        }
        else {
          /* sparse gradient - use the values directly */
          for (i=0; i<number_values; i++)
            coeff[i*3+2] = arguments[cp_values+i];
        }
      }
      else {
        /* 2 or more points (usally 3) given.
           Solve a least squares simultaneous equation for coefficients.
        */
        double
          **matrix,
          **vectors,
          terms[3];

        MagickBooleanType
          status;

        /* create matrix, and a fake vectors matrix */
        matrix = AcquireMagickMatrix(3UL,3UL);
        vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
        if (matrix == (double **) NULL ||
            vectors == (double **) NULL)
          {
            matrix  = RelinquishMagickMatrix(matrix, 3UL);
            vectors = (double **) RelinquishMagickMemory(vectors);
            coeff   = (double *) RelinquishMagickMemory(coeff);
            (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed",
                  "%s", "DistortCoefficients");
            return((double *) NULL);
          }
        /* fake a number_values x3 vectors matrix from coefficients array */
        for (i=0; i < number_values; i++)
          vectors[i] = &(coeff[i*3]);
        /* Add given control point pairs for least squares solving */
        for (i=0; i < number_arguments; i+=cp_size) {
          terms[0] = arguments[i+cp_x];   /* x */
          terms[1] = arguments[i+cp_y];   /* y */
          terms[2] = 1;                   /* 1 */
          LeastSquaresAddTerms(matrix,vectors,terms,
               &(arguments[i+cp_values]),3UL,number_values);
        }
        if ( number_arguments == 2*cp_size ) {
          /* Only two pairs were given, but we need 3 to solve the affine.
             Fake extra coordinates by rotating p1 around p0 by 90 degrees.
               x2 = x0 - (y1-y0)   y2 = y0 + (x1-x0)
           */
          terms[0] = arguments[cp_x]
               - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */
          /* NOTE(review): the doubled '+ +' below is a harmless unary plus */
          terms[1] = arguments[cp_y] +
               + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */
          terms[2] = 1;                                         /* 1 */
          if ( cp_values == 0 ) {
            /* Image Distortion - rotate the u,v coordients too */
            double
              uv2[2];
            uv2[0] = arguments[0] - arguments[5] + arguments[1];   /* u2 */
            uv2[1] = arguments[1] + arguments[4] - arguments[0];   /* v2 */
            LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL);
          }
          else {
            /* Sparse Gradient - use values of p0 for linear gradient */
            LeastSquaresAddTerms(matrix,vectors,terms,
                 &(arguments[cp_values]),3UL,number_values);
          }
        }
        /* Solve for LeastSquares Coefficients */
        status=GaussJordanElimination(matrix,vectors,3UL,number_values);
        matrix = RelinquishMagickMatrix(matrix, 3UL);
        vectors = (double **) RelinquishMagickMemory(vectors);
        if ( status == MagickFalse ) {
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Unsolvable Matrix'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        }
      }
      return(coeff);
    }
    case AffineProjectionDistortion:
    {
      /* Arguments: Affine Matrix (forward mapping)
         Arguments  sx, rx, ry, sy, tx, ty
         Where      u = sx*x + ry*y + tx
                    v = rx*x + sy*y + ty

         Returns coefficients (in their inverse form) ordered as...
             sx ry tx  rx sy ty

         AffineProjection Distortion Notes...
           + Will only work with a 2 dimensional Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
      */
      double
        inverse[8];

      if (number_arguments != 6) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Needs 6 coeff values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */
      for(i=0; i<6UL; i++ )
        inverse[i] = arguments[i];
      AffineArgsToCoefficients(inverse); /* map into coefficients */
      InvertAffineCoefficients(inverse, coeff); /* invert */
      *method = AffineDistortion;

      return(coeff);
    }
    case ScaleRotateTranslateDistortion:
    {
      /* Scale, Rotate and Translate Distortion
         An alternative Affine Distortion
         Argument options, by number of arguments given:
           7: x,y, sx,sy, a, nx,ny
           6: x,y, s, a, nx,ny
           5: x,y, sx,sy, a
           4: x,y, s, a
           3: x,y, a
           2: s, a
           1: a
         Where actions are (in order of application)
            x,y     'center' of transforms     (default = image center)
            sx,sy   scale image by this amount (default = 1)
            a       angle of rotation          (argument required)
            nx,ny   move 'center' here         (default = x,y or no movement)
         And convert to affine mapping coefficients

         ScaleRotateTranslate Distortion Notes...
           + Does not use a set of CPs in any normal way
           + Will only work with a 2 dimensional Image Distortion
           + Cannot be used for generating a sparse gradient (interpolation)
      */
      double
        cosine, sine,
        x,y,sx,sy,a,nx,ny;

      /* set default center, and default scale */
      x = nx = (double)(image->columns)/2.0 + (double)image->page.x;
      y = ny = (double)(image->rows)/2.0    + (double)image->page.y;
      sx = sy = 1.0;
      switch ( number_arguments ) {
        case 0:
          coeff = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                "InvalidArgument","%s : 'Needs at least 1 argument'",
                CommandOptionToMnemonic(MagickDistortOptions, *method) );
          return((double *) NULL);
        case 1:
          a = arguments[0];
          break;
        case 2:
          sx = sy = arguments[0];
          a = arguments[1];
          break;
        default:
          x = nx = arguments[0];
          y = ny = arguments[1];
          switch ( number_arguments ) {
            case 3:
              a = arguments[2];
              break;
            case 4:
              sx = sy = arguments[2];
              a = arguments[3];
              break;
            case 5:
              sx = arguments[2];
              sy = arguments[3];
              a  = arguments[4];
              break;
            case 6:
              sx = sy = arguments[2];
              a  = arguments[3];
              nx = arguments[4];
              ny = arguments[5];
              break;
            case 7:
              sx = arguments[2];
              sy = arguments[3];
              a  = arguments[4];
              nx = arguments[5];
              ny = arguments[6];
              break;
            default:
              coeff = (double *) RelinquishMagickMemory(coeff);
              (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
                  "InvalidArgument","%s : 'Too Many Arguments (7 or less)'",
                  CommandOptionToMnemonic(MagickDistortOptions, *method) );
              return((double *) NULL);
          }
          break;
      }
      /* Trap if sx or sy == 0 -- image is scaled out of existence!
      */
      if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument","%s : 'Zero Scale Given'",
              CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* Save the given arguments as an affine distortion */
      a=DegreesToRadians(a); cosine=cos(a); sine=sin(a);

      *method = AffineDistortion;
      coeff[0]=cosine/sx;
      coeff[1]=sine/sx;
      coeff[2]=x-nx*coeff[0]-ny*coeff[1];
      coeff[3]=(-sine)/sy;
      coeff[4]=cosine/sy;
      coeff[5]=y-nx*coeff[3]-ny*coeff[4];
      return(coeff);
    }
    case PerspectiveDistortion:
    {
      /* Perspective Distortion (a ratio of affine distortions)

             p(x,y)    c0*x + c1*y + c2
         u = ------ = ------------------
             r(x,y)    c6*x + c7*y + 1

             q(x,y)    c3*x + c4*y + c5
         v = ------ = ------------------
             r(x,y)    c6*x + c7*y + 1

         c8 = Sign of 'r', or the denominator affine, for the actual image.
              This determines what part of the distorted image is 'ground'
              side of the horizon, the other part is 'sky' or invalid.
              Valid values are  +1.0  or  -1.0  only.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...

         Perspective Distortion Notes...
           + Can be thought of as ratio of  3 affine transformations
           + Not separatable: r() or c6 and c7 are used by both equations
           + All 8 coefficients must be determined simultaniously
           + Will only work with a 2 dimensional Image Distortion
           + Can not be used for generating a sparse gradient (interpolation)
           + It is not linear, but is simple to generate an inverse
           + All lines within an image remain lines.
           + but distances between points may vary.
      */
      double
        **matrix,
        *vectors[1],
        terms[8];

      size_t
        cp_u = cp_values,
        cp_v = cp_values+1;

      MagickBooleanType
        status;

      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* fake 1x8 vectors matrix directly using the coefficients array */
      vectors[0] = &(coeff[0]);
      /* 8x8 least-squares matrix (zeroed) */
      matrix = AcquireMagickMatrix(8UL,8UL);
      if (matrix == (double **) NULL) {
        coeff=(double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed",
              "%s", "DistortCoefficients");
        return((double *) NULL);
      }
      /* Add control points for least squares solving.
         NOTE(review): the loop steps by 4, not cp_size -- presumably fine
         because perspective only supports the distort case (cp_size == 4)
         per the notes above; confirm. */
      for (i=0; i < number_arguments; i+=4) {
        terms[0]=arguments[i+cp_x];            /* c0*x */
        terms[1]=arguments[i+cp_y];            /* c1*y */
        terms[2]=1.0;                          /* c2*1 */
        terms[3]=0.0;
        terms[4]=0.0;
        terms[5]=0.0;
        terms[6]=-terms[0]*arguments[i+cp_u];  /* 1/(c6*x) */
        terms[7]=-terms[1]*arguments[i+cp_u];  /* 1/(c7*y) */
        LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]),
            8UL,1UL);

        terms[0]=0.0;
        terms[1]=0.0;
        terms[2]=0.0;
        terms[3]=arguments[i+cp_x];            /* c3*x */
        terms[4]=arguments[i+cp_y];            /* c4*y */
        terms[5]=1.0;                          /* c5*1 */
        terms[6]=-terms[3]*arguments[i+cp_v];  /* 1/(c6*x) */
        terms[7]=-terms[4]*arguments[i+cp_v];  /* 1/(c7*y) */
        LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]),
            8UL,1UL);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,8UL,1UL);
      matrix = RelinquishMagickMatrix(matrix, 8UL);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Unsolvable Matrix'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      /* Calculate 9'th coefficient! The ground-sky determination.
         What is sign of the 'ground' in r() denominator affine function?
         Just use any valid image coordinate (first control point) in
         destination for determination of what part of view is 'ground'. */
      coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0;
      coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;

      return(coeff);
    }
    case PerspectiveProjectionDistortion:
    {
      /* Arguments: Perspective Coefficients (forward mapping) */
      if (number_arguments != 8) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'Needs 8 coefficient values'",
              CommandOptionToMnemonic(MagickDistortOptions, *method));
        return((double *) NULL);
      }
      /* FUTURE: trap test  c0*c4-c3*c1 == 0  (determinate = 0, no inverse) */
      InvertPerspectiveCoefficients(arguments, coeff);
      /* Calculate 9'th coefficient! The ground-sky determination.
         What is sign of the 'ground' in r() denominator affine function?
         Just use any valid image coordinate in destination for
         determination.  For a forward mapped perspective the images 0,0
         coord will map to c2,c5 in the distorted image, so set the sign
         of denominator of that. */
      coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0;
      coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0;
      *method = PerspectiveDistortion;

      return(coeff);
    }
    case BilinearForwardDistortion:
    case BilinearReverseDistortion:
    {
      /* Bilinear Distortion (Forward mapping)
            v = c0*x + c1*y + c2*x*y + c3;
         for each 'value' given

         This is actually a simple polynomial Distortion!  The difference
         however is when we need to reverse the above equation to generate
         a BilinearForwardDistortion (see below).

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      double
        **matrix,
        **vectors,
        terms[4];

      MagickBooleanType
        status;

      /* check the number of arguments */
      if ( number_arguments%cp_size != 0 ||
           number_arguments < cp_size*4 ) {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
              "InvalidArgument", "%s : 'require at least %.20g CPs'",
              CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0);
        coeff=(double *) RelinquishMagickMemory(coeff);
        return((double *) NULL);
      }
      /* create matrix, and a fake vectors matrix */
      matrix = AcquireMagickMatrix(4UL,4UL);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      if (matrix == (double **) NULL ||
          vectors == (double **) NULL)
        {
          matrix  = RelinquishMagickMatrix(matrix, 4UL);
          vectors = (double **) RelinquishMagickMemory(vectors);
          coeff   = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed",
                "%s", "DistortCoefficients");
          return((double *) NULL);
        }
      /* fake a number_values x4 vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[i*4]);
      /* Add given control point pairs for least squares solving */
      for (i=0; i < number_arguments; i+=cp_size) {
        terms[0] = arguments[i+cp_x];   /*  x  */
        terms[1] = arguments[i+cp_y];   /*  y  */
        terms[2] = terms[0]*terms[1];   /* x*y */
        terms[3] = 1;                   /*  1  */
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),4UL,number_values);
      }
      /* Solve for LeastSquares Coefficients */
      status=GaussJordanElimination(matrix,vectors,4UL,number_values);
      matrix = RelinquishMagickMatrix(matrix, 4UL);
      vectors = (double **) RelinquishMagickMemory(vectors);
      if ( status == MagickFalse ) {
        coeff = (double *) RelinquishMagickMemory(coeff);
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "InvalidArgument","%s : 'Unsolvable Matrix'",
            CommandOptionToMnemonic(MagickDistortOptions, *method) );
        return((double *) NULL);
      }
      if ( *method == BilinearForwardDistortion ) {
         /* Bilinear Forward Mapped Distortion

          The above least-squares solved for coefficients but in the
          forward direction, due to changes to indexing constants.

             i = c0*x + c1*y + c2*x*y + c3;
             j = c4*x + c5*y + c6*x*y + c7;

          where i,j are in the destination image, NOT the source.

          Reverse Pixel mapping however needs to use reverse of these
          functions.  It required a full page of algebra to work out the
          reversed mapping formula, but resolves down to the following...

             c8 = c0*c5-c1*c4;
             c9 = 2*(c2*c5-c1*c6);   // '2*a' in the quadratic formula

             i = i - c3;   j = j - c7;
             b = c6*i - c2*j + c8;   // So that   a*y^2 + b*y + c == 0
             c = c4*i -  c0*j;       // y = ( -b +- sqrt(bb - 4ac) ) / (2*a)

             r = b*b - c9*(c+c);
             if ( c9 != 0 )
               y = ( -b + sqrt(r) ) / c9;
             else
               y = -c/b;

             x = ( i - c1*y) / ( c1 - c2*y );

          NB: if 'r' is negative there is no solution!
          NB: the sign of the sqrt() should be negative if image becomes
              flipped or flopped, or crosses over itself.
          NB: technically coefficient c5 is not needed, anymore, but kept
              for completeness.

          See Anthony Thyssen <A.Thyssen@griffith.edu.au>
          or  Fred Weinhaus <fmw@alink.net>  for more details.
         */
         coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4];
         coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]);
      }
      return(coeff);
    }
#if 0
    case QuadrilateralDistortion:
    {
      /* Map a Quadrilateral to a unit square using BilinearReverse
         Then map that unit square back to the final Quadrilateral
         using BilinearForward.

         Input Arguments are sets of control points...
         For Distort Images    u,v, x,y  ...
         For Sparse Gradients  x,y, r,g,b  ...
      */
      /* UNDER CONSTRUCTION */
      return(coeff);
    }
#endif
    case PolynomialDistortion:
    {
      /* Polynomial Distortion

         First two coefficients are used to hold global polynomial
         information:
           c0 = Order of the polynomial being created
           c1 = number_of_terms in one polynomial equation

         Rest of the coefficients map to the equations....
            v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ...
         for each 'value' (number_values of them) given.
         As such total coefficients =  2 + number_terms * number_values

         Input Arguments are sets of control points...
         For Distort Images    order  [u,v, x,y] ...
         For Sparse Gradients  order  [x,y, r,g,b] ...

         Polynomial Distortion Notes...
           + UNDER DEVELOPMENT -- Do not expect this to remain as is.
           + Currently polynomial is a reversed mapped distortion.
           + Order 1.5 is fudged to map into a bilinear distortion.
             though it is not the same order as that distortion.
      */
      double
        **matrix,
        **vectors,
        *terms;

      size_t
        nterms;   /* number of polynomial terms per number_values */

      register ssize_t
        j;

      MagickBooleanType
        status;

      /* first two coefficients hold polynomial order information */
      coeff[0] = arguments[0];
      coeff[1] = (double) poly_number_terms(arguments[0]);
      nterms = (size_t) coeff[1];

      /* create matrix, a fake vectors matrix, and least sqs terms */
      matrix = AcquireMagickMatrix(nterms,nterms);
      vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors));
      terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms));
      if (matrix  == (double **) NULL ||
          vectors == (double **) NULL ||
          terms   == (double *) NULL )
        {
          matrix  = RelinquishMagickMatrix(matrix, nterms);
          vectors = (double **) RelinquishMagickMemory(vectors);
          terms   = (double *) RelinquishMagickMemory(terms);
          coeff   = (double *) RelinquishMagickMemory(coeff);
          (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed",
                "%s", "DistortCoefficients");
          return((double *) NULL);
        }
      /* fake a number_values x nterms vectors matrix from coefficients array */
      for (i=0; i < number_values; i++)
        vectors[i] = &(coeff[2+i*nterms]);
      /* Add given control point pairs for least squares solving */
      for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */
        for (j=0; j < (ssize_t) nterms; j++)
          terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]);
        LeastSquaresAddTerms(matrix,vectors,terms,
             &(arguments[i+cp_values]),nterms,number_values);
      }
      terms = (double *) RelinquishMagickMemory(terms);
      /*
Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... 
+ Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && 
*method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. 
Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image: resize using an affine distortion (EWA cylindrical
    resampling) instead of the normal resize filters.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /*
    Do not short-circuit this resize if final image size is unchanged; the
    distortion still resamples the pixels.  The three control-point pairs
    below map the source corners (0,0), (cols,0), (0,rows) onto the new
    size -- a pure scale.
  */
  (void) memset(distort_args,0,12*sizeof(double));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Virtual pixels beyond the image edges must not contribute color to the
    resampled result, so make them fully transparent.
  */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod);
  if (image->matte == MagickFalse)
    {
      /*
        Image has no transparency channel, so we are free to use the alpha
        channel for virtual-pixel handling during the distort.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      InheritException(exception,&image->exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the virtual-pixel alpha in the resized
        image, so only the actual original image's alpha channel is used.
      */
      Image
        *resize_alpha;

      /* distort alpha channel separately */
      (void) SeparateImageChannel(tmp_image,TrueAlphaChannel);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /* distort the actual image containing alpha + VP alpha */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);  /* fix: was leaked here */
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /* replace resized image's alpha with the separately distorted alpha */
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel);
      (void) SetImageAlphaChannel(resize_alpha,DeactivateAlphaChannel);
      (void) CompositeImage(resize_image,CopyOpacityCompositeOp,resize_alpha,
        0,0);
      InheritException(exception,&resize_image->exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  /* restore the caller's virtual-pixel method on the result */
  (void) SetImageVirtualPixelMethod(resize_image,vp_save);
  /*
    Clean up the results of the distortion: crop the (bestfit) canvas back
    to exactly the requested size, with no page offset.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->matte=image->matte;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DistortImage() distorts an image using various distortion methods, by
%  mapping color lookups of the source image to a new destination image
%  usually of the same size as the source image, unless 'bestfit' is set to
%  true.
%
%  If 'bestfit' is enabled, and distortion allows it, the destination image is
%  adjusted to ensure the whole source 'image' will just fit within the final
%  destination image, which will be sized and offset accordingly.  Also in
%  many cases the virtual offset of the source image will be taken into
%  account in the mapping.
%
%  If the '-verbose' control option has been set print to standard error the
%  equivalent '-fx' formula with coefficients for the function, if practical.
%
%  The format of the DistortImage() method is:
%
%      Image *DistortImage(const Image *image,const DistortImageMethod method,
%        const size_t number_arguments,const double *arguments,
%        MagickBooleanType bestfit, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion.
%
%        ArcDistortion always ignores source image offset, and always
%        'bestfit' the destination image with the top left corner offset
%        relative to the polar mapping center.
%
%        Affine, Perspective, and Bilinear, do least squares fitting of the
%        distortion when more than the minimum number of control point pairs
%        are provided.
%
%        Perspective, and Bilinear, fall back to an Affine distortion when
%        less than 4 control point pairs are provided.  While Affine
%        distortions let you use any number of control point pairs, that is
%        Zero pairs is a No-Op (viewport only) distortion, one pair is a
%        translation and two pairs of control points do a
%        scale-rotate-translate, without any shearing.
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: an array of floating point arguments for this method.
%
%    o bestfit: Attempt to 'bestfit' the size of the resulting image.
%        This also forces the resulting image to be a 'layered' virtual
%        canvas image.  Can be overridden using the 'distort:viewport'
%        setting.
%
%    o exception: return any errors or warnings in this structure
%
%  Extra Controls from Image meta-data (artifacts)...
%
%    o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
%        equivalents for the distortion operation (if feasible).
%        This forms an extra check of the distortion method, and allows users
%        access to the internal constants IM calculates for the distortion.
%
%    o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
%        resulting image, rather than use the original image's canvas, or a
%        calculated 'bestfit' canvas.
%
%    o "distort:scale"
%        Scale the size of the output canvas by this amount to provide a
%        method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
%    o 'interpolate' For source image lookups (scale enlargements)
%
%    o 'filter'      Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image,DistortImageMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if (method == ResizeDistortion) { if (number_arguments != 2) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t) arguments[0], (size_t) arguments[1],exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff=GenerateCoefficients(image,&method,number_arguments,arguments,0, exception); if (coeff == (double *) NULL) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. 
*/ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; 
scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. 
*/ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidGeometry","`%s' `%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) { register ssize_t i; char image_gen[MaxTextExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MaxTextExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse=(double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr," 
xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0],coeff[1],coeff[2]); (void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3],coeff[4],coeff[5]); (void) FormatLocaleFile(stderr," %s' \\\n",lookup); break; } case PerspectiveDistortion: { double *inverse; inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr,"Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i < 4; i++) (void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for ( ; i < 7; i++) (void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(), inverse[7]); inverse=(double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%.1024s",image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n", GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n", GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1], GetMagickPrecision(),coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n", GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4], GetMagickPrecision(),coeff[5]); (void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n", coeff[8] < 0.0 ? 
"<" : ">", lookup); break; } case BilinearForwardDistortion: { (void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4],coeff[5],coeff[6],coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5- coeff[7]); (void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if (coeff[9] != 0) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]); } else (void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4],coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr," (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case BilinearReverseDistortion: { #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0], (unsigned long) nterms); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i < (ssize_t) nterms; i++) { if ((i != 0) && (i%4 == 0)) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr,";\n yy ="); for (i=0; i < (ssize_t) nterms; i++) { if ((i != 0) && (i%4 == 0)) (void) FormatLocaleFile(stderr,"\n "); (void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n",(double) i,coeff[i]); 
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr," xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n"); for (i=0; i < 8; i++) (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i, coeff[i]); (void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]); (void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr," xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1],coeff[7] ); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for (i=0; i < 8; i++) (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i, coeff[i]); (void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6],+coeff[4]); (void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n", coeff[7],+coeff[1]); (void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n", coeff[2]); (void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n", coeff[3]); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); 
break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]); (void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n", coeff[1],coeff[2]); (void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]); (void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc, yc; /* NOTE: This does the barrel roll in pixel coords not image coords The internal distortion must do it in image coordinates, so that is what the center coeff (8,9) is given in. */ xc=((double)image->columns-1.0)/2.0+image->page.x; yc=((double)image->rows-1.0)/2.0+image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]- 0.5,coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6], coeff[7]); (void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s","-define distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); return((Image *) NULL); } /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass) == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); InheritException(exception,&distort_image->exception); distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace); if (distort_image->background_color.opacity != OpaqueOpacity) distort_image->matte=MagickTrue; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. */ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetMagickPixelPacket(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; MagickPixelPacket pixel, /* pixel color to assign to distorted image */ invalid; /* the color to assign when distort result is invalid */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register IndexPacket *magick_restrict indexes; register ssize_t i; register 
PixelPacket *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(distort_view); pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. */ validity = 1.0; GetMagickPixelPacket(distort_image,&invalid); SetMagickPixelPacket(distort_image,&distort_image->matte_color, (IndexPacket *) NULL, &invalid); if (distort_image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&invalid); /* what about other color spaces? */ for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 
0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) 
*/ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) ((coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5); if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 1 /*if ( i == 0 && j == 0 ) {*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelPacket(distort_image,&invalid,q,indexes); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ MagickPixelCompositeBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelPacket(distort_image,&pixel,q,indexes); } q++; indexes++; } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DistortImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff=(double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o t a t e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty triangles left
%  over from shearing the image are filled with the background color defined
%  by member 'background_color' of the image.  RotateImage allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  MagickRealType
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: reduce 'degrees' to a residual angle in the range
    (-45.0,45.0] plus a count of whole 90-degree turns ('rotations').
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;  /* number of orthogonal 90-degree turns, 0..3 */
  /*
    Equivalent X and Y shear factors for the residual angle.  When both are
    (nearly) zero the requested rotation is an exact multiple of 90 degrees,
    which can be performed losslessly by IntegralRotateImage().
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General angle: rotate a clone of the image with a Scale-Rotate-Translate
    distortion (bestfit canvas); background virtual pixels fill the 'empty'
    triangular corners.  Note the full 'degrees' value is passed, not the
    residual angle.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p a r s e C o l o r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,const ChannelType channel,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o channel: Specify which color values (in RGBKA sequence) are being set.
%        This also determines the number of color_values in above.
%
%    o method: the method to fill in the gradient between the control points.
% % The methods used for SparseColor() are often simular to methods % used for DistortImage(), and even share the same code for determination % of the function coefficents, though with more dimensions (or resulting % values). % % o number_arguments: the number of arguments given. % % o arguments: array of floating point arguments for this method-- % x,y,color_values-- with color_values given as normalized values. % % o exception: return any errors or warnings in this structure % */ MagickExport Image *SparseColorImage(const Image *image, const ChannelType channel,const SparseColorMethod method, const size_t number_arguments,const double *arguments, ExceptionInfo *exception) { #define SparseColorTag "Distort/SparseColor" SparseColorMethod sparse_method; double *coeff; Image *sparse_image; size_t number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Determine number of color values needed per control point */ number_colors=0; if ( channel & RedChannel ) number_colors++; if ( channel & GreenChannel ) number_colors++; if ( channel & BlueChannel ) number_colors++; if ( channel & IndexChannel ) number_colors++; if ( channel & OpacityChannel ) number_colors++; /* Convert input arguments into mapping coefficients, this this case we are mapping (distorting) colors, rather than coordinates. 
*/ { DistortImageMethod distort_method; distort_method=(DistortImageMethod) method; if ( distort_method >= SentinelDistortion ) distort_method = ShepardsDistortion; /* Pretend to be Shepards */ coeff = GenerateCoefficients(image, &distort_method, number_arguments, arguments, number_colors, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Note some Distort Methods may fall back to other simpler methods, Currently the only fallback of concern is Bilinear to Affine (Barycentric), which is alaso sparse_colr method. This also ensures correct two and one color Barycentric handling. */ sparse_method = (SparseColorMethod) distort_method; if ( distort_method == ShepardsDistortion ) sparse_method = method; /* return non-distort methods to normal */ if ( sparse_method == InverseColorInterpolate ) coeff[0]=0.5; /* sqrt() the squared distance for inverse */ } /* Verbose output */ if ( GetImageArtifact(image,"verbose") != (const char *) NULL ) { switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n"); if ( channel & RedChannel ) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & GreenChannel ) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & BlueChannel ) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & IndexChannel ) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ( channel & OpacityChannel ) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n"); if ( channel & RedChannel 
) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & GreenChannel ) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & BlueChannel ) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & IndexChannel ) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ( channel & OpacityChannel ) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; break; } default: /* sparse color method is too complex for FX emulation */ break; } } /* Generate new image for generated interpolated gradient. * ASIDE: Actually we could have just replaced the colors of the original * image, but IM Core policy, is if storage class could change then clone * the image. 
*/ sparse_image=CloneImage(image,0,0,MagickTrue,exception); if (sparse_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sparse_image,DirectClass) == MagickFalse) { /* if image is ColorMapped - change it to DirectClass */ InheritException(exception,&image->exception); sparse_image=DestroyImage(sparse_image); return((Image *) NULL); } { /* ----- MAIN CODE ----- */ CacheView *sparse_view; MagickBooleanType status; MagickOffsetType progress; ssize_t j; status=MagickTrue; progress=0; sparse_view=AcquireAuthenticCacheView(sparse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sparse_image,sparse_image->rows,1) #endif for (j=0; j < (ssize_t) sparse_image->rows; j++) { MagickBooleanType sync; MagickPixelPacket pixel; /* pixel to assign to distorted image */ register IndexPacket *magick_restrict indexes; register ssize_t i; register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns, 1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(sparse_view); GetMagickPixelPacket(sparse_image,&pixel); for (i=0; i < (ssize_t) image->columns; i++) { SetMagickPixelPacket(image,q,indexes,&pixel); switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; if ( channel & RedChannel ) pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & GreenChannel ) pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & BlueChannel ) pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & IndexChannel ) pixel.index = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ( channel & OpacityChannel ) pixel.opacity = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; if ( channel & RedChannel ) pixel.red = coeff[x]*i + coeff[x+1]*j + 
coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & GreenChannel ) pixel.green = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & BlueChannel ) pixel.blue = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & IndexChannel ) pixel.index = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ( channel & OpacityChannel ) pixel.opacity = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; break; } case InverseColorInterpolate: case ShepardsColorInterpolate: { /* Inverse (Squared) Distance weights average (IDW) */ size_t k; double denominator; if ( channel & RedChannel ) pixel.red = 0.0; if ( channel & GreenChannel ) pixel.green = 0.0; if ( channel & BlueChannel ) pixel.blue = 0.0; if ( channel & IndexChannel ) pixel.index = 0.0; if ( channel & OpacityChannel ) pixel.opacity = 0.0; denominator = 0.0; for(k=0; k<number_arguments; k+=2+number_colors) { register ssize_t x=(ssize_t) k+2; double weight = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); weight = pow(weight,coeff[0]); /* inverse of power factor */ weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight; if ( channel & RedChannel ) pixel.red += arguments[x++]*weight; if ( channel & GreenChannel ) pixel.green += arguments[x++]*weight; if ( channel & BlueChannel ) pixel.blue += arguments[x++]*weight; if ( channel & IndexChannel ) pixel.index += arguments[x++]*weight; if ( channel & OpacityChannel ) pixel.opacity += arguments[x++]*weight; denominator += weight; } if ( channel & RedChannel ) pixel.red /= denominator; if ( channel & GreenChannel ) pixel.green /= denominator; if ( channel & BlueChannel ) pixel.blue /= denominator; if ( channel & IndexChannel ) pixel.index /= denominator; if ( channel & OpacityChannel ) pixel.opacity /= denominator; break; } case ManhattanColorInterpolate: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! 
*/ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = fabs((double)i-arguments[ k ]) + fabs((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ( channel & RedChannel ) pixel.red = arguments[x++]; if ( channel & GreenChannel ) pixel.green = arguments[x++]; if ( channel & BlueChannel ) pixel.blue = arguments[x++]; if ( channel & IndexChannel ) pixel.index = arguments[x++]; if ( channel & OpacityChannel ) pixel.opacity = arguments[x++]; minimum = distance; } } break; } case VoronoiColorInterpolate: default: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! */ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ( channel & RedChannel ) pixel.red = arguments[x++]; if ( channel & GreenChannel ) pixel.green = arguments[x++]; if ( channel & BlueChannel ) pixel.blue = arguments[x++]; if ( channel & IndexChannel ) pixel.index = arguments[x++]; if ( channel & OpacityChannel ) pixel.opacity = arguments[x++]; minimum = distance; } } break; } } /* set the color directly back into the source image */ if ( channel & RedChannel ) pixel.red=ClampPixel(QuantumRange*pixel.red); if ( channel & GreenChannel ) pixel.green=ClampPixel(QuantumRange*pixel.green); if ( channel & BlueChannel ) pixel.blue=ClampPixel(QuantumRange*pixel.blue); if ( channel & IndexChannel ) pixel.index=ClampPixel(QuantumRange*pixel.index); if ( channel & OpacityChannel ) pixel.opacity=ClampPixel(QuantumRange*pixel.opacity); SetPixelPacket(sparse_image,&pixel,q,indexes); q++; indexes++; } sync=SyncCacheViewAuthenticPixels(sparse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) 
#pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SparseColorTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sparse_view=DestroyCacheView(sparse_view); if (status == MagickFalse) sparse_image=DestroyImage(sparse_image); } coeff = (double *) RelinquishMagickMemory(coeff); return(sparse_image); }
GB_unop__identity_int16_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: (none) // op(A') function: GB_unop_tran__identity_int16_int16 // C type: int16_t // A type: int16_t // cast: int16_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int16_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int16_t z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( int16_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; int16_t z = aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } #endif 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_int16_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__identity_int32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_int16
// op(A') function:  GB_tran__identity_int32_int16

// C type:   int32_t
// A type:   int16_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (widen int16 to int32)
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij)): load, cast, apply, and store one entry
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the operator element-wise over the anz entries of Ax, in parallel.
GrB_Info GB_unop__identity_int32_int16
(
    int32_t *Cx,        // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time: caller falls back to generic case
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB_tran__identity_int32_int16
(
    GrB_Matrix C,                           // output matrix, C = op (A')
    const GrB_Matrix A,                     // input matrix
    int64_t *GB_RESTRICT *Rowcounts,        // per-slice row counts (see template)
    GBI_single_iterator Iter,               // iterator over A
    const int64_t *GB_RESTRICT A_slice,     // slicing of A across tasks
    int naslice                             // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__abs_int64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int64_uint32
// op(A') function:  GB_tran__abs_int64_uint32

// C type:   int64_t
// A type:   uint32_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

// type of the A matrix entries
#define GB_ATYPE \
    uint32_t

// type of the C matrix entries
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint32_t aij = Ax [pA]

// access entry p of the C value array
#define GB_CX(p) Cx [p]

// unary operator (integer absolute value; trivial for an unsigned input)
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting (widen uint32 to int64; always representable, so no overflow)
#define GB_CASTING(z, x) \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij)): load, cast, apply, and store one entry
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the operator element-wise over the anz entries of Ax, in parallel.
GrB_Info GB_unop__abs_int64_uint32
(
    int64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time: caller falls back to generic case
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the shared template GB_unaryop_transpose.c,
// which is specialized here via the GB_* macros defined above.
GrB_Info GB_tran__abs_int64_uint32
(
    GrB_Matrix C,                       // output matrix, C = op (A')
    const GrB_Matrix A,                 // input matrix
    int64_t **Rowcounts,                // per-slice row counts (see template)
    GBI_single_iterator Iter,           // iterator over A
    const int64_t *restrict A_slice,    // slicing of A across tasks
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cpu.c
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "cpu.h" #include <limits.h> #include <stdio.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef __ANDROID__ #include <sys/syscall.h> #include <unistd.h> #include <stdint.h> #endif #ifdef __ANDROID__ // extract the ELF HW capabilities bitmap from /proc/self/auxv static unsigned int get_elf_hwcap_from_proc_self_auxv() { FILE* fp = fopen("/proc/self/auxv", "rb"); if (!fp) { return 0; } #define AT_HWCAP 16 #define AT_HWCAP2 26 #if __aarch64__ struct { uint64_t tag; uint64_t value; } entry; #else struct { unsigned int tag; unsigned int value; } entry; #endif unsigned int result = 0; while (!feof(fp)) { int nread = fread((char*)&entry, sizeof(entry), 1, fp); if (nread != 1) break; if (entry.tag == 0 && entry.value == 0) break; if (entry.tag == AT_HWCAP) { result = entry.value; break; } } fclose(fp); return result; } static unsigned int g_hwcaps = get_elf_hwcap_from_proc_self_auxv(); #if __aarch64__ // from arch/arm64/include/uapi/asm/hwcap.h #define HWCAP_ASIMD (1 << 1) #define HWCAP_ASIMDHP (1 << 10) #else // from arch/arm/include/uapi/asm/hwcap.h #define HWCAP_NEON (1 << 12) #define HWCAP_VFPv4 (1 << 16) #endif #endif // __ANDROID__ int cpu_support_arm_neon() { #ifdef __ANDROID__ #if __aarch64__ return g_hwcaps & HWCAP_ASIMD; #else return g_hwcaps 
& HWCAP_NEON; #endif #else return 0; #endif } int cpu_support_arm_vfpv4() { #ifdef __ANDROID__ #if __aarch64__ // neon always enable fma and fp16 return g_hwcaps & HWCAP_ASIMD; #else return g_hwcaps & HWCAP_VFPv4; #endif #else return 0; #endif } int cpu_support_arm_asimdhp() { #ifdef __ANDROID__ #if __aarch64__ return g_hwcaps & HWCAP_ASIMDHP; #else return 0; #endif #else return 0; #endif } static int get_cpucount() { int count = 0; #ifdef __ANDROID__ // get cpu count from /proc/cpuinfo FILE* fp = fopen("/proc/cpuinfo", "rb"); if (!fp) return 1; char line[1024]; while (!feof(fp)) { char* s = fgets(line, 1024, fp); if (!s) break; if (memcmp(line, "processor", 9) == 0) { count++; } } fclose(fp); #else #ifdef _OPENMP count = omp_get_max_threads(); #else count = 1; #endif // _OPENMP #endif if (count < 1) count = 1; if (count > (int)sizeof(size_t) * 8) { fprintf(stderr, "more than %d cpu detected, thread affinity may not work properly :(\n", (int)sizeof(size_t) * 8); } return count; } static int g_cpucount = -1; inline int get_cpu_count() { // retrieve gpu count if not initialized if (g_cpucount == -1) { g_cpucount = get_cpucount(); } return g_cpucount; } #ifdef __ANDROID__ static int get_max_freq_khz(int cpuid) { // first try, for all possible cpu char path[256]; sprintf(path, "/sys/devices/system/cpu/cpufreq/stats/cpu%d/time_in_state", cpuid); FILE* fp = fopen(path, "rb"); if (!fp) { // second try, for online cpu sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/stats/time_in_state", cpuid); fp = fopen(path, "rb"); if (fp) { int max_freq_khz = 0; while (!feof(fp)) { int freq_khz = 0; int nscan = fscanf(fp, "%d %*d", &freq_khz); if (nscan != 1) break; if (freq_khz > max_freq_khz) max_freq_khz = freq_khz; } fclose(fp); if (max_freq_khz != 0) return max_freq_khz; fp = NULL; } if (!fp) { // third try, for online cpu sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/cpuinfo_max_freq", cpuid); fp = fopen(path, "rb"); if (!fp) return -1; int max_freq_khz = -1; 
fscanf(fp, "%d", &max_freq_khz); fclose(fp); return max_freq_khz; } } int max_freq_khz = 0; while (!feof(fp)) { int freq_khz = 0; int nscan = fscanf(fp, "%d %*d", &freq_khz); if (nscan != 1) break; if (freq_khz > max_freq_khz) max_freq_khz = freq_khz; } fclose(fp); return max_freq_khz; } static int set_sched_affinity(size_t thread_affinity_mask) { // cpu_set_t definition // ref http://stackoverflow.com/questions/16319725/android-set-thread-affinity #define CPU_SETSIZE 1024 #define __NCPUBITS (8 * sizeof (unsigned long)) typedef struct { unsigned long __bits[CPU_SETSIZE / __NCPUBITS]; } cpu_set_t; #define CPU_SET(cpu, cpusetp) \ ((cpusetp)->__bits[(cpu)/__NCPUBITS] |= (1UL << ((cpu) % __NCPUBITS))) #define CPU_ZERO(cpusetp) \ memset((cpusetp), 0, sizeof(cpu_set_t)) // set affinity for thread #ifdef __GLIBC__ pid_t pid = syscall(SYS_gettid); #else #ifdef PI3 pid_t pid = getpid(); #else pid_t pid = gettid(); #endif #endif cpu_set_t mask; CPU_ZERO(&mask); for (int i=0; i<(int)sizeof(size_t) * 8; i++) { if (thread_affinity_mask & (1 << i)) CPU_SET(i, &mask); } int syscallret = syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask); if (syscallret) { fprintf(stderr, "syscall error %d\n", syscallret); return -1; } return 0; } #endif // __ANDROID__ static int g_powersave = 0; int get_cpu_powersave() { return g_powersave; } int set_cpu_powersave(int powersave) { if (powersave < 0 || powersave > 2) { fprintf(stderr, "powersave %d not supported\n", powersave); return -1; } size_t thread_affinity_mask = get_cpu_thread_affinity_mask(powersave); int ret = set_cpu_thread_affinity(thread_affinity_mask); if (ret != 0) return ret; g_powersave = powersave; return 0; } static size_t g_thread_affinity_mask_all = 0; static size_t g_thread_affinity_mask_little = 0; static size_t g_thread_affinity_mask_big = 0; static int setup_thread_affinity_masks() { g_thread_affinity_mask_all = (1 << get_cpu_count()) - 1; #ifdef __ANDROID__ int max_freq_khz_min = INT_MAX; int max_freq_khz_max = 0; 
vector_def(int) cpu_max_freq_khz; vector_init(cpu_max_freq_khz); vector_resize(cpu_max_freq_khz, get_cpu_count()); for (int i=0; i<get_cpu_count(); i++) { int max_freq_khz = get_max_freq_khz(i); // fprintf(stderr, "%d max freq = %d khz\n", i, max_freq_khz); cpu_max_freq_khz[i] = max_freq_khz; if (max_freq_khz > max_freq_khz_max) max_freq_khz_max = max_freq_khz; if (max_freq_khz < max_freq_khz_min) max_freq_khz_min = max_freq_khz; } int max_freq_khz_medium = (max_freq_khz_min + max_freq_khz_max) / 2; if (max_freq_khz_medium == max_freq_khz_max) { g_thread_affinity_mask_little = 0; g_thread_affinity_mask_big = g_thread_affinity_mask_all; return 0; } for (int i=0; i<get_cpu_count(); i++) { if (cpu_max_freq_khz[i] < max_freq_khz_medium) g_thread_affinity_mask_little |= (1 << i); else g_thread_affinity_mask_big |= (1 << i); } vector_destroy(cpu_max_freq_khz); #else // TODO implement me for other platforms g_thread_affinity_mask_little = 0; g_thread_affinity_mask_big = g_thread_affinity_mask_all; #endif return 0; } size_t get_cpu_thread_affinity_mask(int powersave) { if (g_thread_affinity_mask_all == 0) { setup_thread_affinity_masks(); } if (g_thread_affinity_mask_little == 0) { // SMP cpu powersave not supported // fallback to all cores anyway return g_thread_affinity_mask_all; } if (powersave == 0) return g_thread_affinity_mask_all; if (powersave == 1) return g_thread_affinity_mask_little; if (powersave == 2) return g_thread_affinity_mask_big; fprintf(stderr, "powersave %d not supported\n", powersave); // fallback to all cores anyway return g_thread_affinity_mask_all; } int set_cpu_thread_affinity(size_t thread_affinity_mask) { #ifdef __ANDROID__ int num_threads = 0; for (int i=0; i<(int)sizeof(size_t) * 8; i++) { if (thread_affinity_mask & (1 << i)) num_threads++; } #ifdef _OPENMP // set affinity for each thread set_omp_num_threads(num_threads); vector_def(int) ssarets; vector_init(ssarets); vector_resize(ssarets, num_threads); #pragma omp parallel for 
num_threads(num_threads) for (int i=0; i<num_threads; i++) { vector_get(ssarets, i) = set_sched_affinity(thread_affinity_mask); } for (int i=0; i<num_threads; i++) { if (vector_get(ssarets, i) != 0) return -1; } vector_destroy(ssarets); #else int ssaret = set_sched_affinity(thread_affinity_mask); if (ssaret != 0) return -1; #endif return 0; #else // TODO (void)thread_affinity_mask; return -1; #endif } int get_omp_num_threads() { #ifdef _OPENMP return omp_get_num_threads(); #else return 1; #endif } void set_omp_num_threads(int num_threads) { #ifdef _OPENMP omp_set_num_threads(num_threads); #else (void)num_threads; #endif } int get_omp_dynamic() { #ifdef _OPENMP return omp_get_dynamic(); #else return 0; #endif } void set_omp_dynamic(int dynamic) { #ifdef _OPENMP omp_set_dynamic(dynamic); #else (void)dynamic; #endif } int get_omp_thread_num() { #if _OPENMP return omp_get_thread_num(); #else return 0; #endif }
csr_spgemm.h
/*
 *  Copyright 2008-2014 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <cusp/array1d.h>
#include <cusp/detail/temporary_array.h>

#include <iostream>
#include <list>

namespace cusp
{
namespace system
{
namespace omp
{
namespace detail
{

//MW: note that this function is also used by coo.h
//MW: computes the total number of nonzeros of C
//
// Pass 1 of a two-pass (Gustavson-style) CSR sparse matrix-matrix multiply
// C = A*B: counts the structural nonzeros of each row of C in parallel and
// turns C_row_offsets into the exclusive prefix sum of those counts.
// Returns the total nnz of C (= C_row_offsets[num_rows]).
template <typename DerivedPolicy,
          typename Array1,
          typename Array2,
          typename Array3,
          typename Array4,
          typename Array5>
size_t spmm_csr_pass1(omp::execution_policy<DerivedPolicy>& exec,
                      const size_t num_rows, const size_t num_cols,
                      const Array1& A_row_offsets, const Array2& A_column_indices,
                      const Array3& B_row_offsets, const Array4& B_column_indices,
                      Array5& C_row_offsets)
{
    typedef typename Array1::value_type IndexType1;
    typedef typename Array2::value_type IndexType2;

    C_row_offsets[0] = 0;
    #pragma omp parallel
    {
        // per-thread scratch: mask[k] holds the last row index i that touched
        // column k, so each distinct column is counted once per row
        cusp::detail::temporary_array<int, DerivedPolicy> mask(exec, num_cols, -1);
        // Compute nnz in C (including explicit zeros)
        #pragma omp for
        for(int i = 0; i < int(num_rows); i++)
        {
            size_t num_nonzeros = 0;

            for(IndexType1 jj = A_row_offsets[i]; jj < A_row_offsets[i + 1]; jj++)
            {
                IndexType1 j = A_column_indices[jj];

                for(IndexType2 kk = B_row_offsets[j]; kk < B_row_offsets[j + 1]; kk++)
                {
                    IndexType2 k = B_column_indices[kk];
                    if(mask[k] != i)
                    {
                        mask[k] = i;
                        num_nonzeros++;
                    }
                }
            }
            // store the per-row count; converted to offsets by the scan below
            C_row_offsets[i + 1] = num_nonzeros;
        } // end for loop
    }// end omp parallel

    // serial inclusive scan: counts -> row offsets
    for(int i = 1; i < int(num_rows + 1); i++)
        C_row_offsets[i] += C_row_offsets[i - 1];

    return C_row_offsets[num_rows];
}

//MW: note that this function is also used by coo.h
//
// Pass 2: fills C_column_indices / C_values, assuming C_row_offsets was
// already computed by pass 1. Each row accumulates partial products in a
// dense `sums` workspace and tracks which columns were touched via an
// intrusive singly-linked list threaded through `next` (head -> ... -> init),
// so only the touched entries are emitted and reset afterwards.
// `initialize`, `combine`, `reduce` generalize the usual (id, *, +) semiring.
template <typename DerivedPolicy,
          typename Array1,
          typename Array2,
          typename Array3,
          typename Array4,
          typename Array5,
          typename Array6,
          typename Array7,
          typename Array8,
          typename Array9,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void spmm_csr_pass2(omp::execution_policy<DerivedPolicy>& exec,
                    const size_t num_rows, const size_t num_cols,
                    const Array1& A_row_offsets, const Array2& A_column_indices, const Array3& A_values,
                    const Array4& B_row_offsets, const Array5& B_column_indices, const Array6& B_values,
                    Array7& C_row_offsets, Array8& C_column_indices, Array9& C_values,
                    UnaryFunction  initialize,
                    BinaryFunction1 combine,
                    BinaryFunction2 reduce)
{
    typedef typename Array7::value_type IndexType;
    typedef typename Array9::value_type ValueType;

    // sentinel values for the intrusive column list in `next`
    const IndexType unseen = static_cast<IndexType>(-1);
    const IndexType init   = static_cast<IndexType>(-2);
    #pragma omp parallel
    {
        // Compute entries of C (per-thread dense workspaces)
        cusp::detail::temporary_array<IndexType, DerivedPolicy> next(exec, num_cols, unseen);
        cusp::detail::temporary_array<ValueType, DerivedPolicy> sums(exec, num_cols, initialize(ValueType(0)));
        #pragma omp for
        for (int i = 0; i < int(num_rows); i++)
        {
            IndexType head   = init;      // head of the touched-column list
            IndexType length = 0;         // number of touched columns this row

            IndexType jj_start = A_row_offsets[i];
            IndexType jj_end   = A_row_offsets[i + 1];

            for (IndexType jj = jj_start; jj < jj_end; jj++)
            {
                IndexType j = A_column_indices[jj];
                ValueType v = A_values[jj];

                IndexType kk_start = B_row_offsets[j];
                IndexType kk_end   = B_row_offsets[j + 1];

                for (IndexType kk = kk_start; kk < kk_end; kk++)
                {
                    IndexType k = B_column_indices[kk];
                    ValueType b = B_values[kk];

                    sums[k] = reduce(sums[k], combine(v, b));

                    // first touch of column k in this row: push onto the list
                    if (next[k] == unseen)
                    {
                        next[k] = head;
                        head = k;
                        length++;
                    }
                }
            }

            // drain the list into C, clearing the workspaces as we go
            size_t offset = C_row_offsets[i];
            for (IndexType jj = 0; jj < length; jj++)
            {
                C_column_indices[offset] = head;
                C_values[offset]         = sums[head];
                offset++;
                IndexType temp = head;
                head = next[head];

                // clear arrays
                // NOTE(review): reset uses ValueType(0) while construction used
                // initialize(ValueType(0)) — fine for the usual plus-semiring,
                // but presumably wrong if initialize(0) != 0; confirm intent.
                next[temp] = unseen;
                sums[temp] = ValueType(0);
            }
        } // end for loop
    } //omp parallel
}

// Generalized sparse matrix-matrix multiply C = A "op" B for CSR operands:
// pass 1 sizes C, then pass 2 fills it.
template <typename DerivedPolicy,
          typename MatrixType1,
          typename MatrixType2,
          typename MatrixType3,
          typename UnaryFunction,
          typename BinaryFunction1,
          typename BinaryFunction2>
void multiply(omp::execution_policy<DerivedPolicy>& exec,
              const MatrixType1& A,
              const MatrixType2& B,
              MatrixType3& C,
              UnaryFunction  initialize,
              BinaryFunction1 combine,
              BinaryFunction2 reduce,
              cusp::csr_format,
              cusp::csr_format,
              cusp::csr_format)
{
    // allocate row offsets (nnz not yet known)
    C.resize(A.num_rows, B.num_cols, 0);

    size_t num_nonzeros =
        spmm_csr_pass1(exec, A.num_rows, B.num_cols,
                       A.row_offsets, A.column_indices,
                       B.row_offsets, B.column_indices,
                       C.row_offsets);

    // Resize output
    C.resize(A.num_rows, B.num_cols, num_nonzeros);

    spmm_csr_pass2(exec, A.num_rows, B.num_cols,
                   A.row_offsets, A.column_indices, A.values,
                   B.row_offsets, B.column_indices, B.values,
                   C.row_offsets, C.column_indices, C.values,
                   initialize, combine, reduce);
}

} // end namespace detail
} // end namespace omp
} // end namespace system
} // end namespace cusp
taskloop.c
// RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | FileCheck --check-prefix=TASKS %s
// REQUIRES: ompt
// These compilers don't support the taskloop construct
// UNSUPPORTED: gcc-4, gcc-5, icc-16
// GCC 6 has support for taskloops, but at least 6.3.0 is crashing on this test
// UNSUPPORTED: gcc-6
#include "callback.h"
#include <omp.h>

// Verifies the OMPT callback sequence for a taskloop executed by the master
// thread of a 2-thread parallel region. The loop below runs exactly two
// iterations (i = 0 and i = 3), which is what the `count=2` and the two
// ompt_event_task_create checks pin down. The CHECK/TASKS lines are FileCheck
// assertions consumed by the RUN lines above — they are the test itself and
// must not be edited casually.
int main() {
  // NOTE(review): x is incremented without being initialized; it is only
  // loop-body ballast for the taskloop, its value is never read.
  unsigned int i, x;

#pragma omp parallel num_threads(2)
  {
#pragma omp barrier

#pragma omp master
#pragma omp taskloop
    for (i = 0; i < 5; i += 3) {
      x++;
    }
  }

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin:
  // CHECK-SAME: parent_task_id={{[0-9]+}}
  // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]]
  // CHECK-SAME: requested_team_size=2

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1:[0-9]+]]
  // CHECK-SAME: team_size=2, thread_num=0

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskgroup_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_begin:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]], count=2

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: new_task_id=[[TASK_ID1:[0-9]+]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS]]
  // CHECK-SAME: task_type=ompt_task_explicit=4

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: new_task_id=[[TASK_ID2:[0-9]+]]
  // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS]]
  // CHECK-SAME: task_type=ompt_task_explicit=4

  // CHECK-NOT: {{^}}[[MASTER_ID]]: ompt_event_task_create:

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskloop_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]
  // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID1]]
  // CHECK-SAME: count=2

  // CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_wait_taskgroup_begin:

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_taskgroup_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_taskgroup_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID1]]

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=0
  // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID1]], team_size=2, thread_num=0

  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end:
  // CHECK-SAME: parallel_id=[[PARALLEL_ID]]

  // TASKS: ompt_event_initial_task_begin:{{.*}} task_id={{[0-9]+}}
  // TASKS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_taskloop_begin:
  // TASKS: ompt_event_task_create:{{.*}} new_task_id=[[TASK_ID1:[0-9]+]]
  // TASKS-SAME: task_type=ompt_task_explicit
  // TASKS-DAG: ompt_event_task_create:{{.*}} new_task_id=[[TASK_ID2:[0-9]+]]

  // Schedule events:
  // TASKS-DAG: {{^.*}}first_task_id={{[0-9]+}}, second_task_id=[[TASK_ID1]]
  // TASKS-DAG: {{^.*}}first_task_id=[[TASK_ID1]], second_task_id={{[0-9]+}}
  // TASKS-DAG: {{^.*}}first_task_id={{[0-9]+}}, second_task_id=[[TASK_ID2]]
  // TASKS-DAG: {{^.*}}first_task_id=[[TASK_ID2]], second_task_id={{[0-9]+}}
  // TASKS-NOT: ompt_event_task_schedule

  return 0;
}
GB_unaryop__lnot_int32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__lnot_int32_int32
// op(A') function: GB_tran__lnot_int32_int32

// C type: int32_t
// A type: int32_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = !(aij != 0)

// type of A's entries
#define GB_ATYPE \
    int32_t

// type of C's entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (logical NOT of the boolean value of x)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (declares z; used by GB_CAST_OP below)
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) elementwise over anz entries, parallelized with
// OpenMP over nthreads.  Cx and Ax may be aliased because each entry is read
// once and written once at the same index p.
GrB_Info GB_unop__lnot_int32_int32
(
    int32_t *Cx,       // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,
    int nthreads       // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which is textually included
// here and specialized through the GB_* macros defined above.
GrB_Info GB_tran__lnot_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ark_brusselator1D_omp.c
/*--------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds @ SMU *--------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2019, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End *--------------------------------------------------------------- * Example problem: * * The following test simulates a brusselator problem from chemical * kinetics. This is n PDE system with 3 components, Y = [u,v,w], * satisfying the equations, * u_t = du*u_xx + a - (w+1)*u + v*u^2 * v_t = dv*v_xx + w*u - v*u^2 * w_t = dw*w_xx + (b-w)/ep - w*u * for t in [0, 80], x in [0, 1], with initial conditions * u(0,x) = a + 0.1*sin(pi*x) * v(0,x) = b/a + 0.1*sin(pi*x) * w(0,x) = b + 0.1*sin(pi*x), * and with stationary boundary conditions, i.e. * u_t(t,0) = u_t(t,1) = 0, * v_t(t,0) = v_t(t,1) = 0, * w_t(t,0) = w_t(t,1) = 0. * Note: these can also be implemented as Dirichlet boundary * conditions with values identical to the initial conditions. * * The spatial derivatives are computed using second-order * centered differences, with the data distributed over N points * on a uniform spatial grid. * * This program solves the problem with the DIRK method, using a * Newton iteration with the band linear solver, and a * user-supplied Jacobian routine. This example uses the OpenMP * vector kernel, and employs OpenMP threading within the * right-hand side and Jacobian construction functions. * * 100 outputs are printed at equal intervals, and run statistics * are printed at the end. 
*---------------------------------------------------------------*/

/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmp.h> /* access to OpenMP N_Vector */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* def. of type 'realtype' */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif

/* printf format specifiers, chosen by the SUNDIALS precision build option */
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif

/* accessor macros between (x,v) location and 1D NVector array */
#define IDX(x,v) (3*(x)+v)

/* user data structure (shared by main and all callback routines) */
typedef struct {
  sunindextype N; /* number of intervals */
  int nthreads; /* number of OpenMP threads */
  realtype dx; /* mesh spacing */
  realtype a; /* constant forcing on u */
  realtype b; /* steady-state value of w */
  realtype du; /* diffusion coeff for u */
  realtype dv; /* diffusion coeff for v */
  realtype dw; /* diffusion coeff for w */
  realtype ep; /* stiffness parameter */
} *UserData;

/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, void *user_data,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);

/* Private helper functions */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata);
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata);

/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);

/* Main Program: sets up the problem, builds the ARKStep integrator (fully
   implicit DIRK + band linear solver + user Jacobian), runs the output loop,
   prints solver statistics, and frees all resources.  Note that the ARKStep
   call sequence below is order-dependent: create, then tolerances, then
   linear solver attachment, then Jacobian routine. */
int main(int argc, char *argv[])
{
  /* general problem parameters */
  realtype T0 = RCONST(0.0); /* initial time */
  realtype Tf = RCONST(10.0); /* final time */
  int Nt = 100; /* total number of output times */
  int Nvar = 3; /* number of solution fields */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 201; /* spatial mesh size */
  realtype a = 0.6; /* problem parameters */
  realtype b = 2.0;
  realtype du = 0.025;
  realtype dv = 0.025;
  realtype dw = 0.025;
  realtype ep = 1.0e-5; /* stiffness parameter */
  realtype reltol = 1.0e-6; /* tolerances */
  realtype abstol = 1.0e-10;
  sunindextype NEQ, i;

  /* general problem variables */
  int flag; /* reusable error-checking flag */
  N_Vector y = NULL; /* empty vector for storing solution */
  N_Vector umask = NULL; /* empty mask vectors for viewing solution components */
  N_Vector vmask = NULL;
  N_Vector wmask = NULL;
  SUNMatrix A = NULL; /* empty matrix for linear solver */
  SUNLinearSolver LS = NULL; /* empty linear solver structure */
  void *arkode_mem = NULL; /* empty ARKode memory structure */
  realtype pi, t, dTout, tout, u, v, w;
  FILE *FID, *UFID, *VFID, *WFID;
  int iout, num_threads;
  long int nst, nst_a, nfe, nfi, nsetups, nje, nfeLS, nni, ncfn, netf;

  /* allocate udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  if (check_flag((void *) udata, "malloc", 2)) return 1;

  /* set the number of threads to use */
  num_threads = 1; /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1) /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* store the inputs in the UserData structure */
  udata->N = N;
  udata->a = a;
  udata->b = b;
  udata->du = du;
  udata->dv = dv;
  udata->dw = dw;
  udata->ep = ep;
  udata->nthreads = num_threads;

  /* set total allocated vector length (3 fields interleaved per mesh node) */
  NEQ = Nvar*udata->N;

  /* Initial problem output */
  printf("\n1D Brusselator PDE test problem:\n");
  printf(" N = %li, NEQ = %li\n", (long int) udata->N, (long int) NEQ);
  printf(" num_threads = %i\n", num_threads);
  printf(" problem parameters: a = %"GSYM", b = %"GSYM", ep = %"GSYM"\n",
         udata->a, udata->b, udata->ep);
  printf(" diffusion coefficients: du = %"GSYM", dv = %"GSYM", dw = %"GSYM"\n",
         udata->du, udata->dv, udata->dw);
  printf(" reltol = %.1"ESYM", abstol = %.1"ESYM"\n\n", reltol, abstol);

  /* Initialize vector data structures */
  y = N_VNew_OpenMP(NEQ, num_threads); /* Create vector for solution */
  if (check_flag((void *)y, "N_VNew_OpenMP", 0)) return 1;
  udata->dx = RCONST(1.0)/(N-1); /* set spatial mesh spacing */
  data = N_VGetArrayPointer(y); /* Access data array for new NVector y */
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;

  umask = N_VNew_OpenMP(NEQ, num_threads); /* Create vector masks */
  if (check_flag((void *)umask, "N_VNew_OpenMP", 0)) return 1;
  vmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)vmask, "N_VNew_OpenMP", 0)) return 1;
  wmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)wmask, "N_VNew_OpenMP", 0)) return 1;

  /* Set initial conditions into y */
  pi = RCONST(4.0)*atan(RCONST(1.0));
  for (i=0; i<N; i++) {
    data[IDX(i,0)] = a + RCONST(0.1)*sin(pi*i*udata->dx); /* u */
    data[IDX(i,1)] = b/a + RCONST(0.1)*sin(pi*i*udata->dx); /* v */
    data[IDX(i,2)] = b + RCONST(0.1)*sin(pi*i*udata->dx); /* w */
  }

  /* Set mask array values for each solution component
     (each mask selects one field, so ||y .* mask||_wl2 isolates it below) */
  N_VConst(0.0, umask);
  data = N_VGetArrayPointer(umask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,0)] = RCONST(1.0);

  N_VConst(0.0, vmask);
  data = N_VGetArrayPointer(vmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,1)] = RCONST(1.0);

  N_VConst(0.0, wmask);
  data = N_VGetArrayPointer(wmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,2)] = RCONST(1.0);

  /* Initialize matrix and linear solver data structures
     (bandwidth 4 covers coupling to the 3 fields of each neighbor node) */
  A = SUNBandMatrix(NEQ, 4, 4);
  if (check_flag((void *)A, "SUNBandMatrix", 0)) return 1;
  LS = SUNLinSol_Band(y, A);
  if (check_flag((void *)LS, "SUNLinSol_Band", 0)) return 1;

  /* Call ARKStepCreate to initialize the ARK timestepper module and
     specify the right-hand side function in y'=f(t,y), the inital time
     T0, and the initial dependent variable vector y. Note: since this
     problem is fully implicit, we set f_E to NULL and f_I to f. */
  arkode_mem = ARKStepCreate(NULL, f, T0, y);
  if (check_flag((void *)arkode_mem, "ARKStepCreate", 0)) return 1;

  /* Set routines */
  flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
  if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
  flag = ARKStepSStolerances(arkode_mem, reltol, abstol); /* Specify tolerances */
  if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;

  /* Linear solver specification */
  flag = ARKStepSetLinearSolver(arkode_mem, LS, A); /* Attach matrix and linear solver */
  if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
  flag = ARKStepSetJacFn(arkode_mem, Jac); /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKStepSetJacFn", 1)) return 1;

  /* output spatial mesh to disk */
  FID=fopen("bruss_mesh.txt","w");
  for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i);
  fclose(FID);

  /* Open output stream for results, access data arrays */
  UFID=fopen("bruss_u.txt","w");
  VFID=fopen("bruss_v.txt","w");
  WFID=fopen("bruss_w.txt","w");

  /* output initial condition to disk */
  data = N_VGetArrayPointer(y);
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
  for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
  for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
  fprintf(UFID,"\n");
  fprintf(VFID,"\n");
  fprintf(WFID,"\n");

  /* Main time-stepping loop: calls ARKStepEvolve to perform the integration,
     then prints results. Stops when the final time has been reached */
  t = T0;
  dTout = (Tf-T0)/Nt;
  tout = T0+dTout;
  printf(" t ||u||_rms ||v||_rms ||w||_rms\n");
  printf(" ----------------------------------------------\n");
  for (iout=0; iout<Nt; iout++) {

    flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
    if (check_flag(&flag, "ARKStepEvolve", 1)) break;
    u = N_VWL2Norm(y,umask); /* access/print solution statistics */
    u = sqrt(u*u/N);
    v = N_VWL2Norm(y,vmask);
    v = sqrt(v*v/N);
    w = N_VWL2Norm(y,wmask);
    w = sqrt(w*w/N);
    printf(" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM"\n", t, u, v, w);
    if (flag >= 0) { /* successful solve: update output time */
      tout += dTout;
      tout = (tout > Tf) ? Tf : tout;
    } else { /* unsuccessful solve: break */
      fprintf(stderr,"Solver failure, stopping integration\n");
      break;
    }

    /* output results to disk */
    for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
    for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
    for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
    fprintf(UFID,"\n");
    fprintf(VFID,"\n");
    fprintf(WFID,"\n");
  }
  printf(" ----------------------------------------------\n");
  fclose(UFID);
  fclose(VFID);
  fclose(WFID);

  /* Print some final statistics */
  flag = ARKStepGetNumSteps(arkode_mem, &nst);
  check_flag(&flag, "ARKStepGetNumSteps", 1);
  flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
  check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
  flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
  check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
  flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
  check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
  flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
  check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
  flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
  check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
  flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
  check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
  flag = ARKStepGetNumJacEvals(arkode_mem, &nje);
  check_flag(&flag, "ARKStepGetNumJacEvals", 1);
  flag = ARKStepGetNumLinRhsEvals(arkode_mem, &nfeLS);
  check_flag(&flag, "ARKStepGetNumLinRhsEvals", 1);

  printf("\nFinal Solver Statistics:\n");
  printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
  printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
  printf(" Total linear solver setups = %li\n", nsetups);
  printf(" Total RHS evals for setting up the linear system = %li\n", nfeLS);
  printf(" Total number of Jacobian evaluations = %li\n", nje);
  printf(" Total number of Newton iterations = %li\n", nni);
  printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
  printf(" Total number of error test failures = %li\n\n", netf);

  /* Clean up and return with successful completion */
  free(udata); /* Free user data */
  ARKStepFree(&arkode_mem); /* Free integrator memory */
  SUNLinSolFree(LS); /* Free linear solver */
  SUNMatDestroy(A); /* Free matrix */
  N_VDestroy(y); /* Free vectors */
  N_VDestroy(umask);
  N_VDestroy(vmask);
  N_VDestroy(wmask);
  return 0;
}

/*-------------------------------
 * Functions called by the solver
 *-------------------------------*/

/* f routine to compute the ODE RHS function f(t,y).
*/
/* Computes ydot = f(t,y) for the 1D brusselator: second-order centered
 * differences for the diffusion terms plus the pointwise reaction terms,
 * with stationary (zero time-derivative) boundary nodes.  The RHS does not
 * depend explicitly on t; the parameter is required by the ARKode interface.
 * Returns 0 on success, 1 if a data pointer could not be obtained. */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
  UserData udata = (UserData) user_data;      /* access problem data */
  sunindextype N = udata->N;                  /* set variable shortcuts */
  realtype a  = udata->a;
  realtype b  = udata->b;
  realtype ep = udata->ep;
  realtype du = udata->du;
  realtype dv = udata->dv;
  realtype dw = udata->dw;
  realtype dx = udata->dx;
  realtype *Ydata=NULL, *dYdata=NULL;
  realtype uconst, vconst, wconst, u, ul, ur, v, vl, vr, w, wl, wr;
  sunindextype i;

  Ydata = N_VGetArrayPointer(y);     /* access data arrays */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
  dYdata = N_VGetArrayPointer(ydot);
  if (check_flag((void *)dYdata, "N_VGetArrayPointer", 0)) return 1;

  /* initialize ydot to zero; a single N_VConst suffices (the original code
     zeroed ydot twice per RHS evaluation, which was redundant work) */
  N_VConst(0.0, ydot);

  /* iterate over domain, computing all equations; each thread writes a
     disjoint set of interior nodes, so the loop is race-free */
  uconst = du/dx/dx;
  vconst = dv/dx/dx;
  wconst = dw/dx/dx;
#pragma omp parallel for default(shared) private(i,u,ul,ur,v,vl,vr,w,wl,wr) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {

    /* set shortcuts */
    u = Ydata[IDX(i,0)];  ul = Ydata[IDX(i-1,0)];  ur = Ydata[IDX(i+1,0)];
    v = Ydata[IDX(i,1)];  vl = Ydata[IDX(i-1,1)];  vr = Ydata[IDX(i+1,1)];
    w = Ydata[IDX(i,2)];  wl = Ydata[IDX(i-1,2)];  wr = Ydata[IDX(i+1,2)];

    /* u_t = du*u_xx + a - (w+1)*u + v*u^2 */
    dYdata[IDX(i,0)] = (ul - RCONST(2.0)*u + ur)*uconst + a - (w+RCONST(1.0))*u + v*u*u;

    /* v_t = dv*v_xx + w*u - v*u^2 */
    dYdata[IDX(i,1)] = (vl - RCONST(2.0)*v + vr)*vconst + w*u - v*u*u;

    /* w_t = dw*w_xx + (b-w)/ep - w*u */
    dYdata[IDX(i,2)] = (wl - RCONST(2.0)*w + wr)*wconst + (b-w)/ep - w*u;
  }

  /* enforce stationary boundaries */
  dYdata[IDX(0,0)]   = dYdata[IDX(0,1)]   = dYdata[IDX(0,2)]   = 0.0;
  dYdata[IDX(N-1,0)] = dYdata[IDX(N-1,1)] = dYdata[IDX(N-1,2)] = 0.0;

  return 0;
}

/* Jacobian routine to compute J(t,y) = df/dy.
*/
/* Builds J = L + R'(y): zeroes the band matrix, adds the constant diffusion
 * (Laplace) stiffness contribution, then accumulates the reaction Jacobian.
 * The zeroing must happen first because both helpers use "+=".
 * The fy, tmp1-tmp3 parameters are unused here but required by the ARKLS
 * Jacobian-function signature. */
static int Jac(realtype t, N_Vector y, N_Vector fy, SUNMatrix J, void *user_data,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  UserData udata = (UserData) user_data;     /* access problem data */
  SUNMatZero(J);                             /* Initialize Jacobian to zero */

  /* Fill in the Laplace matrix */
  if (LaplaceMatrix(RCONST(1.0), J, udata)) {
    printf("Jacobian calculation error in calling LaplaceMatrix!\n");
    return 1;
  }

  /* Add in the Jacobian of the reaction terms matrix */
  if (ReactionJac(RCONST(1.0), y, J, udata)) {
    printf("Jacobian calculation error in calling ReactionJac!\n");
    return 1;
  }
  return 0;
}

/*-------------------------------
 * Private helper functions
 *-------------------------------*/

/* Routine to compute the stiffness matrix from (L*y), scaled by the factor c.
   We add the result into Jac and do not erase what was already there */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata)
{
  sunindextype N = udata->N;   /* set shortcuts */
  realtype dx = udata->dx;
  sunindextype i;
  realtype uconst = c*udata->du/dx/dx;
  realtype vconst = c*udata->dv/dx/dx;
  realtype wconst = c*udata->dw/dx/dx;

  /* iterate over intervals, filling in Jacobian entries; each iteration
     writes only rows IDX(i,*), so threads touch disjoint matrix entries */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {

    /* Jacobian of (L*y) at this node */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i-1,0)) += uconst;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i-1,1)) += vconst;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i-1,2)) += wconst;
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) -= RCONST(2.0)*uconst;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) -= RCONST(2.0)*vconst;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) -= RCONST(2.0)*wconst;
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i+1,0)) += uconst;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i+1,1)) += vconst;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i+1,2)) += wconst;
  }
  return 0;
}

/* Routine to compute the Jacobian matrix from R(y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata)
{
  sunindextype N = udata->N;                 /* number of mesh nodes */
  realtype ep = udata->ep;                   /* stiffness parameter */
  sunindextype ix;                           /* interior-node index */
  realtype un, vn, wn;                       /* nodal solution values */
  realtype *Ydata = N_VGetArrayPointer(y);   /* access solution array */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;

  /* accumulate the reaction-term Jacobian at every interior node; each
     iteration writes only rows IDX(ix,*), so threads never collide */
#pragma omp parallel for default(shared) private(ix,un,vn,wn) schedule(static) num_threads(udata->nthreads)
  for (ix=1; ix<N-1; ix++) {

    /* pull out the current nodal values */
    un = Ydata[IDX(ix,0)];
    vn = Ydata[IDX(ix,1)];
    wn = Ydata[IDX(ix,2)];

    /* row for the u equation: derivatives wrt u, v, w */
    SM_ELEMENT_B(Jac,IDX(ix,0),IDX(ix,0)) += c*(RCONST(2.0)*un*vn-(wn+RCONST(1.0)));
    SM_ELEMENT_B(Jac,IDX(ix,0),IDX(ix,1)) += c*(un*un);
    SM_ELEMENT_B(Jac,IDX(ix,0),IDX(ix,2)) += c*(-un);

    /* row for the v equation: derivatives wrt u, v, w */
    SM_ELEMENT_B(Jac,IDX(ix,1),IDX(ix,0)) += c*(wn - RCONST(2.0)*un*vn);
    SM_ELEMENT_B(Jac,IDX(ix,1),IDX(ix,1)) += c*(-un*un);
    SM_ELEMENT_B(Jac,IDX(ix,1),IDX(ix,2)) += c*(un);

    /* row for the w equation: derivatives wrt u and w (no v dependence) */
    SM_ELEMENT_B(Jac,IDX(ix,2),IDX(ix,0)) += c*(-wn);
    SM_ELEMENT_B(Jac,IDX(ix,2),IDX(ix,2)) += c*(-RCONST(1.0)/ep - un);
  }
  return 0;
}

/* Check function return value...
/* opt == 0 means SUNDIALS function allocates memory so check if
             returned NULL pointer
   opt == 1 means SUNDIALS function returns a flag so check if
             flag >= 0
   opt == 2 means function allocates memory so check if returned
             NULL pointer

   Returns 0 when the checked value is OK, 1 (after printing a
   diagnostic to stderr) when it indicates failure. */
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  int *errflag;

  switch (opt) {

  case 0:  /* SUNDIALS allocator: NULL result means failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    break;

  case 1:  /* SUNDIALS status flag: negative value means failure */
    errflag = (int *) flagvalue;
    if (*errflag < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, *errflag);
      return 1;
    }
    break;

  case 2:  /* plain allocator (e.g. malloc): NULL result means failure */
    if (flagvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
    break;

  default: /* unknown opt values are treated as success, as before */
    break;
  }

  return 0;
}

/*---- end of file ----*/
bufradixsort_relocate.h
#ifndef BUFRADIXSORT_RELOCATE_H
#define BUFRADIXSORT_RELOCATE_H

#include "bufradixsort_common.h"

#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#if EXT_UNIQID(SSE2) == EXT_UNIQID(EXT_STREAM)
#include <emmintrin.h>
#include <smmintrin.h>
#endif

/* COPYMASKBUF: flush one full bucket buffer to the destination while XOR-ing
 * the per-byte float mask; dispatched on EXT_STREAM (scalar vs SSE2 streaming
 * stores).  Both variants expect copy_point_rst / buf_point_rst /
 * float_mask_rst to be in scope at the expansion site. */
#define COPYMASKBUF CAT(COPYMASKBUF_EXT_, EXT_STREAM)
#define COPYMASKBUF_EXT_NONE() do { \
	unsigned int i; \
	for (i = 0; i < BUFFER_SIZE; i++) \
		ASSUME_ALIGNED(copy_point_rst, BUFFER_SIZE)[i] = \
			ASSUME_ALIGNED(buf_point_rst, BUFFER_SIZE)[i] ^ ASSUME_ALIGNED(float_mask_rst, BUFFER_SIZE)[i]; \
} while (0)
#define COPYMASKBUF_EXT_SSE2() do { \
	unsigned int i; \
	for (i = 0; i < BUFFER_SIZE/16; i++) \
		_mm_stream_si128((__m128i*)copy_point_rst+i, \
			_mm_xor_si128(_mm_load_si128((__m128i*)buf_point_rst+i), _mm_load_si128((__m128i*)float_mask_rst+i))); \
} while(0)

#if defined(__GNUC__)
#pragma GCC optimize("tree-vectorize")
#pragma GCC optimize("unroll-loops")
#endif
/* Flush a full bucket buffer (float variant, applying the XOR mask).
 * first_buf_bkt marks the one bucket whose first invalid_elems_offset bytes
 * must NOT be written (they would land before the caller's destination area);
 * once that bucket has been flushed, BKT is returned so no later flush skips
 * bytes.  For every other bucket the whole buffer is streamed out. */
static int relocate_float_buf_full(unsigned int first_buf_bkt, unsigned char *restrict buf_point_rst,
		unsigned int bkt, unsigned char *restrict *restrict copy_points_rst,
		unsigned int invalid_elems_offset, const unsigned char *restrict float_mask_rst) {
	unsigned char *copy_point_rst = copy_points_rst[bkt];
	copy_points_rst[bkt] = copy_point_rst + BUFFER_SIZE;
	if (UNLIKELY(bkt == first_buf_bkt)) {
		unsigned int i;
		for (i = invalid_elems_offset; i < BUFFER_SIZE; i++)
			copy_point_rst[i] = buf_point_rst[i] ^ float_mask_rst[i];
		return BKT;
	} else {
		COPYMASKBUF();
		return first_buf_bkt;
	}
}

/* Move one element into its bucket buffer; when the buffer fills (detected by
 * the pointer reaching a BUFFER_SIZE boundary) flush it.  Buckets in the upper
 * half hold negative floats and are flushed with the XOR mask applied.
 * relocate_buf_full is defined below; that is fine because this macro only
 * expands inside relocate_data, after both functions exist. */
#define RELOCATE_FLOAT_KERNEL(ELEM_SIZE) do { \
	unsigned int bkt = *data_cur; \
	data_cur += ELEM_SIZE; \
	SIZEUTYP(ELEM_SIZE) val; \
	memcpy(&val, data_rst, sizeof(val)); \
	data_rst += ELEM_SIZE; \
	unsigned char *buf_point_rst = buf_points_rst[bkt]; \
	memcpy(buf_point_rst, &val, sizeof(val)); \
	buf_point_rst += ELEM_SIZE; \
	if (((uintptr_t)buf_point_rst & (BUFFER_SIZE-1)) == 0) { \
		buf_point_rst -= BUFFER_SIZE; \
		if (bkt < BKT>>1) \
			first_buf_bkt = relocate_buf_full(first_buf_bkt, buf_point_rst, bkt, \
					copy_points_rst, invalid_elems_offset); \
		else \
			first_buf_bkt = relocate_float_buf_full(first_buf_bkt, buf_point_rst, bkt, \
					copy_points_rst, invalid_elems_offset, float_mask_rst); \
	} \
	buf_points_rst[bkt] = buf_point_rst; \
} while (0)

/* Main float relocation loop: a remainder loop up to data_algn, then an
 * UNROLL_RELOCATE-times unrolled loop with prefetching to data_end. */
#define RELOCATE_FLOAT_IF_F_DO(ELEM_SIZE) do { \
	const unsigned char *data_cur = data_rst + bkt_pos_base + real_pos; \
	while (data_rst < data_algn) { \
		RELOCATE_FLOAT_KERNEL(ELEM_SIZE); \
	} \
	while (data_rst < data_end) { \
		PREFETCH(data_rst+128, 0, 0); \
		ITERARG(UNROLL_RELOCATE, RELOCATE_FLOAT_KERNEL, ELEM_SIZE); \
	} \
} while (0)
/* Compile the float path only for element sizes that can actually hold a
 * supported float type (compile-time arithmetic via the preprocessor lib). */
#define RELOCATE_FLOAT_IF_F(ELEM_SIZE) \
	IF0(SUB(DIV(INDEX(0, SUPPORTED_FLOAT_BITS_LIST_LEN, SUPPORTED_FLOAT_BITS_LIST), BKT_BIT), ELEM_SIZE), \
		RELOCATE_FLOAT_IF_F_DO(ELEM_SIZE), ;)

/*
 * Otherwise, simply relocate.
 */
#define COPYBUF CAT(COPYBUF_EXT_, EXT_STREAM)
#define COPYBUF_EXT_NONE() \
	memcpy(ASSUME_ALIGNED(copy_point_rst, BUFFER_SIZE), ASSUME_ALIGNED(buf_point_rst, BUFFER_SIZE), BUFFER_SIZE)
#define COPYBUF_EXT_SSE2() \
	ITERNUM(DIV(BUFFER_SIZE, 16), COPYBUF_EXT_SSE2_KERNEL)
#define COPYBUF_EXT_SSE2_KERNEL(n) \
	_mm_stream_si128((__m128i*)copy_point_rst+n, _mm_load_si128((__m128i*)buf_point_rst+n))

#if defined(__GNUC__)
#pragma GCC optimize("tree-vectorize")
#pragma GCC optimize("unroll-loops")
#endif
/* Flush a full bucket buffer (plain copy, no mask); same first_buf_bkt /
 * invalid_elems_offset protocol as relocate_float_buf_full above. */
static int relocate_buf_full(unsigned int first_buf_bkt, unsigned char *restrict buf_point_rst,
		unsigned int bkt, unsigned char *restrict *restrict copy_points_rst,
		unsigned int invalid_elems_offset) {
	unsigned char *copy_point_rst = copy_points_rst[bkt];
	copy_points_rst[bkt] = copy_point_rst + BUFFER_SIZE;
	if (UNLIKELY(bkt == first_buf_bkt)) {
		unsigned int i;
		for (i = invalid_elems_offset; i < BUFFER_SIZE; i++)
			copy_point_rst[i] = buf_point_rst[i];
		return BKT;
	} else {
		COPYBUF();
		return first_buf_bkt;
	}
}

/* Non-float element relocation step; identical to the float kernel except
 * every full buffer is flushed with the plain copy. */
#define RELOCATE_NONFLOAT_KERNEL(ELEM_SIZE) do { \
	unsigned int bkt = *data_cur; \
	data_cur += ELEM_SIZE; \
	SIZEUTYP(ELEM_SIZE) val; \
	memcpy(&val, (SIZEUTYP(ELEM_SIZE)*)data_rst, sizeof(val)); \
	data_rst += ELEM_SIZE; \
	unsigned char *restrict buf_point_rst = buf_points_rst[bkt]; \
	memcpy((SIZEUTYP(ELEM_SIZE)*)buf_point_rst, &val, sizeof(val)); \
	buf_point_rst += ELEM_SIZE; \
	if (((uintptr_t)buf_point_rst & (BUFFER_SIZE-1)) == 0) { \
		buf_point_rst -= BUFFER_SIZE; \
		first_buf_bkt = relocate_buf_full(first_buf_bkt, buf_point_rst, bkt, copy_points_rst, invalid_elems_offset); \
	} \
	buf_points_rst[bkt] = buf_point_rst; \
} while(0)
#define RELOCATE_NONFLOAT_IF_F(ELEM_SIZE) do { \
	const unsigned char *data_cur = data_rst + bkt_pos_base + real_pos; \
	while (data_rst < data_algn) { \
		RELOCATE_NONFLOAT_KERNEL(ELEM_SIZE); \
	} \
	while (data_rst < data_end) { \
		PREFETCH(data_rst+128, 0, 0); \
		ITERARG(UNROLL_RELOCATE, RELOCATE_NONFLOAT_KERNEL, ELEM_SIZE); \
	} \
} while (0)

/*
 * Generic part.
 */
/* One switch case per power-of-two element size (log form). */
#define RELOCATE_CASE_E(ELEM_SIZE_LOG) case ELEM_SIZE_LOG: { \
	if (float_bits_if_msb) { \
		RELOCATE_FLOAT_IF_F(POW(2, ELEM_SIZE_LOG)); \
	} else { \
		RELOCATE_NONFLOAT_IF_F(POW(2, ELEM_SIZE_LOG)); \
	} \
} break

/* Scatter elements from [data, data_end) into their destination areas
 * (copy_points, one cursor per bucket), staging them through per-bucket
 * buffers of BUFFER_SIZE bytes so destination writes happen in aligned,
 * full-buffer bursts.  A final pass drains whatever remains in the buffers. */
static void relocate_data(const unsigned char *data, const unsigned char *data_end, unsigned char *dest,
		unsigned int elem_size_log, unsigned int bkt_pos_base, unsigned int real_pos,
		unsigned int float_bits_if_msb, unsigned int bkt_fix_sign,
		const size_t *histo, unsigned char **copy_points) {
#ifdef ALIGNED
	unsigned char ALIGNED(BUFFER_SIZE) buf[BKT][BUFFER_SIZE];
	unsigned char ALIGNED(BUFFER_SIZE) float_mask[BUFFER_SIZE];
#else
	/* no alignment attribute available: over-allocate and align by hand.
	 * NOTE(review): here float_mask is a pointer-to-array while the ALIGNED
	 * branch declares a plain byte array; the byte-indexed uses below look
	 * written for the 1D form -- confirm this branch actually compiles in a
	 * non-ALIGNED build. */
	unsigned char buf_space[BKT*(BUFFER_SIZE+2)];
	unsigned char (*buf)[BUFFER_SIZE] = (unsigned char(*)[BUFFER_SIZE])(buf_space + (-(uintptr_t)buf_space & (BUFFER_SIZE-1)));
	unsigned char (*float_mask)[BUFFER_SIZE] = buf+BKT;
#endif
	unsigned char *buf_points[BKT];
	unsigned int first_buf_bkt = BKT;
	unsigned int invalid_elems_offset = 0;
	unsigned int bkt;

	/*
	 * If current thread is sorting to the first area of destination,
	 * even temporalily writing to the preceeding position should be considered as dangerous,
	 * since they could be used by other parts of the program or be protected by segment.
	 * So we must detect writing to the first position.
	 */
	{
		unsigned char *dest_algn = (void*)((uintptr_t)dest & -BUFFER_SIZE);
		if (dest != dest_algn) {
			unsigned char *dest_algn_up = dest_algn + BUFFER_SIZE;
			/* find the first bucket (in output order) whose area crosses the
			 * first aligned boundary; its flush must skip the leading bytes */
			for (bkt = 0; bkt < BKT; bkt++) {
				unsigned char *strt_point = copy_points[bkt^bkt_fix_sign];
				unsigned char *ends_point = strt_point + histo[bkt^bkt_fix_sign];
				if (ends_point >= dest_algn_up) {
					if (strt_point < dest_algn_up) {
						first_buf_bkt = bkt^bkt_fix_sign;
						invalid_elems_offset = strt_point - dest_algn;
					}
					break;
				}
			}
		}
	}

	/*
	 * Set up buf_points and copy_points (aligned), that is displays of the buffers and the dest resp.
	 * Copypoints are aligned by the number of elements of a buffer.
	 */
	for (bkt = 0; bkt < BKT; bkt++) {
		unsigned char *copy_point = copy_points[bkt];
		unsigned char *copy_point_algn = (void*)((uintptr_t)copy_point & -BUFFER_SIZE);
		int buf_offset = copy_point - copy_point_algn;
		/* copy cursor is rounded down to alignment; the in-buffer cursor
		 * starts at the same offset so the two stay congruent mod BUFFER_SIZE */
		copy_points[bkt] = copy_point - buf_offset;
		buf_points[bkt] = buf[bkt] + buf_offset;
	}

	/*
	 * Set up float mask.
	 */
	memset(float_mask, 0, BUFFER_SIZE);
	if (float_bits_if_msb) {
		unsigned int i, j;
		for (i = 0; i < BUFFER_SIZE; i += 1 << elem_size_log)
			for (j = 0; j < float_bits_if_msb/BKT_BIT-1; j++)
				if (j != real_pos)
					float_mask[i + bkt_pos_base + j] = BKT-1;
	}

	/*
	 * Run kernel.
	 */
	{
		const unsigned char *restrict data_rst = data;
		unsigned char *restrict *restrict buf_points_rst = buf_points;
		unsigned char *restrict *restrict copy_points_rst = copy_points;
		/* data_algn marks where the remainder (non-unrolled) loop must stop */
		const unsigned char *data_algn = data + ((((data_end - data) >> elem_size_log) % UNROLL_RELOCATE) << elem_size_log);
		const unsigned char *restrict float_mask_rst = float_mask;
		switch (elem_size_log) {
			ITERNUM(SUCC(ELEM_SIZE_LOG_MAX), RELOCATE_CASE_E);
		}
	}

#ifdef _OPENMP
#pragma omp barrier
#endif

	/* drain the partially filled buffers into the destination */
	for (bkt = 0; bkt < BKT; bkt++) {
		unsigned char *ends_point = copy_points[bkt] + (buf_points[bkt] - buf[bkt]);
		unsigned char *strt_point = ends_point - histo[bkt];
		unsigned char *ends_point_algn = (void*)((uintptr_t)ends_point & -BUFFER_SIZE);
		unsigned char *strt_point_algn = (void*)((uintptr_t)strt_point & -BUFFER_SIZE);
		unsigned char *copy_point;
		unsigned char *buf_point;
		if (strt_point_algn == ends_point_algn) {
			/* whole bucket fit inside one aligned block: copy it all */
			copy_point = strt_point;
			buf_point = buf[bkt] + (strt_point - strt_point_algn);
		} else {
			/* earlier blocks were already flushed; copy only the tail */
			copy_point = ends_point_algn;
			buf_point = buf[bkt];
		}
		while (copy_point < ends_point)
			*copy_point++ = *buf_point++;
	}
}

#endif /* BUFRADIXSORT_RELOCATE_H */
sub_kernel.c
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define MATSIZE 1000

/* Global working set: two input matrices and one accumulator. */
struct {
  int nsize;
  float a2[MATSIZE][MATSIZE]; /* address of matrix.a2 == &matrix.a2[0][0] */
  float b2[MATSIZE][MATSIZE];
  float c2[MATSIZE][MATSIZE];
} matrix;

/* Fill a2/b2 with sin/cos ramps along each row and clear c2.
 * BUG FIX: the original used "#pragma omp parallel" + "#pragma omp for",
 * which privatizes only the workshared index i; the inner index j stayed
 * shared across threads and raced.  The combined parallel-for with an
 * explicit private(j) clause makes every per-iteration scalar private. */
void set_array()
{
  int i, j, nsize;

  nsize = matrix.nsize = MATSIZE;

#pragma omp parallel for private(j)
  for (i = 0; i < nsize; i++) {
    for (j = 0; j < nsize; j++) {
      matrix.a2[i][j] = sin((float)j / (float)nsize);
      matrix.b2[i][j] = cos((float)j / (float)nsize);
      matrix.c2[i][j] = 0.0;
    }
  }
}

/* Accumulate c2[i][j] += (sum_k a2[i][k]*a2[j][k] + b2[i][k]*b2[j][k]) / nsize.
 * BUG FIX: as in set_array, j, k and the scratch scalars c1/c2/c3 were
 * implicitly shared in the original "parallel" + "for" split, producing a
 * data race; they are now listed in the private clause. */
void sub_kernel()
{
  int i, j, k, nsize;
  float c1, c2, c3;

  nsize = matrix.nsize = MATSIZE;

#pragma omp parallel for private(j, k, c1, c2, c3)
  for (i = 0; i < nsize; i++) {
    for (j = 0; j < nsize; j++) {
      c1 = 0.0;
      for (k = 0; k < nsize; k++) {
        c2 = matrix.a2[i][k] * matrix.a2[j][k];
        c3 = matrix.b2[i][k] * matrix.b2[j][k];
        c1 = c1 + c2 + c3;
      }
      matrix.c2[i][j] = matrix.c2[i][j] + c1 / (float)nsize;
    }
  }
}
estimator.h
// Copyright (C) 2013 The Regents of the University of California (Regents). // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // // * Neither the name of The Regents or University of California nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Please contact the author of this library if you have any questions. 
// Author: Chris Sweeney (cmsweeney@cs.ucsb.edu)

#ifndef THEIA_SOLVERS_ESTIMATOR_H_
#define THEIA_SOLVERS_ESTIMATOR_H_

//#include <glog/logging.h>
/*#ifdef THEIA_USE_OPENMP
#include <omp.h>
#endif*/

#include <vector>
#include <cstdlib>

namespace theia {

// Templated class for estimating a model for RANSAC. This class is purely a
// virtual class and should be implemented for the specific task that RANSAC is
// being used for. Two methods must be implemented: EstimateModel and Error. All
// other methods are optional, but will likely enhance the quality of the RANSAC
// output.
//
// NOTE: RANSAC, ARRSAC, and other solvers work best if Datum and Model are
// lightweight classes or structs.
template <class Datum, class Model> class Estimator {
 public:
  Estimator() {}
  virtual ~Estimator() {}

  // Given a set of data points, estimate the model. Users should implement this
  // function appropriately for the task being solved. Returns true for
  // successful model estimation (and outputs model), false for failed
  // estimation. Typically, this is a minimal set, but it is not required to be.
  virtual bool EstimateModel(const std::vector<Datum>& data,
                             std::vector<Model>* model) const = 0;

  // Estimate a model from a non-minimal sampling of the data. E.g. for a line,
  // use SVD on a set of points instead of constructing a line from two points.
  // By default, this simply implements the minimal case.
  virtual bool EstimateModelNonminimal(const std::vector<Datum>& data,
                                       std::vector<Model>* model) const {
    return EstimateModel(data, model);
  }

  // Refine the model based on an updated subset of data, and a pre-computed
  // model. Can be optionally implemented. The default is a no-op that reports
  // success.
  virtual bool RefineModel(const std::vector<Datum>& data, Model* model) const {
    return true;
  }

  // Given a model and a data point, calculate the error. Users should implement
  // this function appropriately for the task being solved.
  virtual double Error(const Datum& data, const Model& model) const = 0;

  // Compute the Error() of every datum against the given model, in input
  // order. Size of the result equals data.size().
  virtual std::vector<double> Residuals(const std::vector<Datum>& data,
                                        const Model& model) const {
    std::vector<double> residuals(data.size());
    //#pragma omp parallel for
    for (size_t i = 0; i < data.size(); i++) {
      residuals[i] = Error(data[i], model);
    }
    return residuals;
  }

  // Returns the set inliers of the data set based on the error threshold
  // provided. Element i is true iff Error(data[i], model) < error_threshold.
  std::vector<bool> GetInliers(const std::vector<Datum>& data,
                               const Model& model,
                               double error_threshold) const {
    std::vector<bool> inliers;
    // Reserve up front: the result always has exactly data.size() entries,
    // so this avoids repeated reallocation during push_back.
    inliers.reserve(data.size());
    for (size_t i = 0; i < data.size(); i++)
      inliers.push_back(Error(data[i], model) < error_threshold);
    return inliers;
  }

  // Returns the number inliers of the data set based on the error threshold
  // provided.
  int GetNumInliers(const std::vector<Datum>& data,
                    const Model& model,
                    double error_threshold) const {
    int num_inliers = 0;
    for (size_t i = 0; i < data.size(); i++)
      if (Error(data[i], model) < error_threshold)
        num_inliers++;
    return num_inliers;
  }

  // Enable a quick check to see if the model is valid. This can be a geometric
  // check or some other verification of the model structure. Default accepts
  // every model.
  virtual bool ValidModel(const Model& model) const { return true; }
};

}  // namespace theia

#endif  // THEIA_SOLVERS_ESTIMATOR_H_
bit_vector_functions.h
#ifndef BIT_VECTOR_FUNCTIONS_H
#define BIT_VECTOR_FUNCTIONS_H

#include <vector>
#include <bitset>

#include "helper/confusion.h"
#include "config.h"
#include "io_and_allocation.h"
#include "updates_and_measures.h"

using std::vector;

// CPU reference kernels for Boolean matrix factorization: the bit-packed
// matrix Cb (height x width, 32 rows per 32-bit word) is approximated by the
// Boolean product of per-row factor masks Ab and per-column factor masks Bb.
// Entry C[i][j] lives at word  i / 32 * width + j , bit  i % 32 .
// NOTE(review): confusion_matrix, error_measure, getInitChance, metro,
// fast_kiss32 etc. come from the project headers included above.

// Count the positions where the Boolean product Ab*Bb differs from Cb
// (Hamming distance between reconstruction and target matrix).
template<typename bit_vector_t, typename index_t>
size_t computeHammingDistanceCPU(const vector<bit_vector_t> &Ab,
                                 const vector<bit_vector_t> &Bb,
                                 const vector<bit_vector_t> &Cb,
                                 const index_t height,
                                 const index_t width)
{
    size_t error = 0;

    #pragma omp parallel for reduction(+:error)
    for(index_t j=0; j < width; ++j) {
        uint32_t B_j = Bb[j];
        for(index_t i=0; i < height; ++i) {
            // Boolean product: 1 iff row and column share at least one factor.
            const int product = (Ab[i] & B_j) ? 1 : 0;

            // Bit position of C[i][j] in the packed representation.
            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            error += product ^ C_ij;  // XOR counts mismatches
        }
    }

    return error;
}

// Number of factor dimensions (bit positions) used by at least one entry
// of Ab.
// NOTE(review): std::numeric_limits is used but <limits> is not included
// here -- presumably pulled in transitively; confirm.
template<typename bit_vector_t>
int nonzeroDimension(vector<bit_vector_t>& Ab)
{
    bit_vector_t columns = 0;
    // OR all masks together; a set bit means that dimension is used somewhere.
    for(auto& a : Ab)
        columns |= a;

    std::bitset<std::numeric_limits<bit_vector_t>::digits> bits(columns);
    return bits.count();
}

// Full confusion matrix (TP/TN/FP/FN) of the reconstruction Ab*Bb vs Cb.
template<typename bit_vector_t, typename index_t>
confusion_matrix computeErrorsCPU(const vector<bit_vector_t> &Ab,
                                  const vector<bit_vector_t> &Bb,
                                  const vector<bit_vector_t> &Cb,
                                  const index_t height,
                                  const index_t width)
{
    size_t true_positives = 0;
    size_t true_negatives = 0;
    size_t false_positives = 0;
    size_t false_negatives = 0;

    #pragma omp parallel for reduction(+:true_positives) \
                             reduction(+:true_negatives) \
                             reduction(+:false_positives) \
                             reduction(+:false_negatives)
    for(index_t j=0; j < width; ++j) {
        uint32_t B_j = Bb[j];
        for(index_t i=0; i < height; ++i) {
            const int product = (Ab[i] & B_j) ? 1 : 0;

            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            // Branch-free confusion-matrix accumulation.
            true_positives  += C_ij & product;
            true_negatives  += !(C_ij | product);
            false_positives += (!C_ij) & product;
            false_negatives += C_ij & !product;
        }
    }

    return confusion_matrix(true_positives, true_negatives,
                            false_positives, false_negatives);
}

// Count only the true positives of the reconstruction Ab*Bb vs Cb.
template<typename bit_vector_t, typename index_t>
size_t computeTruePositiveCPU(const vector<bit_vector_t> &Ab,
                              const vector<bit_vector_t> &Bb,
                              const vector<bit_vector_t> &Cb,
                              const index_t height,
                              const index_t width)
{
    size_t true_positives = 0;

    #pragma omp parallel for reduction(+:true_positives)
    for(index_t j=0; j < width; ++j) {
        uint32_t B_j = Bb[j];
        for(index_t i=0; i < height; ++i) {
            const int product = (Ab[i] & B_j) ? 1 : 0;

            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            if(product & C_ij)
                true_positives++;
        }
    }

    return true_positives;
}

// Sum of per-column Jaccard indices TP/(TP+FP+FN) of the reconstruction.
// NOTE(review): this returns the SUM over columns, not the mean -- callers
// presumably divide by width; confirm.
template<typename bit_vector_t, typename index_t>
float computeJaccardCPU(const vector<bit_vector_t> &Ab,
                        const vector<bit_vector_t> &Bb,
                        const vector<bit_vector_t> &Cb,
                        const index_t height,
                        const index_t width)
{
    float jaccard = 0;

    #pragma omp parallel for reduction(+:jaccard)
    for(index_t j=0; j < width; ++j) {
        uint32_t B_j = Bb[j];
        size_t true_positives = 0;
        size_t false_positives = 0;
        size_t false_negatives = 0;
        for(index_t i=0; i < height; ++i) {
            const int product = (Ab[i] & B_j) ? 1 : 0;

            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            if(product) {
                if(C_ij) true_positives++;
                else false_positives++;
            } else {
                if(C_ij) false_negatives++;
            }
        }
        jaccard += (float) true_positives
                   / (true_positives + false_positives + false_negatives);
    }

    return jaccard;
}

// Weighted distance between reconstruction and target, using the
// project-defined error_measure(product, C_ij, weight).
template<typename bit_factor_t, typename bit_matrix_t, typename index_t, typename error_t>
error_t computeDistanceCPU(const vector<bit_factor_t> &Ab,
                           const vector<bit_factor_t> &Bb,
                           const vector<bit_matrix_t> &Cb,
                           const index_t height,
                           const index_t width,
                           const error_t weight)
{
    error_t error = 0;

    #pragma omp parallel for reduction(+:error)
    for(index_t i=0; i < height; ++i) {
        // Row mask hoisted out of the inner loop (outer loop is over rows
        // here, unlike the column-major kernels above).
        uint32_t A_i = Ab[i];
        for(index_t j=0; j < width; ++j) {
            const int product = (A_i & Bb[j]) ? 1 : 0;

            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            error += error_measure(product, C_ij, weight);
        }
    }

    return error;
}

// Fraction of set bits in each row of Cb: density_rows[i] in [0, 1].
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeDensitiesRows(const vector<bit_vector_t> &Cb,
                                     const index_t height,
                                     const index_t width)
{
    vector<error_t> density_rows(height);

    #pragma omp parallel for
    for(index_t i=0; i<height; ++i) {
        size_t nonZeroCount = 0;
        for(index_t j=0; j<width; ++j) {
            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;
            nonZeroCount += C_ij;
        }
        density_rows[i] = (error_t) nonZeroCount / width;
    }

    return density_rows;
}

// Fraction of set bits in each column of Cb: density_cols[j] in [0, 1].
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeDensitiesCols(const vector<bit_vector_t> &Cb,
                                     const index_t height,
                                     const index_t width)
{
    vector<error_t> density_cols(width);

    #pragma omp parallel for
    for(index_t j=0; j<width; ++j) {
        size_t nonZeroCount = 0;
        for(index_t i=0; i<height; ++i) {
            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;
            nonZeroCount += C_ij;
        }
        density_cols[j] = (error_t) nonZeroCount / height;
    }

    return density_cols;
}

// width / (set bits in row i); zero counts are clamped to 1 to avoid
// division by zero (an all-zero row yields width, not infinity).
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeInverseDensitiesRows(const vector<bit_vector_t> &Cb,
                                            const index_t height,
                                            const index_t width)
{
    vector<error_t> inverse_density_rows(height);

    #pragma omp parallel for
    for(index_t i=0; i<height; ++i) {
        size_t nonZeroCount = 0;
        for(index_t j=0; j<width; ++j) {
            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;
            nonZeroCount += C_ij;
        }
        if(nonZeroCount == 0) nonZeroCount++;
        inverse_density_rows[i] = (error_t) width / nonZeroCount;
    }

    return inverse_density_rows;
}

// height / (set bits in column j); zero counts clamped to 1 as above.
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeInverseDensitiesCols(const vector<bit_vector_t> &Cb,
                                            const index_t height,
                                            const index_t width)
{
    vector<error_t> inverse_density_cols(width);

    #pragma omp parallel for
    for(index_t j=0; j<width; ++j) {
        size_t nonZeroCount = 0;
        for(index_t i=0; i<height; ++i) {
            const index_t vecId = i / 32 * width + j;
            const index_t vecLane = i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;
            nonZeroCount += C_ij;
        }
        if(nonZeroCount == 0) nonZeroCount++;
        inverse_density_cols[j] = (error_t) height / nonZeroCount;
    }

    return inverse_density_cols;
}

// Randomly (re)initialize factor bit `column` across ALL lines of Ab.
// Thin wrapper over updateColumnPart covering the full range [0, size_A).
// NOTE(review): updateColumnPart is declared only BELOW this point; with
// strict two-phase name lookup this call is found solely because the
// arguments are dependent -- ADL does not cover the global namespace for
// fundamental types, so some compilers may reject this. A forward
// declaration above would be safer; confirm against the supported
// toolchains.
template<typename bit_vector_t, typename index_t>
void updateWholeColumn(vector<bit_vector_t> &Ab,
                       const index_t size_A,
                       const uint8_t factorDim,
                       const uint8_t column,
                       const float density,
                       const uint32_t seed)
{
    updateColumnPart(Ab, size_A, factorDim, column, density, 0, size_A, seed);
}

// Randomly set or clear factor bit `column` in `numlines` lines of Ab
// starting at `startline` (wrapping modulo size_A).  The probability of
// setting the bit comes from getInitChance(density, factorDim).
// NOTE(review): the bare "#pragma omp for" expects to run inside an
// enclosing "#pragma omp parallel" region supplied by the caller.
template<typename bit_vector_t, typename index_t>
void updateColumnPart(vector<bit_vector_t> &Ab,
                      const index_t size_A,
                      const uint8_t factorDim,
                      const uint8_t column,
                      const float density,
                      const index_t startline,
                      const index_t numlines,
                      const uint32_t seed)
{
    const double threshold = getInitChance(density, factorDim);

    #pragma omp for
    for (index_t id = 0; id < numlines; ++id) {
        const index_t i = (startline + id) % size_A;

        // Deterministic per-line RNG: seeded by line index, not thread id.
        fast_kiss_state32_t state;
        state = get_initial_fast_kiss_state32(seed + i);

        const bool set_one = fast_kiss32(state) < threshold * UINT32_MAX;
        if (set_one)
            Ab[i] |= 1 << column;
        else //set 0
            Ab[i] &= ~(1 << column);
    }
}

// For every line of Ab, greedily choose the better of "bit k cleared" vs
// "bit k set" (by confusion_matrix::total_error against Cb with factors Bb)
// and accumulate the resulting confusion counts.  `transpose` selects
// whether Cb is addressed row-major (false) or column-major (true).
// NOTE(review): parameter factorDim is unused here; kept for signature
// symmetry with the other update kernels, presumably.  The bare
// "#pragma omp for" expects an enclosing parallel region.
template<bool transpose, typename bit_vector_t, typename index_t>
confusion_matrix optimizeWholeColumn(vector<bit_vector_t> &Ab,
                                     const index_t size_A,
                                     const vector<bit_vector_t> &Bb,
                                     const index_t size_B,
                                     const vector<bit_vector_t> &Cb,
                                     const uint8_t factorDim,
                                     const uint8_t k)
{
    confusion_matrix confusion_new;

    #pragma omp for
    for (index_t i = 0; i < size_A; ++i) {
        // The two candidate masks: bit k forced to 0 resp. 1.
        const bit_vector_t A_i_0 = Ab[i] & ~(1 << k);
        const bit_vector_t A_i_1 = Ab[i] | (1 << k);

        confusion_matrix confusion_0;
        confusion_matrix confusion_1;
        for(index_t j=0; j < size_B; ++j) {
            const index_t vecId = transpose ? j / 32 * size_A + i
                                            : i / 32 * size_B + j;
            const index_t vecLane = transpose ? j % 32 : i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            const int product_0 = (A_i_0 & Bb[j]) ? 1 : 0;
            const int product_1 = (A_i_1 & Bb[j]) ? 1 : 0;

            confusion_0.TP += C_ij & product_0;
            confusion_1.TP += C_ij & product_1;
            confusion_0.FN += C_ij & !product_0;
            confusion_1.FN += C_ij & !product_1;
            confusion_0.FP += (!C_ij) & product_0;
            confusion_1.FP += (!C_ij) & product_1;
        }
        // Ties go to the cleared bit (<=), i.e. prefer the sparser factor.
        if(confusion_0.total_error() <= confusion_1.total_error()) {
            Ab[i] = A_i_0;
            confusion_new.TP += confusion_0.TP;
            confusion_new.FN += confusion_0.FN;
            confusion_new.FP += confusion_0.FP;
        } else {
            Ab[i] = A_i_1;
            confusion_new.TP += confusion_1.TP;
            confusion_new.FN += confusion_1.FN;
            confusion_new.FP += confusion_1.FP;
        }
    }

    return confusion_new;
}

// Simulated-annealing update of `numlines` lines of Ab driven by a Jaccard
// score (with FN weighted 3x).  For each line it evaluates three candidates
// (current mask, a freshly drawn mask, current XOR drawn), picks one by
// score / coin flip, and accepts it via the Metropolis criterion.  Returns
// the net change in confusion counts for the accepted moves.
// NOTE(review): bare "#pragma omp for" -- expects an enclosing parallel
// region; the per-line RNG is seeded with `id`, not `i`, unlike
// updateColumnPart.
template<bool transpose, typename bit_vector_t, typename index_t>
confusion_matrix updateLinesJaccardCPU(vector<bit_vector_t> &Ab,
                                       const index_t size_A,
                                       const vector<bit_vector_t> &Bb,
                                       const index_t size_B,
                                       const vector<bit_vector_t> &Cb,
                                       const uint8_t factorDim,
                                       const index_t startline,
                                       const index_t numlines,
                                       const uint32_t seed,
                                       const float temperature,
                                       const float flipManyChance,
                                       const uint32_t flipManyDepth,
                                       const confusion_matrix confusion)
{
    confusion_matrix confusion_update;

    #pragma omp for
    for(index_t id=0; id < numlines; ++id) {
        const index_t i = (startline + id) % size_A;

        fast_kiss_state32_t state;
        state = get_initial_fast_kiss_state32(seed + id);

        // Three candidate masks for line i.
        const bit_vector_t A_i = Ab[i];
        const bit_vector_t A_i_draw = get_flip_mask_many(factorDim, state, flipManyDepth);
        const bit_vector_t A_i_flip = A_i ^ A_i_draw;

        confusion_matrix confusion_old;
        confusion_matrix confusion_draw;
        confusion_matrix confusion_flip;
        for(index_t j=0; j < size_B; ++j) {
            const index_t vecId = transpose ? j / 32 * size_A + i
                                            : i / 32 * size_B + j;
            const index_t vecLane = transpose ? j % 32 : i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            const int product_old = (A_i & Bb[j]) ? 1 : 0;
            const int product_draw = (A_i_draw & Bb[j]) ? 1 : 0;
            const int product_flip = (A_i_flip & Bb[j]) ? 1 : 0;

            confusion_old.TP += C_ij & product_old;
            confusion_draw.TP += C_ij & product_draw;
            confusion_flip.TP += C_ij & product_flip;
            confusion_old.FN += C_ij & !product_old;
            confusion_draw.FN += C_ij & !product_draw;
            confusion_flip.FN += C_ij & !product_flip;
            confusion_old.FP += (!C_ij) & product_old;
            confusion_draw.FP += (!C_ij) & product_draw;
            confusion_flip.FP += (!C_ij) & product_flip;
        }

        // Global TP if this line's contribution were replaced by the candidate.
        const size_t all_tp_draw = confusion.TP - confusion_old.TP + confusion_draw.TP;
        const size_t all_tp_flip = confusion.TP - confusion_old.TP + confusion_flip.TP;

        // Weighted Jaccard scores: false negatives count 3x.
        const float jaccard_old = 1.0f * confusion.TP
            / (confusion.TP + 3*confusion_old.FN + confusion_old.FP);
        const float jaccard_draw = 1.0f * all_tp_draw
            / (all_tp_draw + 3*confusion_draw.FN + confusion_draw.FP);
        const float jaccard_flip = 1.0f * all_tp_flip
            / (all_tp_flip + 3*confusion_flip.FN + confusion_flip.FP);

        bit_vector_t A_i_new = A_i_draw;
        float jaccard_new = jaccard_draw;
        // NOTE(review): confusion_new is a REFERENCE to confusion_draw;
        // the assignments "confusion_new = confusion_flip" below copy
        // flip's counts INTO confusion_draw rather than rebinding.  The
        // final arithmetic still reads the chosen candidate's values, so
        // this works, but it silently clobbers confusion_draw -- confirm
        // this is intentional.
        confusion_matrix& confusion_new = confusion_draw;
        if(jaccard_draw > jaccard_old) {
            if(jaccard_flip > jaccard_draw) {
                A_i_new = A_i_flip;
                jaccard_new = jaccard_flip;
                confusion_new = confusion_flip;
            }
        } else {
            if(jaccard_flip > jaccard_old) {
                A_i_new = A_i_flip;
                jaccard_new = jaccard_flip;
                confusion_new = confusion_flip;
            } else {
                // Neither candidate improves: pick one at random and let
                // the Metropolis step decide.
                const uint32_t coin = fast_kiss32(state) % 2;
                if(coin) {
                    A_i_new = A_i_flip;
                    jaccard_new = jaccard_flip;
                    confusion_new = confusion_flip;
                }
            }
        }

        // Metropolis acceptance on the score difference (<= 0 always accepts).
        if (metro(state, jaccard_old - jaccard_new, temperature)) {
            Ab[i] = A_i_new;
            confusion_update.TP += confusion_new.TP - confusion_old.TP;
            confusion_update.FP += confusion_new.FP - confusion_old.FP;
            confusion_update.FN += confusion_new.FN - confusion_old.FN;
        }
    }
    return confusion_update;
}

// Simulated-annealing update of `numlines` lines of Ab driven by the
// weighted error_measure distance.  Each line draws one flipped candidate
// mask, computes the error delta against Cb, and accepts via metro().
// Returns the summed error delta of all accepted moves.
// NOTE(review): declared to return int while error_update has type
// error_t -- for a floating-point error_t the result is truncated;
// confirm callers expect that.  Bare "#pragma omp for" again expects an
// enclosing parallel region (the commented-out pragma shows the
// standalone alternative).
template<bool transpose, typename bit_vector_t, typename index_t, typename error_t>
int vectorMatrixMultCompareLineCPU(vector<bit_vector_t> &Ab,
                                   const index_t size_A,
                                   const vector<bit_vector_t> &Bb,
                                   const index_t size_B,
                                   const vector<bit_vector_t> &Cb,
                                   const uint8_t factorDim,
                                   const index_t startline,
                                   const index_t numlines,
                                   const uint32_t seed,
                                   const float temperature,
                                   const float flipManyChance,
                                   const uint32_t flipManyDepth,
                                   const error_t weight)
{
    error_t error_update = 0;

    #pragma omp for
    // #pragma omp parallel for reduction(+:error_update)
    for(index_t id=0; id < numlines; ++id) {
        const index_t i = (startline + id) % size_A;

        fast_kiss_state32_t state;
        state = get_initial_fast_kiss_state32(seed + id);

        const bit_vector_t A_i = Ab[i];
        bit_vector_t A_i_changed = Ab[i] ^ get_flip_mask(factorDim, state, flipManyChance, flipManyDepth);

        error_t error = 0;
        for(index_t j=0; j < size_B; ++j) {
            const index_t vecId = transpose ? j / 32 * size_A + i
                                            : i / 32 * size_B + j;
            const index_t vecLane = transpose ? j % 32 : i % 32;
            const int C_ij = (Cb[vecId] >> vecLane) & 1;

            const int product_old = (A_i & Bb[j]) ? 1 : 0;
            const int product_new = (A_i_changed & Bb[j]) ? 1 : 0;

            // Delta of the weighted error for this line only.
            error += error_measure(product_new, C_ij, weight)
                     - error_measure(product_old, C_ij, weight);
        }

        if (metro(state, error, temperature, size_B)) {
            Ab[i] = A_i_changed;
            error_update += error;
        }
    }

    return error_update;
}

// Simple coordinate pair for sparse (COO) output.
template <typename index_t>
struct coo {
    coo(index_t x, index_t y) : x_{x}, y_{y} {}

    index_t x_;  // row index
    index_t y_;  // column index
};

// Materialize the Boolean product Ab*Bb as a COO list of set positions,
// ordered row-major.  The "ordered schedule(static,1)" pair keeps the
// parallel version's output order identical to the serial one: each
// thread builds its row chunk privately and appends in row order.
template <typename bit_vector_t, typename index_t>
vector<coo<index_t>> computeProductCOO(const vector<bit_vector_t> &Ab,
                                       const vector<bit_vector_t> &Bb,
                                       const index_t height,
                                       const index_t width)
{
    vector<coo<index_t>> C;

    #pragma omp parallel for ordered schedule(static,1)
    for(index_t i=0; i < height; ++i) {
        bit_vector_t row = Ab[i];
        vector<coo<index_t>> Ci;
        for(index_t j=0; j < width; ++j) {
            if(row & Bb[j])
                Ci.emplace_back(i,j);
        }
        #pragma omp ordered
        C.insert(C.end(), Ci.begin(), Ci.end());
    }
    return C;
}

#endif  // BIT_VECTOR_FUNCTIONS_H
gimplify.c
/* Tree lowering pass. This pass converts the GENERIC functions-as-trees tree representation into the GIMPLE form. Copyright (C) 2002-2020 Free Software Foundation, Inc. Major work done by Sebastian Pop <s.pop@laposte.net>, Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "memmodel.h" #include "tm_p.h" #include "gimple.h" #include "gimple-predict.h" #include "tree-pass.h" /* FIXME: only for PROP_gimple_any */ #include "ssa.h" #include "cgraph.h" #include "tree-pretty-print.h" #include "diagnostic-core.h" #include "alias.h" #include "fold-const.h" #include "calls.h" #include "varasm.h" #include "stmt.h" #include "expr.h" #include "gimple-fold.h" #include "tree-eh.h" #include "gimplify.h" #include "gimple-iterator.h" #include "stor-layout.h" #include "print-tree.h" #include "tree-iterator.h" #include "tree-inline.h" #include "langhooks.h" #include "tree-cfg.h" #include "tree-ssa.h" #include "omp-general.h" #include "omp-low.h" #include "gimple-low.h" #include "gomp-constants.h" #include "splay-tree.h" #include "gimple-walk.h" #include "langhooks-def.h" /* FIXME: for lhd_set_decl_assembler_name */ #include "builtins.h" #include "stringpool.h" #include "attribs.h" #include "asan.h" #include 
"dbgcnt.h" #include "omp-offload.h" #include "context.h" /* Hash set of poisoned variables in a bind expr. */ static hash_set<tree> *asan_poisoned_variables = NULL; enum gimplify_omp_var_data { GOVD_SEEN = 0x000001, GOVD_EXPLICIT = 0x000002, GOVD_SHARED = 0x000004, GOVD_PRIVATE = 0x000008, GOVD_FIRSTPRIVATE = 0x000010, GOVD_LASTPRIVATE = 0x000020, GOVD_REDUCTION = 0x000040, GOVD_LOCAL = 0x00080, GOVD_MAP = 0x000100, GOVD_DEBUG_PRIVATE = 0x000200, GOVD_PRIVATE_OUTER_REF = 0x000400, GOVD_LINEAR = 0x000800, GOVD_ALIGNED = 0x001000, /* Flag for GOVD_MAP: don't copy back. */ GOVD_MAP_TO_ONLY = 0x002000, /* Flag for GOVD_LINEAR or GOVD_LASTPRIVATE: no outer reference. */ GOVD_LINEAR_LASTPRIVATE_NO_OUTER = 0x004000, GOVD_MAP_0LEN_ARRAY = 0x008000, /* Flag for GOVD_MAP, if it is always, to or always, tofrom mapping. */ GOVD_MAP_ALWAYS_TO = 0x010000, /* Flag for shared vars that are or might be stored to in the region. */ GOVD_WRITTEN = 0x020000, /* Flag for GOVD_MAP, if it is a forced mapping. */ GOVD_MAP_FORCE = 0x040000, /* Flag for GOVD_MAP: must be present already. */ GOVD_MAP_FORCE_PRESENT = 0x080000, /* Flag for GOVD_MAP: only allocate. */ GOVD_MAP_ALLOC_ONLY = 0x100000, /* Flag for GOVD_MAP: only copy back. */ GOVD_MAP_FROM_ONLY = 0x200000, GOVD_NONTEMPORAL = 0x400000, /* Flag for GOVD_LASTPRIVATE: conditional modifier. */ GOVD_LASTPRIVATE_CONDITIONAL = 0x800000, GOVD_CONDTEMP = 0x1000000, /* Flag for GOVD_REDUCTION: inscan seen in {in,ex}clusive clause. */ GOVD_REDUCTION_INSCAN = 0x2000000, /* Flag for GOVD_MAP: (struct) vars that have pointer attachments for fields. 
*/ GOVD_MAP_HAS_ATTACHMENTS = 8388608, GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LINEAR | GOVD_LOCAL) }; enum omp_region_type { ORT_WORKSHARE = 0x00, ORT_TASKGROUP = 0x01, ORT_SIMD = 0x04, ORT_PARALLEL = 0x08, ORT_COMBINED_PARALLEL = ORT_PARALLEL | 1, ORT_TASK = 0x10, ORT_UNTIED_TASK = ORT_TASK | 1, ORT_TASKLOOP = ORT_TASK | 2, ORT_UNTIED_TASKLOOP = ORT_UNTIED_TASK | 2, ORT_TEAMS = 0x20, ORT_COMBINED_TEAMS = ORT_TEAMS | 1, ORT_HOST_TEAMS = ORT_TEAMS | 2, ORT_COMBINED_HOST_TEAMS = ORT_COMBINED_TEAMS | 2, /* Data region. */ ORT_TARGET_DATA = 0x40, /* Data region with offloading. */ ORT_TARGET = 0x80, ORT_COMBINED_TARGET = ORT_TARGET | 1, ORT_IMPLICIT_TARGET = ORT_TARGET | 2, /* OpenACC variants. */ ORT_ACC = 0x100, /* A generic OpenACC region. */ ORT_ACC_DATA = ORT_ACC | ORT_TARGET_DATA, /* Data construct. */ ORT_ACC_PARALLEL = ORT_ACC | ORT_TARGET, /* Parallel construct */ ORT_ACC_KERNELS = ORT_ACC | ORT_TARGET | 2, /* Kernels construct. */ ORT_ACC_SERIAL = ORT_ACC | ORT_TARGET | 4, /* Serial construct. */ ORT_ACC_HOST_DATA = ORT_ACC | ORT_TARGET_DATA | 2, /* Host data. */ /* Dummy OpenMP region, used to disable expansion of DECL_VALUE_EXPRs in taskloop pre body. */ ORT_NONE = 0x200 }; /* Gimplify hashtable helper. */ struct gimplify_hasher : free_ptr_hash <elt_t> { static inline hashval_t hash (const elt_t *); static inline bool equal (const elt_t *, const elt_t *); }; struct gimplify_ctx { struct gimplify_ctx *prev_context; vec<gbind *> bind_expr_stack; tree temps; gimple_seq conditional_cleanups; tree exit_label; tree return_temp; vec<tree> case_labels; hash_set<tree> *live_switch_vars; /* The formal temporary table. Should this be persistent? 
*/ hash_table<gimplify_hasher> *temp_htab; int conditions; unsigned into_ssa : 1; unsigned allow_rhs_cond_expr : 1; unsigned in_cleanup_point_expr : 1; unsigned keep_stack : 1; unsigned save_stack : 1; unsigned in_switch_expr : 1; }; enum gimplify_defaultmap_kind { GDMK_SCALAR, GDMK_AGGREGATE, GDMK_ALLOCATABLE, GDMK_POINTER }; struct gimplify_omp_ctx { struct gimplify_omp_ctx *outer_context; splay_tree variables; hash_set<tree> *privatized_types; tree clauses; /* Iteration variables in an OMP_FOR. */ vec<tree> loop_iter_var; location_t location; enum omp_clause_default_kind default_kind; enum omp_region_type region_type; enum tree_code code; bool combined_loop; bool distribute; bool target_firstprivatize_array_bases; bool add_safelen1; bool order_concurrent; int defaultmap[4]; }; static struct gimplify_ctx *gimplify_ctxp; static struct gimplify_omp_ctx *gimplify_omp_ctxp; static bool in_omp_construct; /* Forward declaration. */ static enum gimplify_status gimplify_compound_expr (tree *, gimple_seq *, bool); static hash_map<tree, tree> *oacc_declare_returns; static enum gimplify_status gimplify_expr (tree *, gimple_seq *, gimple_seq *, bool (*) (tree), fallback_t, bool); /* Shorter alias name for the above function for use in gimplify.c only. */ static inline void gimplify_seq_add_stmt (gimple_seq *seq_p, gimple *gs) { gimple_seq_add_stmt_without_update (seq_p, gs); } /* Append sequence SRC to the end of sequence *DST_P. If *DST_P is NULL, a new sequence is allocated. This function is similar to gimple_seq_add_seq, but does not scan the operands. During gimplification, we need to manipulate statement sequences before the def/use vectors have been constructed. 
*/ static void gimplify_seq_add_seq (gimple_seq *dst_p, gimple_seq src) { gimple_stmt_iterator si; if (src == NULL) return; si = gsi_last (*dst_p); gsi_insert_seq_after_without_update (&si, src, GSI_NEW_STMT); } /* Pointer to a list of allocated gimplify_ctx structs to be used for pushing and popping gimplify contexts. */ static struct gimplify_ctx *ctx_pool = NULL; /* Return a gimplify context struct from the pool. */ static inline struct gimplify_ctx * ctx_alloc (void) { struct gimplify_ctx * c = ctx_pool; if (c) ctx_pool = c->prev_context; else c = XNEW (struct gimplify_ctx); memset (c, '\0', sizeof (*c)); return c; } /* Put gimplify context C back into the pool. */ static inline void ctx_free (struct gimplify_ctx *c) { c->prev_context = ctx_pool; ctx_pool = c; } /* Free allocated ctx stack memory. */ void free_gimplify_stack (void) { struct gimplify_ctx *c; while ((c = ctx_pool)) { ctx_pool = c->prev_context; free (c); } } /* Set up a context for the gimplifier. */ void push_gimplify_context (bool in_ssa, bool rhs_cond_ok) { struct gimplify_ctx *c = ctx_alloc (); c->prev_context = gimplify_ctxp; gimplify_ctxp = c; gimplify_ctxp->into_ssa = in_ssa; gimplify_ctxp->allow_rhs_cond_expr = rhs_cond_ok; } /* Tear down a context for the gimplifier. If BODY is non-null, then put the temporaries into the outer BIND_EXPR. Otherwise, put them in the local_decls. BODY is not a sequence, but the first tuple in a sequence. */ void pop_gimplify_context (gimple *body) { struct gimplify_ctx *c = gimplify_ctxp; gcc_assert (c && (!c->bind_expr_stack.exists () || c->bind_expr_stack.is_empty ())); c->bind_expr_stack.release (); gimplify_ctxp = c->prev_context; if (body) declare_vars (c->temps, body, false); else record_vars (c->temps); delete c->temp_htab; c->temp_htab = NULL; ctx_free (c); } /* Push a GIMPLE_BIND tuple onto the stack of bindings. 
*/ static void gimple_push_bind_expr (gbind *bind_stmt) { gimplify_ctxp->bind_expr_stack.reserve (8); gimplify_ctxp->bind_expr_stack.safe_push (bind_stmt); } /* Pop the first element off the stack of bindings. */ static void gimple_pop_bind_expr (void) { gimplify_ctxp->bind_expr_stack.pop (); } /* Return the first element of the stack of bindings. */ gbind * gimple_current_bind_expr (void) { return gimplify_ctxp->bind_expr_stack.last (); } /* Return the stack of bindings created during gimplification. */ vec<gbind *> gimple_bind_expr_stack (void) { return gimplify_ctxp->bind_expr_stack; } /* Return true iff there is a COND_EXPR between us and the innermost CLEANUP_POINT_EXPR. This info is used by gimple_push_cleanup. */ static bool gimple_conditional_context (void) { return gimplify_ctxp->conditions > 0; } /* Note that we've entered a COND_EXPR. */ static void gimple_push_condition (void) { #ifdef ENABLE_GIMPLE_CHECKING if (gimplify_ctxp->conditions == 0) gcc_assert (gimple_seq_empty_p (gimplify_ctxp->conditional_cleanups)); #endif ++(gimplify_ctxp->conditions); } /* Note that we've left a COND_EXPR. If we're back at unconditional scope now, add any conditional cleanups we've seen to the prequeue. */ static void gimple_pop_condition (gimple_seq *pre_p) { int conds = --(gimplify_ctxp->conditions); gcc_assert (conds >= 0); if (conds == 0) { gimplify_seq_add_seq (pre_p, gimplify_ctxp->conditional_cleanups); gimplify_ctxp->conditional_cleanups = NULL; } } /* A stable comparison routine for use with splay trees and DECLs. */ static int splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb) { tree a = (tree) xa; tree b = (tree) xb; return DECL_UID (a) - DECL_UID (b); } /* Create a new omp construct that deals with variable remapping. 
*/ static struct gimplify_omp_ctx * new_omp_context (enum omp_region_type region_type) { struct gimplify_omp_ctx *c; c = XCNEW (struct gimplify_omp_ctx); c->outer_context = gimplify_omp_ctxp; c->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0); c->privatized_types = new hash_set<tree>; c->location = input_location; c->region_type = region_type; if ((region_type & ORT_TASK) == 0) c->default_kind = OMP_CLAUSE_DEFAULT_SHARED; else c->default_kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; c->defaultmap[GDMK_SCALAR] = GOVD_MAP; c->defaultmap[GDMK_AGGREGATE] = GOVD_MAP; c->defaultmap[GDMK_ALLOCATABLE] = GOVD_MAP; c->defaultmap[GDMK_POINTER] = GOVD_MAP; return c; } /* Destroy an omp construct that deals with variable remapping. */ static void delete_omp_context (struct gimplify_omp_ctx *c) { splay_tree_delete (c->variables); delete c->privatized_types; c->loop_iter_var.release (); XDELETE (c); } static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int); static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool); /* Both gimplify the statement T and append it to *SEQ_P. This function behaves exactly as gimplify_stmt, but you don't have to pass T as a reference. */ void gimplify_and_add (tree t, gimple_seq *seq_p) { gimplify_stmt (&t, seq_p); } /* Gimplify statement T into sequence *SEQ_P, and return the first tuple in the sequence of generated tuples for this statement. Return NULL if gimplifying T produced no tuples. */ static gimple * gimplify_and_return_first (tree t, gimple_seq *seq_p) { gimple_stmt_iterator last = gsi_last (*seq_p); gimplify_and_add (t, seq_p); if (!gsi_end_p (last)) { gsi_next (&last); return gsi_stmt (last); } else return gimple_seq_first_stmt (*seq_p); } /* Returns true iff T is a valid RHS for an assignment to an un-renamed LHS, or for a call argument. */ static bool is_gimple_mem_rhs (tree t) { /* If we're dealing with a renamable type, either source or dest must be a renamed variable. 
*/ if (is_gimple_reg_type (TREE_TYPE (t))) return is_gimple_val (t); else return is_gimple_val (t) || is_gimple_lvalue (t); } /* Return true if T is a CALL_EXPR or an expression that can be assigned to a temporary. Note that this predicate should only be used during gimplification. See the rationale for this in gimplify_modify_expr. */ static bool is_gimple_reg_rhs_or_call (tree t) { return (get_gimple_rhs_class (TREE_CODE (t)) != GIMPLE_INVALID_RHS || TREE_CODE (t) == CALL_EXPR); } /* Return true if T is a valid memory RHS or a CALL_EXPR. Note that this predicate should only be used during gimplification. See the rationale for this in gimplify_modify_expr. */ static bool is_gimple_mem_rhs_or_call (tree t) { /* If we're dealing with a renamable type, either source or dest must be a renamed variable. */ if (is_gimple_reg_type (TREE_TYPE (t))) return is_gimple_val (t); else return (is_gimple_val (t) || is_gimple_lvalue (t) || TREE_CLOBBER_P (t) || TREE_CODE (t) == CALL_EXPR); } /* Create a temporary with a name derived from VAL. Subroutine of lookup_tmp_var; nobody else should call this function. */ static inline tree create_tmp_from_val (tree val) { /* Drop all qualifiers and address-space information from the value type. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (val)); tree var = create_tmp_var (type, get_name (val)); return var; } /* Create a temporary to hold the value of VAL. If IS_FORMAL, try to reuse an existing expression temporary. */ static tree lookup_tmp_var (tree val, bool is_formal) { tree ret; /* If not optimizing, never really reuse a temporary. local-alloc won't allocate any variable that is used in more than one basic block, which means it will go into memory, causing much extra work in reload and final and poorer code generation, outweighing the extra memory allocation here. 
*/ if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val)) ret = create_tmp_from_val (val); else { elt_t elt, *elt_p; elt_t **slot; elt.val = val; if (!gimplify_ctxp->temp_htab) gimplify_ctxp->temp_htab = new hash_table<gimplify_hasher> (1000); slot = gimplify_ctxp->temp_htab->find_slot (&elt, INSERT); if (*slot == NULL) { elt_p = XNEW (elt_t); elt_p->val = val; elt_p->temp = ret = create_tmp_from_val (val); *slot = elt_p; } else { elt_p = *slot; ret = elt_p->temp; } } return ret; } /* Helper for get_formal_tmp_var and get_initialized_tmp_var. */ static tree internal_get_tmp_var (tree val, gimple_seq *pre_p, gimple_seq *post_p, bool is_formal, bool allow_ssa) { tree t, mod; /* Notice that we explicitly allow VAL to be a CALL_EXPR so that we can create an INIT_EXPR and convert it into a GIMPLE_CALL below. */ gimplify_expr (&val, pre_p, post_p, is_gimple_reg_rhs_or_call, fb_rvalue); if (allow_ssa && gimplify_ctxp->into_ssa && is_gimple_reg_type (TREE_TYPE (val))) { t = make_ssa_name (TYPE_MAIN_VARIANT (TREE_TYPE (val))); if (! gimple_in_ssa_p (cfun)) { const char *name = get_name (val); if (name) SET_SSA_NAME_VAR_OR_IDENTIFIER (t, create_tmp_var_name (name)); } } else t = lookup_tmp_var (val, is_formal); mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val)); SET_EXPR_LOCATION (mod, EXPR_LOC_OR_LOC (val, input_location)); /* gimplify_modify_expr might want to reduce this further. */ gimplify_and_add (mod, pre_p); ggc_free (mod); return t; } /* Return a formal temporary variable initialized with VAL. PRE_P is as in gimplify_expr. Only use this function if: 1) The value of the unfactored expression represented by VAL will not change between the initialization and use of the temporary, and 2) The temporary will not be otherwise modified. For instance, #1 means that this is inappropriate for SAVE_EXPR temps, and #2 means it is inappropriate for && temps. For other cases, use get_initialized_tmp_var instead. 
*/

tree
get_formal_tmp_var (tree val, gimple_seq *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true, true);
}

/* Return a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  */

tree
get_initialized_tmp_var (tree val, gimple_seq *pre_p,
			 gimple_seq *post_p /* = NULL */,
			 bool allow_ssa /* = true */)
{
  return internal_get_tmp_var (val, pre_p, post_p, false, allow_ssa);
}

/* Declare all the variables in VARS in SCOPE.  If DEBUG_INFO is true,
   generate debug info for them; otherwise don't.  GS must be a
   GIMPLE_BIND (checked via as_a).  */

void
declare_vars (tree vars, gimple *gs, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;

      gbind *scope = as_a <gbind *> (gs);

      temps = nreverse (last);

      block = gimple_bind_block (scope);
      gcc_assert (!block || TREE_CODE (block) == BLOCK);
      if (!block || !debug_info)
	{
	  /* No BLOCK (or no debug info wanted): just prepend TEMPS to the
	     bind's variable chain.  */
	  DECL_CHAIN (last) = gimple_bind_vars (scope);
	  gimple_bind_set_vars (scope, temps);
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      gimple_bind_set_vars (scope,
				    chainon (gimple_bind_vars (scope), temps));
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}

/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */

static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size;

  gcc_assert (VAR_P (var));

  max_size = max_int_size_in_bytes (TREE_TYPE (var));

  gcc_assert (max_size >= 0);

  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}

/* Push the temporary variable TMP into the current binding of
   function FN.  */

void
gimple_add_tmp_var_fn (struct function *fn, tree tmp)
{
  /* TMP must be fresh: not yet chained into any scope.  */
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!tree_fits_poly_uint64_p (DECL_SIZE_UNIT (tmp)))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = fn->decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  record_vars_into (tmp, fn->decl);
}

/* Push the temporary variable TMP into the current binding.  */

void
gimple_add_tmp_var (tree tmp)
{
  gcc_assert (!DECL_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));

  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!tree_fits_poly_uint64_p (DECL_SIZE_UNIT (tmp)))
    force_constant_size (tmp);

  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;

  if (gimplify_ctxp)
    {
      /* Gimplifying: chain TMP onto the context's temp list; it will be
	 declared by the enclosing bind when the context is popped.  */
      DECL_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;

      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  int flag = GOVD_LOCAL;
	  /* Walk outward past contexts that don't own data-sharing for
	     locals (worksharing/taskgroup/simd/acc).  */
	  while (ctx
		 && (ctx->region_type == ORT_WORKSHARE
		     || ctx->region_type == ORT_TASKGROUP
		     || ctx->region_type == ORT_SIMD
		     || ctx->region_type == ORT_ACC))
	    {
	      if (ctx->region_type == ORT_SIMD
		  && TREE_ADDRESSABLE (tmp)
		  && !TREE_STATIC (tmp))
		{
		  /* Addressable automatic temp in a SIMD region: either
		     force safelen(1) (variable size) or privatize it.  */
		  if (TREE_CODE (DECL_SIZE_UNIT (tmp)) != INTEGER_CST)
		    ctx->add_safelen1 = true;
		  else
		    flag = GOVD_PRIVATE;
		  break;
		}
	      ctx = ctx->outer_context;
	    }
	  if (ctx)
	    omp_add_variable (ctx, tmp, flag | GOVD_SEEN);
	}
    }
  else if (cfun)
    record_vars (tmp);
  else
    {
      gimple_seq body_seq;

      /* This case is for nested functions.  We need to expose the locals
	 they create.  */
      body_seq = gimple_body (current_function_decl);
      declare_vars (tmp, gimple_seq_first_stmt (body_seq), false);
    }
}


/* This page contains routines to unshare tree nodes, i.e. to duplicate tree
   nodes that are referenced more than once in GENERIC functions.  This is
   necessary because gimplification (translation into GIMPLE) is performed
   by modifying tree nodes in-place, so gimplication of a shared node in a
   first context could generate an invalid GIMPLE form in a second context.

   This is achieved with a simple mark/copy/unmark algorithm that walks the
   GENERIC representation top-down, marks nodes with TREE_VISITED the first
   time it encounters them, duplicates them if they already have TREE_VISITED
   set, and finally removes the TREE_VISITED marks it has set.

   The algorithm works only at the function level, i.e. it generates a GENERIC
   representation of a function with no nodes shared within the function when
   passed a GENERIC function (except for nodes that are allowed to be shared).

   At the global level, it is also necessary to unshare tree nodes that are
   referenced in more than one function, for the same aforementioned reason.
   This requires some cooperation from the front-end.  There are 2 strategies:

     1. Manual unsharing.
The front-end needs to call unshare_expr on every expression that
	might end up being shared across functions.

     2. Deep unsharing.  This is an extension of regular unsharing.  Instead
	of calling unshare_expr on expressions that might be shared across
	functions, the front-end pre-marks them with TREE_VISITED.  This will
	ensure that they are unshared on the first reference within functions
	when the regular unsharing algorithm runs.  The counterpart is that
	this algorithm must look deeper than for manual unsharing, which is
	specified by LANG_HOOKS_DEEP_UNSHARING.

   If there are only few specific cases of node sharing across functions, it
   is probably easier for a front-end to unshare the expressions manually.
   On the contrary, if the expressions generated at the global level are as
   widespread as expressions generated within functions, deep unsharing is
   very likely the way to go.  */

/* Similar to copy_tree_r but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that must be done once.  If we were to
   unshare something like SAVE_EXPR(i++), the gimplification process would
   create wrong code.  However, if DATA is non-null, it must hold a pointer
   set that is used to unshare the subtrees of these nodes.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Do not copy SAVE_EXPR, TARGET_EXPR or BIND_EXPR nodes themselves, but
     copy their subtrees if we can make sure to do it only once.  */
  if (code == SAVE_EXPR || code == TARGET_EXPR || code == BIND_EXPR)
    {
      /* With a pointer set (deep unsharing), walk the subtrees only on
	 the first encounter of this node; ->add returns false then.  */
      if (data && !((hash_set<tree> *)data)->add (t))
	;
      else
	*walk_subtrees = 0;
    }

  /* Stop at types, decls, constants like copy_tree_r.  */
  else if (TREE_CODE_CLASS (code) == tcc_type
	   || TREE_CODE_CLASS (code) == tcc_declaration
	   || TREE_CODE_CLASS (code) == tcc_constant)
    *walk_subtrees = 0;

  /* Cope with the statement expression extension.  */
  else if (code == STATEMENT_LIST)
    ;

  /* Leave the bulk of the work to copy_tree_r itself.  */
  else
    copy_tree_r (tp, walk_subtrees, NULL);

  return NULL_TREE;
}

/* Callback for walk_tree to unshare most of the shared trees rooted at *TP.
   If *TP has been visited already, then *TP is deeply copied by calling
   mostly_copy_tree_r.  DATA is passed to mostly_copy_tree_r unmodified.  */

static tree
copy_if_shared_r (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }

  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      walk_tree (tp, mostly_copy_tree_r, data, NULL);
      *walk_subtrees = 0;
    }

  /* Otherwise, mark the node as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;

  return NULL_TREE;
}

/* Unshare most of the shared trees rooted at *TP.  DATA is passed to the
   copy_if_shared_r callback unmodified.  */

void
copy_if_shared (tree *tp, void *data)
{
  walk_tree (tp, copy_if_shared_r, data, NULL);
}

/* Unshare all the trees in the body of FNDECL, as well as in the bodies of
   any nested functions.  */

static void
unshare_body (tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node::get (fndecl);
  /* If the language requires deep unsharing, we need a pointer set to make
     sure we don't repeatedly unshare subtrees of unshareable nodes.  */
  hash_set<tree> *visited
    = lang_hooks.deep_unsharing ? new hash_set<tree> : NULL;

  copy_if_shared (&DECL_SAVED_TREE (fndecl), visited);
  /* Size expressions of the result can also be shared.  */
  copy_if_shared (&DECL_SIZE (DECL_RESULT (fndecl)), visited);
  copy_if_shared (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)), visited);

  delete visited;

  /* Recurse into nested functions known to the callgraph.  */
  if (cgn)
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (cgn->decl);
}

/* Callback for walk_tree to unmark the visited trees rooted at *TP.
   Subtrees are walked until the first unvisited node is encountered.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;

  /* If this node has been visited, unmark it and keep looking.  */
  if (TREE_VISITED (t))
    TREE_VISITED (t) = 0;

  /* Otherwise, don't look any deeper.  */
  else
    *walk_subtrees = 0;

  return NULL_TREE;
}

/* Unmark the visited trees rooted at *TP.  */

static inline void
unmark_visited (tree *tp)
{
  walk_tree (tp, unmark_visited_r, NULL, NULL);
}

/* Likewise, but mark all trees as not visited.  */

static void
unvisit_body (tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node::get (fndecl);

  unmark_visited (&DECL_SAVED_TREE (fndecl));
  unmark_visited (&DECL_SIZE (DECL_RESULT (fndecl)));
  unmark_visited (&DECL_SIZE_UNIT (DECL_RESULT (fndecl)));

  if (cgn)
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unvisit_body (cgn->decl);
}

/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */

tree
unshare_expr (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}

/* Worker for unshare_expr_without_location.  Wipes the location from every
   expression node it reaches; stops descending at non-expressions.  */

static tree
prune_expr_location (tree *tp, int *walk_subtrees, void *)
{
  if (EXPR_P (*tp))
    SET_EXPR_LOCATION (*tp, UNKNOWN_LOCATION);
  else
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* Similar to unshare_expr but also prune all expression locations
   from EXPR.
*/

tree
unshare_expr_without_location (tree expr)
{
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  if (EXPR_P (expr))
    walk_tree (&expr, prune_expr_location, NULL, NULL);
  return expr;
}

/* Return the EXPR_LOCATION of EXPR, if it (maybe recursively) has
   one, OR_ELSE otherwise.  The location of a STATEMENT_LISTs
   comprising at least one DEBUG_BEGIN_STMT followed by exactly one
   EXPR is the location of the EXPR.  */

static location_t
rexpr_location (tree expr, location_t or_else = UNKNOWN_LOCATION)
{
  if (!expr)
    return or_else;

  if (EXPR_HAS_LOCATION (expr))
    return EXPR_LOCATION (expr);

  if (TREE_CODE (expr) != STATEMENT_LIST)
    return or_else;

  tree_stmt_iterator i = tsi_start (expr);

  /* Skip leading debug-begin markers; require at least one.  */
  bool found = false;
  while (!tsi_end_p (i) && TREE_CODE (tsi_stmt (i)) == DEBUG_BEGIN_STMT)
    {
      found = true;
      tsi_next (&i);
    }

  /* Exactly one statement must follow the markers.  */
  if (!found || !tsi_one_before_end_p (i))
    return or_else;

  return rexpr_location (tsi_stmt (i), or_else);
}

/* Return TRUE iff EXPR (maybe recursively) has a location; see
   rexpr_location for the potential recursion.  */

static inline bool
rexpr_has_location (tree expr)
{
  return rexpr_location (expr) != UNKNOWN_LOCATION;
}

/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Return the temporary, or NULL_TREE if
   WRAPPER was already void.  */

tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;

      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  Every wrapper node traversed is
	 voided and marked as having side effects along the way.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;

	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;

	    case STATEMENT_LIST:
	      {
		/* The value of a statement list is its last statement.  */
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;

	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to
		 void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR;
		   p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;

	    case TRANSACTION_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TRANSACTION_EXPR_BODY (*p);
	      break;

	    default:
	      /* Assume that any tree upon which voidify_wrapper_expr is
		 directly called is a wrapper, and that its body is op0.  */
	      if (p == &wrapper)
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		  p = &TREE_OPERAND (*p, 0);
		  break;
		}
	      goto out;
	    }
	}

    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}

      return temp;
    }

  return NULL_TREE;
}

/* Prepare calls to builtins to SAVE and RESTORE the stack as well as a
   temporary through which they communicate.  */

static void
build_stack_save_restore (gcall **save, gcall **restore)
{
  tree tmp_var;

  *save = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_SAVE), 0);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");
  gimple_call_set_lhs (*save, tmp_var);

  *restore
    = gimple_build_call (builtin_decl_implicit (BUILT_IN_STACK_RESTORE),
			 1, tmp_var);
}

/* Generate IFN_ASAN_MARK call that poisons shadow of a for DECL variable.
   Returns NULL_TREE for zero-sized DECLs, which need no poisoning.  */

static tree
build_asan_poison_call_expr (tree decl)
{
  /* Do not poison variables that have size equal to zero.  */
  tree unit_size = DECL_SIZE_UNIT (decl);
  if (zerop (unit_size))
    return NULL_TREE;

  tree base = build_fold_addr_expr (decl);

  return build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_ASAN_MARK,
				       void_type_node, 3,
				       build_int_cst (integer_type_node,
						      ASAN_MARK_POISON),
				       base, unit_size);
}

/* Generate IFN_ASAN_MARK call that would poison or unpoison, depending
   on POISON flag, shadow memory of a DECL variable.  The call will be
   put on location identified by IT iterator, where BEFORE flag drives
   position where the stmt will be put.  */

static void
asan_poison_variable (tree decl, bool poison, gimple_stmt_iterator *it,
		      bool before)
{
  tree unit_size = DECL_SIZE_UNIT (decl);
  tree base = build_fold_addr_expr (decl);

  /* Do not poison variables that have size equal to zero.  */
  if (zerop (unit_size))
    return;

  /* It's necessary to have all stack variables aligned to ASAN granularity
     bytes.  */
  if (DECL_ALIGN_UNIT (decl) <= ASAN_SHADOW_GRANULARITY)
    SET_DECL_ALIGN (decl, BITS_PER_UNIT * ASAN_SHADOW_GRANULARITY);

  HOST_WIDE_INT flags = poison ? ASAN_MARK_POISON : ASAN_MARK_UNPOISON;

  gimple *g
    = gimple_build_call_internal (IFN_ASAN_MARK, 3,
				  build_int_cst (integer_type_node, flags),
				  base, unit_size);

  if (before)
    gsi_insert_before (it, g, GSI_NEW_STMT);
  else
    gsi_insert_after (it, g, GSI_NEW_STMT);
}

/* Generate IFN_ASAN_MARK internal call that depending on POISON flag
   either poisons or unpoisons a DECL.  Created statement is appended
   to SEQ_P gimple sequence.  */

static void
asan_poison_variable (tree decl, bool poison, gimple_seq *seq_p)
{
  gimple_stmt_iterator it = gsi_last (*seq_p);
  bool before = false;

  /* An empty sequence has no "last" statement to insert after.  */
  if (gsi_end_p (it))
    before = true;

  asan_poison_variable (decl, poison, &it, before);
}

/* Sort pair of VAR_DECLs A and B by DECL_UID.
*/

static int
sort_by_decl_uid (const void *a, const void *b)
{
  const tree *t1 = (const tree *)a;
  const tree *t2 = (const tree *)b;

  int uid1 = DECL_UID (*t1);
  int uid2 = DECL_UID (*t2);

  if (uid1 < uid2)
    return -1;
  else if (uid1 > uid2)
    return 1;
  else
    return 0;
}

/* Generate IFN_ASAN_MARK internal call for all VARIABLES
   depending on POISON flag.  Created statement is appended
   to SEQ_P gimple sequence.  */

static void
asan_poison_variables (hash_set<tree> *variables, bool poison,
		       gimple_seq *seq_p)
{
  unsigned c = variables->elements ();
  if (c == 0)
    return;

  /* Sort by DECL_UID so the emitted sequence is deterministic regardless
     of hash-table iteration order.  */
  auto_vec<tree> sorted_variables (c);

  for (hash_set<tree>::iterator it = variables->begin ();
       it != variables->end (); ++it)
    sorted_variables.safe_push (*it);

  sorted_variables.qsort (sort_by_decl_uid);

  unsigned i;
  tree var;
  FOR_EACH_VEC_ELT (sorted_variables, i, var)
    {
      asan_poison_variable (var, poison, seq_p);

      /* Add use_after_scope_memory attribute for the variable in order
	 to prevent re-written into SSA.  */
      if (!lookup_attribute (ASAN_USE_AFTER_SCOPE_ATTRIBUTE,
			     DECL_ATTRIBUTES (var)))
	DECL_ATTRIBUTES (var)
	  = tree_cons (get_identifier (ASAN_USE_AFTER_SCOPE_ATTRIBUTE),
		       integer_one_node,
		       DECL_ATTRIBUTES (var));
    }
}

/* Gimplify a BIND_EXPR.  Just voidify and recurse.  */

static enum gimplify_status
gimplify_bind_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_keep_stack = gimplify_ctxp->keep_stack;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  gbind *bind_stmt;
  gimple_seq body, cleanup;
  gcall *stack_save;
  location_t start_locus = 0, end_locus = 0;
  tree ret_clauses = NULL;

  tree temp = voidify_wrapper_expr (bind_expr, NULL);

  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (VAR_P (t))
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;

	  /* Mark variable as local.  */
	  if (ctx && ctx->region_type != ORT_NONE && !DECL_EXTERNAL (t))
	    {
	      if (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL)
		{
		  int flag = GOVD_LOCAL;
		  if (ctx->region_type == ORT_SIMD
		      && TREE_ADDRESSABLE (t)
		      && !TREE_STATIC (t))
		    {
		      /* Addressable automatic in a SIMD region: force
			 safelen(1) for variable-sized ones, privatize
			 otherwise.  */
		      if (TREE_CODE (DECL_SIZE_UNIT (t)) != INTEGER_CST)
			ctx->add_safelen1 = true;
		      else
			flag = GOVD_PRIVATE;
		    }
		  omp_add_variable (ctx, t, flag | GOVD_SEEN);
		}
	      /* Static locals inside of target construct or offloaded
		 routines need to be "omp declare target".  */
	      if (TREE_STATIC (t))
		for (; ctx; ctx = ctx->outer_context)
		  if ((ctx->region_type & ORT_TARGET) != 0)
		    {
		      if (!lookup_attribute ("omp declare target",
					     DECL_ATTRIBUTES (t)))
			{
			  tree id = get_identifier ("omp declare target");
			  DECL_ATTRIBUTES (t)
			    = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t));
			  varpool_node *node = varpool_node::get (t);
			  if (node)
			    {
			      node->offloadable = 1;
			      if (ENABLE_OFFLOADING && !DECL_EXTERNAL (t))
				{
				  g->have_offload = true;
				  if (!in_lto_p)
				    vec_safe_push (offload_vars, t);
				}
			    }
			}
		      break;
		    }
	    }

	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;

	  if (DECL_HARD_REGISTER (t) && !is_global_var (t) && cfun)
	    cfun->has_local_explicit_reg_vars = true;
	}
    }

  bind_stmt = gimple_build_bind (BIND_EXPR_VARS (bind_expr), NULL,
				 BIND_EXPR_BLOCK (bind_expr));
  gimple_push_bind_expr (bind_stmt);

  gimplify_ctxp->keep_stack = false;
  gimplify_ctxp->save_stack = false;

  /* Gimplify the body into the GIMPLE_BIND tuple's body.  */
  body = NULL;
  gimplify_stmt (&BIND_EXPR_BODY (bind_expr), &body);
  gimple_bind_set_body (bind_stmt, body);

  /* Source location wise, the cleanup code (stack_restore and clobbers)
     belongs to the end of the block, so propagate what we have.  The
     stack_save operation belongs to the beginning of block, which we can
     infer from the bind_expr directly if the block has no explicit
     assignment.  */
  if (BIND_EXPR_BLOCK (bind_expr))
    {
      end_locus = BLOCK_SOURCE_END_LOCATION (BIND_EXPR_BLOCK (bind_expr));
      start_locus = BLOCK_SOURCE_LOCATION (BIND_EXPR_BLOCK (bind_expr));
    }
  if (start_locus == 0)
    start_locus = EXPR_LOCATION (bind_expr);

  cleanup = NULL;
  stack_save = NULL;

  /* If the code both contains VLAs and calls alloca, then we cannot reclaim
     the stack space allocated to the VLAs.  */
  if (gimplify_ctxp->save_stack && !gimplify_ctxp->keep_stack)
    {
      gcall *stack_restore;

      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  */
      build_stack_save_restore (&stack_save, &stack_restore);

      gimple_set_location (stack_save, start_locus);
      gimple_set_location (stack_restore, end_locus);

      gimplify_seq_add_stmt (&cleanup, stack_restore);
    }

  /* Add clobbers for all variables that go out of scope.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = DECL_CHAIN (t))
    {
      if (VAR_P (t)
	  && !is_global_var (t)
	  && DECL_CONTEXT (t) == current_function_decl)
	{
	  if (!DECL_HARD_REGISTER (t)
	      && !TREE_THIS_VOLATILE (t)
	      && !DECL_HAS_VALUE_EXPR_P (t)
	      /* Only care for variables that have to be in memory.  Others
		 will be rewritten into SSA names, hence moved to the
		 top-level.  */
	      && !is_gimple_reg (t)
	      && flag_stack_reuse != SR_NONE)
	    {
	      tree clobber = build_clobber (TREE_TYPE (t));
	      gimple *clobber_stmt;
	      clobber_stmt = gimple_build_assign (t, clobber);
	      gimple_set_location (clobber_stmt, end_locus);
	      gimplify_seq_add_stmt (&cleanup, clobber_stmt);
	    }

	  /* OpenACC "declare" returns: collect the matching clause for
	     this variable (looking through its DECL_VALUE_EXPR), and drop
	     the map entry once consumed.  */
	  if (flag_openacc && oacc_declare_returns != NULL)
	    {
	      tree key = t;
	      if (DECL_HAS_VALUE_EXPR_P (key))
		{
		  key = DECL_VALUE_EXPR (key);
		  if (TREE_CODE (key) == INDIRECT_REF)
		    key = TREE_OPERAND (key, 0);
		}
	      tree *c = oacc_declare_returns->get (key);
	      if (c != NULL)
		{
		  if (ret_clauses)
		    OMP_CLAUSE_CHAIN (*c) = ret_clauses;

		  ret_clauses = unshare_expr (*c);

		  oacc_declare_returns->remove (key);

		  if (oacc_declare_returns->is_empty ())
		    {
		      delete oacc_declare_returns;
		      oacc_declare_returns = NULL;
		    }
		}
	    }
	}

      /* Re-poison ASAN-tracked variables as they leave scope.  */
      if (asan_poisoned_variables != NULL
	  && asan_poisoned_variables->contains (t))
	{
	  asan_poisoned_variables->remove (t);
	  asan_poison_variable (t, true, &cleanup);
	}

      if (gimplify_ctxp->live_switch_vars != NULL
	  && gimplify_ctxp->live_switch_vars->contains (t))
	gimplify_ctxp->live_switch_vars->remove (t);
    }

  if (ret_clauses)
    {
      gomp_target *stmt;
      gimple_stmt_iterator si = gsi_start (cleanup);

      stmt = gimple_build_omp_target (NULL, GF_OMP_TARGET_KIND_OACC_DECLARE,
				      ret_clauses);
      gsi_insert_seq_before_without_update (&si, stmt, GSI_NEW_STMT);
    }

  if (cleanup)
    {
      gtry *gs;
      gimple_seq new_body;

      /* Wrap the body in a TRY_FINALLY so the cleanup (restore/clobber/
	 poison) runs on every exit path.  */
      new_body = NULL;
      gs = gimple_build_try (gimple_bind_body (bind_stmt), cleanup,
			     GIMPLE_TRY_FINALLY);

      if (stack_save)
	gimplify_seq_add_stmt (&new_body, stack_save);
      gimplify_seq_add_stmt (&new_body, gs);
      gimple_bind_set_body (bind_stmt, new_body);
    }

  /* keep_stack propagates all the way up to the outermost BIND_EXPR.  */
  if (!gimplify_ctxp->keep_stack)
    gimplify_ctxp->keep_stack = old_keep_stack;
  gimplify_ctxp->save_stack = old_save_stack;

  gimple_pop_bind_expr ();

  gimplify_seq_add_stmt (pre_p, bind_stmt);

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}

/* Maybe add early return predict statement to PRE_P sequence.  */

static void
maybe_add_early_return_predict_stmt (gimple_seq *pre_p)
{
  /* If we are not in a conditional context, add PREDICT statement.  */
  if (gimple_conditional_context ())
    {
      gimple *predict = gimple_build_predict (PRED_TREE_EARLY_RETURN,
					      NOT_TAKEN);
      gimplify_seq_add_stmt (pre_p, predict);
    }
}

/* Gimplify a RETURN_EXPR.  If the expression to be returned is not a
   GIMPLE value, it is assigned to a new temporary and the statement is
   re-written to return the temporary.

   PRE_P points to the sequence where side effects that must happen before
   STMT should be stored.  */

static enum gimplify_status
gimplify_return_expr (tree stmt, gimple_seq *pre_p)
{
  greturn *ret;
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;

  if (ret_expr == error_mark_node)
    return GS_ERROR;

  if (!ret_expr
      || TREE_CODE (ret_expr) == RESULT_DECL)
    {
      maybe_add_early_return_predict_stmt (pre_p);
      /* NOTE(review): this inner `ret' shadows the outer declaration
	 above; harmless since this branch returns immediately, but worth
	 confirming before any refactor.  */
      greturn *ret = gimple_build_return (ret_expr);
      gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
      gimplify_seq_add_stmt (pre_p, ret);
      return GS_ALL_DONE;
    }

  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else if (TREE_CODE (ret_expr) == COMPOUND_EXPR)
    {
      /* Used in C++ for handling EH cleanup of the return value if a local
	 cleanup throws.  Assume the front-end knows what it's doing.  */
      result_decl = DECL_RESULT (current_function_decl);
      /* But crash if we end up trying to modify ret_expr below.  */
      ret_expr = NULL_TREE;
    }
  else
    {
      result_decl = TREE_OPERAND (ret_expr, 0);

      /* See through a return by reference.  */
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	result_decl = TREE_OPERAND (result_decl, 0);

      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }

  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in
     expand_return to handle this case that does not exist in expand_expr.  */
  if (!result_decl)
    result = NULL_TREE;
  else if (aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    {
      if (!poly_int_tree_p (DECL_SIZE (result_decl)))
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (result_decl)))
	    gimplify_type_sizes (TREE_TYPE (result_decl), pre_p);
	  /* Note that we don't use gimplify_vla_decl because the RESULT_DECL
	     should be effectively allocated by the caller, i.e. all calls to
	     this function must be subject to the Return Slot Optimization.  */
	  gimplify_one_sizepos (&DECL_SIZE (result_decl), pre_p);
	  gimplify_one_sizepos (&DECL_SIZE_UNIT (result_decl), pre_p);
	}
      result = result_decl;
    }
  else if (gimplify_ctxp->return_temp)
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_reg (TREE_TYPE (result_decl));

      /* ??? With complex control flow (usually involving abnormal
	 edges), we can wind up warning about an uninitialized value
	 for this.  Due to how this variable is constructed and
	 initialized, this is never true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;

      gimplify_ctxp->return_temp = result;
    }

  /* Smash the lhs of the MODIFY_EXPR to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    TREE_OPERAND (ret_expr, 0) = result;

  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);

  maybe_add_early_return_predict_stmt (pre_p);
  ret = gimple_build_return (result);
  gimple_set_no_warning (ret, TREE_NO_WARNING (stmt));
  gimplify_seq_add_stmt (pre_p, ret);

  return GS_ALL_DONE;
}

/* Gimplify a variable-length array DECL.  */

static void
gimplify_vla_decl (tree decl, gimple_seq *seq_p)
{
  /* This is a variable-sized decl.  Simplify its size and mark it
     for deferred expansion.  */
  tree t, addr, ptr_type;

  gimplify_one_sizepos (&DECL_SIZE (decl), seq_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), seq_p);

  /* Don't mess with a DECL_VALUE_EXPR set by the front-end.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    return;

  /* All occurrences of this decl in final gimplified code will be
     replaced by indirection.  Setting DECL_VALUE_EXPR does two
     things: First, it lets the rest of the gimplifier know what
     replacement to use.  Second, it lets the debug info know
     where to find the value.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  DECL_IGNORED_P (addr) = 0;
  t = build_fold_indirect_ref (addr);
  TREE_THIS_NOTRAP (t) = 1;
  SET_DECL_VALUE_EXPR (decl, t);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;

  /* Allocate the backing storage with alloca and store the pointer.  */
  t = build_alloca_call_expr (DECL_SIZE_UNIT (decl), DECL_ALIGN (decl),
			      max_int_size_in_bytes (TREE_TYPE (decl)));
  /* The call has been built for a variable-sized object.  */
  CALL_ALLOCA_FOR_VAR_P (t) = 1;
  t = fold_convert (ptr_type, t);
  t = build2 (MODIFY_EXPR, TREE_TYPE (addr), addr, t);

  gimplify_and_add (t, seq_p);

  /* Record the dynamic allocation associated with DECL if requested.  */
  if (flag_callgraph_info & CALLGRAPH_INFO_DYNAMIC_ALLOC)
    record_dynamic_alloc (decl);
}

/* A helper function to be called via walk_tree.  Mark all labels under *TP
   as being forced.  To be called for DECL_INITIAL of static variables.
*/

static tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  if (TREE_CODE (*tp) == LABEL_DECL)
    {
      FORCED_LABEL (*tp) = 1;
      cfun->has_forced_label_in_static = 1;
    }

  return NULL_TREE;
}

/* Gimplify a DECL_EXPR node *STMT_P by making any necessary allocation
   and initialization explicit.  */

static enum gimplify_status
gimplify_decl_expr (tree *stmt_p, gimple_seq *seq_p)
{
  tree stmt = *stmt_p;
  tree decl = DECL_EXPR_DECL (stmt);

  *stmt_p = NULL_TREE;

  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;

  if ((TREE_CODE (decl) == TYPE_DECL
       || VAR_P (decl))
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    {
      gimplify_type_sizes (TREE_TYPE (decl), seq_p);
      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE)
	gimplify_type_sizes (TREE_TYPE (TREE_TYPE (decl)), seq_p);
    }

  /* ??? DECL_ORIGINAL_TYPE is streamed for LTO so it needs to be gimplified
     in case its size expressions contain problematic nodes like CALL_EXPR.  */
  if (TREE_CODE (decl) == TYPE_DECL
      && DECL_ORIGINAL_TYPE (decl)
      && !TYPE_SIZES_GIMPLIFIED (DECL_ORIGINAL_TYPE (decl)))
    {
      gimplify_type_sizes (DECL_ORIGINAL_TYPE (decl), seq_p);
      if (TREE_CODE (DECL_ORIGINAL_TYPE (decl)) == REFERENCE_TYPE)
	gimplify_type_sizes (TREE_TYPE (DECL_ORIGINAL_TYPE (decl)), seq_p);
    }

  if (VAR_P (decl) && !DECL_EXTERNAL (decl))
    {
      tree init = DECL_INITIAL (decl);
      bool is_vla = false;
      poly_uint64 size;

      /* Treat as a VLA any decl whose size is not a known constant, and
	 also (under generic stack checking) any decl larger than the
	 stack-check threshold.  */
      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &size)
	  || (!TREE_STATIC (decl)
	      && flag_stack_check == GENERIC_STACK_CHECK
	      && maybe_gt (size,
			   (unsigned HOST_WIDE_INT) STACK_CHECK_MAX_VAR_SIZE)))
	{
	  gimplify_vla_decl (decl, seq_p);
	  is_vla = true;
	}

      if (asan_poisoned_variables
	  && !is_vla
	  && TREE_ADDRESSABLE (decl)
	  && !TREE_STATIC (decl)
	  && !DECL_HAS_VALUE_EXPR_P (decl)
	  && DECL_ALIGN (decl) <= MAX_SUPPORTED_STACK_ALIGNMENT
	  && dbg_cnt (asan_use_after_scope)
	  && !gimplify_omp_ctxp)
	{
	  asan_poisoned_variables->add (decl);
	  asan_poison_variable (decl, false, seq_p);
	  if (!DECL_ARTIFICIAL (decl) && gimplify_ctxp->live_switch_vars)
	    gimplify_ctxp->live_switch_vars->add (decl);
	}

      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl) && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);

      if (init && init != error_mark_node)
	{
	  if (!TREE_STATIC (decl))
	    {
	      /* Turn the initializer into an explicit INIT_EXPR statement;
		 the decl's DECL_INITIAL is cleared since initialization now
		 happens in the statement stream.  */
	      DECL_INITIAL (decl) = NULL_TREE;
	      init = build2 (INIT_EXPR, void_type_node, decl, init);
	      gimplify_and_add (init, seq_p);
	      ggc_free (init);
	    }
	  else
	    /* We must still examine initializers for static variables
	       as they may contain a label address.  */
	    walk_tree (&init, force_labels_r, NULL, NULL);
	}
    }

  return GS_ALL_DONE;
}

/* Gimplify a LOOP_EXPR.  Normally this just involves gimplifying the body
   and replacing the LOOP_EXPR with goto, but if the loop contains an
   EXIT_EXPR, we need to append a label for it to jump to.  */

static enum gimplify_status
gimplify_loop_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree saved_label = gimplify_ctxp->exit_label;
  tree start_label = create_artificial_label (UNKNOWN_LOCATION);

  gimplify_seq_add_stmt (pre_p, gimple_build_label (start_label));

  /* Gimplifying an EXIT_EXPR in the body creates exit_label on demand.  */
  gimplify_ctxp->exit_label = NULL_TREE;

  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);

  gimplify_seq_add_stmt (pre_p, gimple_build_goto (start_label));

  if (gimplify_ctxp->exit_label)
    gimplify_seq_add_stmt (pre_p,
			   gimple_build_label (gimplify_ctxp->exit_label));

  gimplify_ctxp->exit_label = saved_label;

  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* Gimplify a statement list onto a sequence.  These may be created either
   by an enlightened front-end, or by shortcut_cond_expr.
 */

static enum gimplify_status
gimplify_statement_list (tree *expr_p, gimple_seq *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  tree_stmt_iterator i = tsi_start (*expr_p);

  /* Gimplify each statement in turn, unlinking it from the list as we go.  */
  while (!tsi_end_p (i))
    {
      gimplify_stmt (tsi_stmt_ptr (i), pre_p);
      tsi_delink (&i);
    }

  /* If voidify_wrapper_expr created a temporary for the list's value,
     the temporary becomes the replacement expression.  */
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}

/* Callback for walk_gimple_seq.  Stores the first "real" (reachable)
   statement in WI->info and stops the walk by returning a non-NULL
   value; used by maybe_warn_switch_unreachable below.  */

static tree
warn_switch_unreachable_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
			   struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
      /* A compiler-generated cleanup or a user-written try block.
	 If it's empty, don't dive into it--that would result in
	 worse location info.  */
      if (gimple_try_eval (stmt) == NULL)
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      /* Fall through.  */
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;

    case GIMPLE_DEBUG:
      /* Ignore these.  We may generate them before declarations that
	 are never executed.  If there's something to warn about,
	 there will be non-debug stmts too, and we'll catch those.  */
      break;

    case GIMPLE_CALL:
      if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
	{
	  *handled_ops_p = false;
	  break;
	}
      /* Fall through.  */
    default:
      /* Save the first "real" statement (not a decl/lexical scope/...).  */
      wi->info = stmt;
      return integer_zero_node;
    }
  return NULL_TREE;
}

/* Possibly warn about unreachable statements between switch's controlling
   expression and the first case.  SEQ is the body of a switch expression.  */

static void
maybe_warn_switch_unreachable (gimple_seq seq)
{
  if (!warn_switch_unreachable
      /* This warning doesn't play well with Fortran when optimizations
	 are on.  */
      || lang_GNU_Fortran ()
      || seq == NULL)
    return;

  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (seq, warn_switch_unreachable_r, NULL, &wi);
  gimple *stmt = (gimple *) wi.info;

  /* A first statement that is not a case label is unreachable.  */
  if (stmt && gimple_code (stmt) != GIMPLE_LABEL)
    {
      if (gimple_code (stmt) == GIMPLE_GOTO
	  && TREE_CODE (gimple_goto_dest (stmt)) == LABEL_DECL
	  && DECL_ARTIFICIAL (gimple_goto_dest (stmt)))
	/* Don't warn for compiler-generated gotos.  These occur
	   in Duff's devices, for example.  */;
      else
	warning_at (gimple_location (stmt), OPT_Wswitch_unreachable,
		    "statement will never be executed");
    }
}

/* A label entry that pairs label and a location.  */
struct label_entry
{
  tree label;
  location_t loc;
};

/* Find LABEL in vector of label entries VEC.  */

static struct label_entry *
find_label_entry (const auto_vec<struct label_entry> *vec, tree label)
{
  unsigned int i;
  struct label_entry *l;

  FOR_EACH_VEC_ELT (*vec, i, l)
    if (l->label == label)
      return l;

  return NULL;
}

/* Return true if LABEL, a LABEL_DECL, represents a case label
   in a vector of labels CASES.  */

static bool
case_label_p (const vec<tree> *cases, tree label)
{
  unsigned int i;
  tree l;

  FOR_EACH_VEC_ELT (*cases, i, l)
    if (CASE_LABEL (l) == label)
      return true;

  return false;
}

/* Find the last nondebug statement in a scope STMT.
 */

static gimple *
last_stmt_in_scope (gimple *stmt)
{
  if (!stmt)
    return NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_BIND:
      {
	/* Recurse into the last statement of the bind body.  */
	gbind *bind = as_a <gbind *> (stmt);
	stmt = gimple_seq_last_nondebug_stmt (gimple_bind_body (bind));
	return last_stmt_in_scope (stmt);
      }

    case GIMPLE_TRY:
      {
	gtry *try_stmt = as_a <gtry *> (stmt);
	stmt = gimple_seq_last_nondebug_stmt (gimple_try_eval (try_stmt));
	gimple *last_eval = last_stmt_in_scope (stmt);
	/* For a try/finally whose body can fall through (and isn't ended
	   by an explicit FALLTHROUGH marker), the last statement executed
	   is the last one of the cleanup sequence.  */
	if (gimple_stmt_may_fallthru (last_eval)
	    && (last_eval == NULL
		|| !gimple_call_internal_p (last_eval, IFN_FALLTHROUGH))
	    && gimple_try_kind (try_stmt) == GIMPLE_TRY_FINALLY)
	  {
	    stmt = gimple_seq_last_nondebug_stmt (gimple_try_cleanup (try_stmt));
	    return last_stmt_in_scope (stmt);
	  }
	else
	  return last_eval;
      }

    case GIMPLE_DEBUG:
      /* Debug stmts were already skipped by gimple_seq_last_nondebug_stmt.  */
      gcc_unreachable ();

    default:
      return stmt;
    }
}

/* Collect interesting labels in LABELS and return the statement preceding
   another case label, or a user-defined label.  Store a location useful
   to give warnings at *PREVLOC (usually the location of the returned
   statement or of its surrounding scope).  */

static gimple *
collect_fallthrough_labels (gimple_stmt_iterator *gsi_p,
			    auto_vec <struct label_entry> *labels,
			    location_t *prevloc)
{
  gimple *prev = NULL;

  *prevloc = UNKNOWN_LOCATION;
  do
    {
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_BIND)
	{
	  /* Recognize the special GIMPLE_BIND added by gimplify_switch_expr,
	     which starts on a GIMPLE_SWITCH and ends with a break label.
	     Handle that as a single statement that can fall through.  */
	  gbind *bind = as_a <gbind *> (gsi_stmt (*gsi_p));
	  gimple *first = gimple_seq_first_stmt (gimple_bind_body (bind));
	  gimple *last = gimple_seq_last_stmt (gimple_bind_body (bind));
	  if (last
	      && gimple_code (first) == GIMPLE_SWITCH
	      && gimple_code (last) == GIMPLE_LABEL)
	    {
	      tree label = gimple_label_label (as_a <glabel *> (last));
	      if (SWITCH_BREAK_LABEL_P (label))
		{
		  prev = bind;
		  gsi_next (gsi_p);
		  continue;
		}
	    }
	}
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_BIND
	  || gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_TRY)
	{
	  /* Nested scope.  Only look at the last statement of
	     the innermost scope.  */
	  location_t bind_loc = gimple_location (gsi_stmt (*gsi_p));
	  gimple *last = last_stmt_in_scope (gsi_stmt (*gsi_p));
	  if (last)
	    {
	      prev = last;
	      /* It might be a label without a location.  Use the
		 location of the scope then.  */
	      if (!gimple_has_location (prev))
		*prevloc = bind_loc;
	    }
	  gsi_next (gsi_p);
	  continue;
	}

      /* Ifs are tricky.  */
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (gsi_stmt (*gsi_p));
	  tree false_lab = gimple_cond_false_label (cond_stmt);
	  location_t if_loc = gimple_location (cond_stmt);

	  /* If we have e.g.
	       if (i > 1) goto <D.2259>; else goto D;
	     we can't do much with the else-branch.  */
	  if (!DECL_ARTIFICIAL (false_lab))
	    break;

	  /* Go on until the false label, then one step back.  */
	  for (; !gsi_end_p (*gsi_p); gsi_next (gsi_p))
	    {
	      gimple *stmt = gsi_stmt (*gsi_p);
	      if (gimple_code (stmt) == GIMPLE_LABEL
		  && gimple_label_label (as_a <glabel *> (stmt)) == false_lab)
		break;
	    }

	  /* Not found?  Oops.  */
	  if (gsi_end_p (*gsi_p))
	    break;

	  struct label_entry l = { false_lab, if_loc };
	  labels->safe_push (l);

	  /* Go to the last statement of the then branch.  */
	  gsi_prev (gsi_p);

	  /* if (i != 0) goto <D.1759>; else goto <D.1760>;
	     <D.1759>:
	     <stmt>;
	     goto <D.1761>;
	     <D.1760>:
	   */
	  if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_GOTO
	      && !gimple_has_location (gsi_stmt (*gsi_p)))
	    {
	      /* Look at the statement before, it might be
		 attribute fallthrough, in which case don't warn.  */
	      gsi_prev (gsi_p);
	      bool fallthru_before_dest
		= gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_FALLTHROUGH);
	      gsi_next (gsi_p);
	      tree goto_dest = gimple_goto_dest (gsi_stmt (*gsi_p));
	      if (!fallthru_before_dest)
		{
		  struct label_entry l = { goto_dest, if_loc };
		  labels->safe_push (l);
		}
	    }
	  /* And move back.  */
	  gsi_next (gsi_p);
	}

      /* Remember the last statement.  Skip labels that are of no interest
	 to us.  */
      if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
	{
	  tree label = gimple_label_label (as_a <glabel *> (gsi_stmt (*gsi_p)));
	  if (find_label_entry (labels, label))
	    prev = gsi_stmt (*gsi_p);
	}
      else if (gimple_call_internal_p (gsi_stmt (*gsi_p), IFN_ASAN_MARK))
	;
      else if (gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_PREDICT)
	;
      else if (!is_gimple_debug (gsi_stmt (*gsi_p)))
	prev = gsi_stmt (*gsi_p);
      gsi_next (gsi_p);
    }
  while (!gsi_end_p (*gsi_p)
	 /* Stop if we find a case or a user-defined label.  */
	 && (gimple_code (gsi_stmt (*gsi_p)) != GIMPLE_LABEL
	     || !gimple_has_location (gsi_stmt (*gsi_p))));
  if (prev && gimple_has_location (prev))
    *prevloc = gimple_location (prev);
  return prev;
}

/* Return true if the switch fallthough warning should occur.  LABEL is
   the label statement that we're falling through to.  */

static bool
should_warn_for_implicit_fallthrough (gimple_stmt_iterator *gsi_p, tree label)
{
  gimple_stmt_iterator gsi = *gsi_p;

  /* Don't warn if the label is marked with a "falls through" comment.  */
  if (FALLTHROUGH_LABEL_P (label))
    return false;

  /* Don't warn for non-case labels followed by a statement:
       case 0:
	 foo ();
       label:
	 bar ();
     as these are likely intentional.  */
  if (!case_label_p (&gimplify_ctxp->case_labels, label))
    {
      tree l;
      while (!gsi_end_p (gsi)
	     && gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     && (l = gimple_label_label (as_a <glabel *> (gsi_stmt (gsi))))
	     && !case_label_p (&gimplify_ctxp->case_labels, l))
	gsi_next_nondebug (&gsi);
      if (gsi_end_p (gsi) || gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
	return false;
    }

  /* Don't warn for terminated branches, i.e. when the subsequent case labels
     immediately breaks.  */
  gsi = *gsi_p;

  /* Skip all immediately following labels.  */
  while (!gsi_end_p (gsi)
	 && (gimple_code (gsi_stmt (gsi)) == GIMPLE_LABEL
	     || gimple_code (gsi_stmt (gsi)) == GIMPLE_PREDICT))
    gsi_next_nondebug (&gsi);

  /* { ... something; default:; } */
  if (gsi_end_p (gsi)
      /* { ... something; default: break; } or
	 { ... something; default: goto L; } */
      || gimple_code (gsi_stmt (gsi)) == GIMPLE_GOTO
      /* { ... something; default: return; } */
      || gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
    return false;

  return true;
}

/* Callback for walk_gimple_seq.  Emits -Wimplicit-fallthrough diagnostics
   for statement sequences that can fall from one user label to the next.  */

static tree
warn_implicit_fallthrough_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
			     struct walk_stmt_info *)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;

    /* Find a sequence of form:

       GIMPLE_LABEL
       [...]
       <may fallthru stmt>
       GIMPLE_LABEL

       and possibly warn.  */
    case GIMPLE_LABEL:
      {
	/* Found a label.  Skip all immediately following labels.  */
	while (!gsi_end_p (*gsi_p)
	       && gimple_code (gsi_stmt (*gsi_p)) == GIMPLE_LABEL)
	  gsi_next_nondebug (gsi_p);

	/* There might be no more statements.  */
	if (gsi_end_p (*gsi_p))
	  return integer_zero_node;

	/* Vector of labels that fall through.  */
	auto_vec <struct label_entry> labels;
	location_t prevloc;
	gimple *prev = collect_fallthrough_labels (gsi_p, &labels, &prevloc);

	/* There might be no more statements.  */
	if (gsi_end_p (*gsi_p))
	  return integer_zero_node;

	gimple *next = gsi_stmt (*gsi_p);
	tree label;
	/* If what follows is a label, then we may have a fallthrough.  */
	if (gimple_code (next) == GIMPLE_LABEL
	    && gimple_has_location (next)
	    && (label = gimple_label_label (as_a <glabel *> (next)))
	    && prev != NULL)
	  {
	    struct label_entry *l;
	    bool warned_p = false;
	    auto_diagnostic_group d;
	    if (!should_warn_for_implicit_fallthrough (gsi_p, label))
	      /* Quiet.  */;
	    else if (gimple_code (prev) == GIMPLE_LABEL
		     && (label = gimple_label_label (as_a <glabel *> (prev)))
		     && (l = find_label_entry (&labels, label)))
	      warned_p = warning_at (l->loc, OPT_Wimplicit_fallthrough_,
				     "this statement may fall through");
	    else if (!gimple_call_internal_p (prev, IFN_FALLTHROUGH)
		     /* Try to be clever and don't warn when the statement
			can't actually fall through.  */
		     && gimple_stmt_may_fallthru (prev)
		     && prevloc != UNKNOWN_LOCATION)
	      warned_p = warning_at (prevloc,
				     OPT_Wimplicit_fallthrough_,
				     "this statement may fall through");
	    if (warned_p)
	      inform (gimple_location (next), "here");

	    /* Mark this label as processed so as to prevent multiple
	       warnings in nested switches.  */
	    FALLTHROUGH_LABEL_P (label) = true;

	    /* So that next warn_implicit_fallthrough_r will start looking for
	       a new sequence starting with this label.  */
	    gsi_prev (gsi_p);
	  }
      }
      break;

   default:
      break;
    }
  return NULL_TREE;
}

/* Warn when a switch case falls through.  */

static void
maybe_warn_implicit_fallthrough (gimple_seq seq)
{
  if (!warn_implicit_fallthrough)
    return;

  /* This warning is meant for C/C++/ObjC/ObjC++ only.  */
  if (!(lang_GNU_C ()
	|| lang_GNU_CXX ()
	|| lang_GNU_OBJC ()))
    return;

  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (seq, warn_implicit_fallthrough_r, NULL, &wi);
}

/* Callback for walk_gimple_seq.  Removes IFN_FALLTHROUGH marker calls
   and diagnoses those not immediately preceding a case/default label.
   WI->info points to a location_t used to report a marker at the very
   end of the switch body.  */

static tree
expand_FALLTHROUGH_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
		      struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_TRY:
    case GIMPLE_BIND:
    case GIMPLE_CATCH:
    case GIMPLE_EH_FILTER:
    case GIMPLE_TRANSACTION:
      /* Walk the sub-statements.  */
      *handled_ops_p = false;
      break;
    case GIMPLE_CALL:
      if (gimple_call_internal_p (stmt, IFN_FALLTHROUGH))
	{
	  gsi_remove (gsi_p, true);
	  if (gsi_end_p (*gsi_p))
	    {
	      /* Marker at the end of the sequence: report back to the
		 caller via WI->info and the sentinel return value.  */
	      *static_cast<location_t *>(wi->info) = gimple_location (stmt);
	      return integer_zero_node;
	    }

	  bool found = false;
	  location_t loc = gimple_location (stmt);

	  gimple_stmt_iterator gsi2 = *gsi_p;
	  stmt = gsi_stmt (gsi2);
	  if (gimple_code (stmt) == GIMPLE_GOTO && !gimple_has_location (stmt))
	    {
	      /* Go on until the artificial label.  */
	      tree goto_dest = gimple_goto_dest (stmt);
	      for (; !gsi_end_p (gsi2); gsi_next (&gsi2))
		{
		  if (gimple_code (gsi_stmt (gsi2)) == GIMPLE_LABEL
		      && gimple_label_label (as_a <glabel *> (gsi_stmt (gsi2)))
			   == goto_dest)
		    break;
		}

	      /* Not found?  Stop.  */
	      if (gsi_end_p (gsi2))
		break;

	      /* Look one past it.  */
	      gsi_next (&gsi2);
	    }

	  /* We're looking for a case label or default label here.  */
	  while (!gsi_end_p (gsi2))
	    {
	      stmt = gsi_stmt (gsi2);
	      if (gimple_code (stmt) == GIMPLE_LABEL)
		{
		  tree label = gimple_label_label (as_a <glabel *> (stmt));
		  if (gimple_has_location (stmt) && DECL_ARTIFICIAL (label))
		    {
		      found = true;
		      break;
		    }
		}
	      else if (gimple_call_internal_p (stmt, IFN_ASAN_MARK))
		;
	      else if (!is_gimple_debug (stmt))
		/* Anything else is not expected.  */
		break;
	      gsi_next (&gsi2);
	    }
	  if (!found)
	    pedwarn (loc, 0, "attribute %<fallthrough%> not preceding "
		     "a case label or default label");
	}
      break;
    default:
      break;
    }
  return NULL_TREE;
}

/* Expand all FALLTHROUGH () calls in SEQ.  */

static void
expand_FALLTHROUGH (gimple_seq *seq_p)
{
  struct walk_stmt_info wi;
  location_t loc;
  memset (&wi, 0, sizeof (wi));
  wi.info = (void *) &loc;
  walk_gimple_seq_mod (seq_p, expand_FALLTHROUGH_r, NULL, &wi);
  if (wi.callback_result == integer_zero_node)
    /* We've found [[fallthrough]]; at the end of a switch, which the C++
       standard says is ill-formed; see [dcl.attr.fallthrough].  */
    pedwarn (loc, 0, "attribute %<fallthrough%> not preceding "
	     "a case label or default label");
}

/* Gimplify a SWITCH_EXPR, and collect the vector of labels it can
   branch to.  */

static enum gimplify_status
gimplify_switch_expr (tree *expr_p, gimple_seq *pre_p)
{
  tree switch_expr = *expr_p;
  gimple_seq switch_body_seq = NULL;
  enum gimplify_status ret;
  tree index_type = TREE_TYPE (switch_expr);
  if (index_type == NULL_TREE)
    index_type = TREE_TYPE (SWITCH_COND (switch_expr));

  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL, is_gimple_val,
		       fb_rvalue);
  if (ret == GS_ERROR || ret == GS_UNHANDLED)
    return ret;

  if (SWITCH_BODY (switch_expr))
    {
      vec<tree> labels;
      vec<tree> saved_labels;
      hash_set<tree> *saved_live_switch_vars = NULL;
      tree default_case = NULL_TREE;
      gswitch *switch_stmt;

      /* Save old labels, get new ones from body, then restore the old
	 labels.  Save all the things from the switch body to append after.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels.create (8);

      /* Do not create live_switch_vars if SWITCH_BODY is not a BIND_EXPR.  */
      saved_live_switch_vars = gimplify_ctxp->live_switch_vars;
      tree_code body_type = TREE_CODE (SWITCH_BODY (switch_expr));
      if (body_type == BIND_EXPR || body_type == STATEMENT_LIST)
	gimplify_ctxp->live_switch_vars = new hash_set<tree> (4);
      else
	gimplify_ctxp->live_switch_vars = NULL;

      bool old_in_switch_expr = gimplify_ctxp->in_switch_expr;
      gimplify_ctxp->in_switch_expr = true;

      gimplify_stmt (&SWITCH_BODY (switch_expr), &switch_body_seq);

      gimplify_ctxp->in_switch_expr = old_in_switch_expr;
      maybe_warn_switch_unreachable (switch_body_seq);
      maybe_warn_implicit_fallthrough (switch_body_seq);
      /* Only do this for the outermost GIMPLE_SWITCH.  */
      if (!gimplify_ctxp->in_switch_expr)
	expand_FALLTHROUGH (&switch_body_seq);

      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;

      if (gimplify_ctxp->live_switch_vars)
	{
	  gcc_assert (gimplify_ctxp->live_switch_vars->is_empty ());
	  delete gimplify_ctxp->live_switch_vars;
	}
      gimplify_ctxp->live_switch_vars = saved_live_switch_vars;

      preprocess_case_label_vec_for_gimple (labels, index_type,
					    &default_case);

      bool add_bind = false;
      if (!default_case)
	{
	  glabel *new_default;

	  /* Synthesize a default label so the GIMPLE_SWITCH always has one.  */
	  default_case
	    = build_case_label (NULL_TREE, NULL_TREE,
				create_artificial_label (UNKNOWN_LOCATION));
	  if (old_in_switch_expr)
	    {
	      SWITCH_BREAK_LABEL_P (CASE_LABEL (default_case)) = 1;
	      add_bind = true;
	    }
	  new_default = gimple_build_label (CASE_LABEL (default_case));
	  gimplify_seq_add_stmt (&switch_body_seq, new_default);
	}
      else if (old_in_switch_expr)
	{
	  gimple *last = gimple_seq_last_stmt (switch_body_seq);
	  if (last && gimple_code (last) == GIMPLE_LABEL)
	    {
	      tree label = gimple_label_label (as_a <glabel *> (last));
	      if (SWITCH_BREAK_LABEL_P (label))
		add_bind = true;
	    }
	}

      switch_stmt = gimple_build_switch (SWITCH_COND (switch_expr),
					 default_case, labels);
      /* For the benefit of -Wimplicit-fallthrough, if switch_body_seq
	 ends with a GIMPLE_LABEL holding SWITCH_BREAK_LABEL_P LABEL_DECL,
	 wrap the GIMPLE_SWITCH up to that GIMPLE_LABEL into a GIMPLE_BIND,
	 so that we can easily find the start and end of the switch
	 statement.  */
      if (add_bind)
	{
	  gimple_seq bind_body = NULL;
	  gimplify_seq_add_stmt (&bind_body, switch_stmt);
	  gimple_seq_add_seq (&bind_body, switch_body_seq);
	  gbind *bind = gimple_build_bind (NULL_TREE, bind_body, NULL_TREE);
	  gimple_set_location (bind, EXPR_LOCATION (switch_expr));
	  gimplify_seq_add_stmt (pre_p, bind);
	}
      else
	{
	  gimplify_seq_add_stmt (pre_p, switch_stmt);
	  gimplify_seq_add_seq (pre_p, switch_body_seq);
	}
      labels.release ();
    }
  else
    gcc_unreachable ();

  return GS_ALL_DONE;
}

/* Gimplify the LABEL_EXPR pointed to by EXPR_P.
*/ static enum gimplify_status gimplify_label_expr (tree *expr_p, gimple_seq *pre_p) { gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p)) == current_function_decl); tree label = LABEL_EXPR_LABEL (*expr_p); glabel *label_stmt = gimple_build_label (label); gimple_set_location (label_stmt, EXPR_LOCATION (*expr_p)); gimplify_seq_add_stmt (pre_p, label_stmt); if (lookup_attribute ("cold", DECL_ATTRIBUTES (label))) gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_COLD_LABEL, NOT_TAKEN)); else if (lookup_attribute ("hot", DECL_ATTRIBUTES (label))) gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_HOT_LABEL, TAKEN)); return GS_ALL_DONE; } /* Gimplify the CASE_LABEL_EXPR pointed to by EXPR_P. */ static enum gimplify_status gimplify_case_label_expr (tree *expr_p, gimple_seq *pre_p) { struct gimplify_ctx *ctxp; glabel *label_stmt; /* Invalid programs can play Duff's Device type games with, for example, #pragma omp parallel. At least in the C front end, we don't detect such invalid branches until after gimplification, in the diagnose_omp_blocks pass. */ for (ctxp = gimplify_ctxp; ; ctxp = ctxp->prev_context) if (ctxp->case_labels.exists ()) break; tree label = CASE_LABEL (*expr_p); label_stmt = gimple_build_label (label); gimple_set_location (label_stmt, EXPR_LOCATION (*expr_p)); ctxp->case_labels.safe_push (*expr_p); gimplify_seq_add_stmt (pre_p, label_stmt); if (lookup_attribute ("cold", DECL_ATTRIBUTES (label))) gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_COLD_LABEL, NOT_TAKEN)); else if (lookup_attribute ("hot", DECL_ATTRIBUTES (label))) gimple_seq_add_stmt (pre_p, gimple_build_predict (PRED_HOT_LABEL, TAKEN)); return GS_ALL_DONE; } /* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first if necessary. */ tree build_and_jump (tree *label_p) { if (label_p == NULL) /* If there's nowhere to jump, just fall through. 
*/ return NULL_TREE; if (*label_p == NULL_TREE) { tree label = create_artificial_label (UNKNOWN_LOCATION); *label_p = label; } return build1 (GOTO_EXPR, void_type_node, *label_p); } /* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR. This also involves building a label to jump to and communicating it to gimplify_loop_expr through gimplify_ctxp->exit_label. */ static enum gimplify_status gimplify_exit_expr (tree *expr_p) { tree cond = TREE_OPERAND (*expr_p, 0); tree expr; expr = build_and_jump (&gimplify_ctxp->exit_label); expr = build3 (COND_EXPR, void_type_node, cond, expr, NULL_TREE); *expr_p = expr; return GS_OK; } /* *EXPR_P is a COMPONENT_REF being used as an rvalue. If its type is different from its canonical type, wrap the whole thing inside a NOP_EXPR and force the type of the COMPONENT_REF to be the canonical type. The canonical type of a COMPONENT_REF is the type of the field being referenced--unless the field is a bit-field which can be read directly in a smaller mode, in which case the canonical type is the sign-appropriate type corresponding to that mode. */ static void canonicalize_component_ref (tree *expr_p) { tree expr = *expr_p; tree type; gcc_assert (TREE_CODE (expr) == COMPONENT_REF); if (INTEGRAL_TYPE_P (TREE_TYPE (expr))) type = TREE_TYPE (get_unwidened (expr, NULL_TREE)); else type = TREE_TYPE (TREE_OPERAND (expr, 1)); /* One could argue that all the stuff below is not necessary for the non-bitfield case and declare it a FE error if type adjustment would be needed. */ if (TREE_TYPE (expr) != type) { #ifdef ENABLE_TYPES_CHECKING tree old_type = TREE_TYPE (expr); #endif int type_quals; /* We need to preserve qualifiers and propagate them from operand 0. */ type_quals = TYPE_QUALS (type) | TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0))); if (TYPE_QUALS (type) != type_quals) type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals); /* Set the type of the COMPONENT_REF to the underlying type. 
*/ TREE_TYPE (expr) = type; #ifdef ENABLE_TYPES_CHECKING /* It is now a FE error, if the conversion from the canonical type to the original expression type is not useless. */ gcc_assert (useless_type_conversion_p (old_type, type)); #endif } } /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR by converting T array[U]; (T *)&array ==> &array[L] where L is the lower bound. For simplicity, only do this for constant lower bound. The constraint is that the type of &array[L] is trivially convertible to T *. */ static void canonicalize_addr_expr (tree *expr_p) { tree expr = *expr_p; tree addr_expr = TREE_OPERAND (expr, 0); tree datype, ddatype, pddatype; /* We simplify only conversions from an ADDR_EXPR to a pointer type. */ if (!POINTER_TYPE_P (TREE_TYPE (expr)) || TREE_CODE (addr_expr) != ADDR_EXPR) return; /* The addr_expr type should be a pointer to an array. */ datype = TREE_TYPE (TREE_TYPE (addr_expr)); if (TREE_CODE (datype) != ARRAY_TYPE) return; /* The pointer to element type shall be trivially convertible to the expression pointer type. */ ddatype = TREE_TYPE (datype); pddatype = build_pointer_type (ddatype); if (!useless_type_conversion_p (TYPE_MAIN_VARIANT (TREE_TYPE (expr)), pddatype)) return; /* The lower bound and element sizes must be constant. */ if (!TYPE_SIZE_UNIT (ddatype) || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype)) || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST) return; /* All checks succeeded. Build a new node to merge the cast. */ *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0), TYPE_MIN_VALUE (TYPE_DOMAIN (datype)), NULL_TREE, NULL_TREE); *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p); /* We can have stripped a required restrict qualifier above. 
*/ if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p))) *expr_p = fold_convert (TREE_TYPE (expr), *expr_p); } /* *EXPR_P is a NOP_EXPR or CONVERT_EXPR. Remove it and/or other conversions underneath as appropriate. */ static enum gimplify_status gimplify_conversion (tree *expr_p) { location_t loc = EXPR_LOCATION (*expr_p); gcc_assert (CONVERT_EXPR_P (*expr_p)); /* Then strip away all but the outermost conversion. */ STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0)); /* And remove the outermost conversion if it's useless. */ if (tree_ssa_useless_type_conversion (*expr_p)) *expr_p = TREE_OPERAND (*expr_p, 0); /* If we still have a conversion at the toplevel, then canonicalize some constructs. */ if (CONVERT_EXPR_P (*expr_p)) { tree sub = TREE_OPERAND (*expr_p, 0); /* If a NOP conversion is changing the type of a COMPONENT_REF expression, then canonicalize its type now in order to expose more redundant conversions. */ if (TREE_CODE (sub) == COMPONENT_REF) canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0)); /* If a NOP conversion is changing a pointer to array of foo to a pointer to foo, embed that change in the ADDR_EXPR. */ else if (TREE_CODE (sub) == ADDR_EXPR) canonicalize_addr_expr (expr_p); } /* If we have a conversion to a non-register type force the use of a VIEW_CONVERT_EXPR instead. */ if (CONVERT_EXPR_P (*expr_p) && !is_gimple_reg_type (TREE_TYPE (*expr_p))) *expr_p = fold_build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0)); /* Canonicalize CONVERT_EXPR to NOP_EXPR. */ if (TREE_CODE (*expr_p) == CONVERT_EXPR) TREE_SET_CODE (*expr_p, NOP_EXPR); return GS_OK; } /* Gimplify a VAR_DECL or PARM_DECL. Return GS_OK if we expanded a DECL_VALUE_EXPR, and it's worth re-examining things. */ static enum gimplify_status gimplify_var_or_parm_decl (tree *expr_p) { tree decl = *expr_p; /* ??? 
If this is a local variable, and it has not been seen in any outer BIND_EXPR, then it's probably the result of a duplicate declaration, for which we've already issued an error. It would be really nice if the front end wouldn't leak these at all. Currently the only known culprit is C++ destructors, as seen in g++.old-deja/g++.jason/binding.C. */ if (VAR_P (decl) && !DECL_SEEN_IN_BIND_EXPR_P (decl) && !TREE_STATIC (decl) && !DECL_EXTERNAL (decl) && decl_function_context (decl) == current_function_decl) { gcc_assert (seen_error ()); return GS_ERROR; } /* When within an OMP context, notice uses of variables. */ if (gimplify_omp_ctxp && omp_notice_variable (gimplify_omp_ctxp, decl, true)) return GS_ALL_DONE; /* If the decl is an alias for another expression, substitute it now. */ if (DECL_HAS_VALUE_EXPR_P (decl)) { *expr_p = unshare_expr (DECL_VALUE_EXPR (decl)); return GS_OK; } return GS_ALL_DONE; } /* Recalculate the value of the TREE_SIDE_EFFECTS flag for T. */ static void recalculate_side_effects (tree t) { enum tree_code code = TREE_CODE (t); int len = TREE_OPERAND_LENGTH (t); int i; switch (TREE_CODE_CLASS (code)) { case tcc_expression: switch (code) { case INIT_EXPR: case MODIFY_EXPR: case VA_ARG_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: /* All of these have side-effects, no matter what their operands are. */ return; default: break; } /* Fall through. */ case tcc_comparison: /* a comparison expression */ case tcc_unary: /* a unary arithmetic expression */ case tcc_binary: /* a binary arithmetic expression */ case tcc_reference: /* a reference */ case tcc_vl_exp: /* a function call */ TREE_SIDE_EFFECTS (t) = TREE_THIS_VOLATILE (t); for (i = 0; i < len; ++i) { tree op = TREE_OPERAND (t, i); if (op && TREE_SIDE_EFFECTS (op)) TREE_SIDE_EFFECTS (t) = 1; } break; case tcc_constant: /* No side-effects. 
*/ return; default: gcc_unreachable (); } } /* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR node *EXPR_P. compound_lval : min_lval '[' val ']' | min_lval '.' ID | compound_lval '[' val ']' | compound_lval '.' ID This is not part of the original SIMPLE definition, which separates array and member references, but it seems reasonable to handle them together. Also, this way we don't run into problems with union aliasing; gcc requires that for accesses through a union to alias, the union reference must be explicit, which was not always the case when we were splitting up array and member refs. PRE_P points to the sequence where side effects that must happen before *EXPR_P should be stored. POST_P points to the sequence where side effects that must happen after *EXPR_P should be stored. */ static enum gimplify_status gimplify_compound_lval (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, fallback_t fallback) { tree *p; enum gimplify_status ret = GS_ALL_DONE, tret; int i; location_t loc = EXPR_LOCATION (*expr_p); tree expr = *expr_p; /* Create a stack of the subexpressions so later we can walk them in order from inner to outer. */ auto_vec<tree, 10> expr_stack; /* We can handle anything that get_inner_reference can deal with. */ for (p = expr_p; ; p = &TREE_OPERAND (*p, 0)) { restart: /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs. */ if (TREE_CODE (*p) == INDIRECT_REF) *p = fold_indirect_ref_loc (loc, *p); if (handled_component_p (*p)) ; /* Expand DECL_VALUE_EXPR now. In some cases that may expose additional COMPONENT_REFs. */ else if ((VAR_P (*p) || TREE_CODE (*p) == PARM_DECL) && gimplify_var_or_parm_decl (p) == GS_OK) goto restart; else break; expr_stack.safe_push (*p); } gcc_assert (expr_stack.length ()); /* Now EXPR_STACK is a stack of pointers to all the refs we've walked through and P points to the innermost expression. Java requires that we elaborated nodes in source order. 
That means we must gimplify the inner expression followed by each of the indices, in order. But we can't gimplify the inner expression until we deal with any variable bounds, sizes, or positions in order to deal with PLACEHOLDER_EXPRs. So we do this in three steps. First we deal with the annotations for any variables in the components, then we gimplify the base, then we gimplify any indices, from left to right. */ for (i = expr_stack.length () - 1; i >= 0; i--) { tree t = expr_stack[i]; if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { /* Gimplify the low bound and element type size and put them into the ARRAY_REF. If these values are set, they have already been gimplified. */ if (TREE_OPERAND (t, 2) == NULL_TREE) { tree low = unshare_expr (array_ref_low_bound (t)); if (!is_gimple_min_invariant (low)) { TREE_OPERAND (t, 2) = low; tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else { tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } if (TREE_OPERAND (t, 3) == NULL_TREE) { tree elmt_size = array_ref_element_size (t); if (!is_gimple_min_invariant (elmt_size)) { elmt_size = unshare_expr (elmt_size); tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0))); tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type)); /* Divide the element size by the alignment of the element type (above). */ elmt_size = size_binop_loc (loc, EXACT_DIV_EXPR, elmt_size, factor); TREE_OPERAND (t, 3) = elmt_size; tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else { tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else if (TREE_CODE (t) == COMPONENT_REF) { /* Set the field offset into T and gimplify it. 
*/ if (TREE_OPERAND (t, 2) == NULL_TREE) { tree offset = component_ref_field_offset (t); if (!is_gimple_min_invariant (offset)) { offset = unshare_expr (offset); tree field = TREE_OPERAND (t, 1); tree factor = size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT); /* Divide the offset by its alignment. */ offset = size_binop_loc (loc, EXACT_DIV_EXPR, offset, factor); TREE_OPERAND (t, 2) = offset; tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } else { tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p, is_gimple_reg, fb_rvalue); ret = MIN (ret, tret); } } } /* Step 2 is to gimplify the base expression. Make sure lvalue is set so as to match the min_lval predicate. Failure to do so may result in the creation of large aggregate temporaries. */ tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback | fb_lvalue); ret = MIN (ret, tret); /* And finally, the indices and operands of ARRAY_REF. During this loop we also remove any useless conversions. */ for (; expr_stack.length () > 0; ) { tree t = expr_stack.pop (); if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF) { /* Gimplify the dimension. */ if (!is_gimple_min_invariant (TREE_OPERAND (t, 1))) { tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); } } STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0)); /* The innermost expression P may have originally had TREE_SIDE_EFFECTS set which would have caused all the outer expressions in *EXPR_P leading to P to also have had TREE_SIDE_EFFECTS set. */ recalculate_side_effects (t); } /* If the outermost expression is a COMPONENT_REF, canonicalize its type. 
*/
  /* Tail of gimplify_compound_lval: canonicalize the outermost
     COMPONENT_REF when an rvalue is acceptable, then tear down the
     per-call stack of handled components.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
    }

  expr_stack.release ();

  /* If nothing changed, *expr_p must still be the original expression.  */
  gcc_assert (*expr_p == expr || ret != GS_ALL_DONE);

  return ret;
}

/* Gimplify the self modifying expression pointed to by EXPR_P
   (++, --, +=, -=).

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.

   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.

   WANT_VALUE is nonzero iff we want to use the value of this expression
   in another expression.

   ARITH_TYPE is the type the computation should be performed in.

   Returns GS_ALL_DONE (postfix handled fully here), GS_OK (rewritten to
   a MODIFY_EXPR for further gimplification) or GS_ERROR.  */

enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
                        bool want_value, tree arith_type)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1;
  gimple_seq post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  code = TREE_CODE (*expr_p);

  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
              || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);

  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;

  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;

  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;

  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);

  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
        return ret;

      /* Snapshot the pre-increment value into a temporary; that snapshot
         is the result of the postfix expression.  */
      lhs = get_initialized_tmp_var (lhs, pre_p);
    }

  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      rhs = convert_to_ptrofftype_loc (loc, rhs);
      /* POINTER_PLUS_EXPR has no MINUS counterpart; negate the offset.  */
      if (arith_code == MINUS_EXPR)
        rhs = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      t1 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (*expr_p), lhs, rhs);
    }
  else
    /* Perform the arithmetic in ARITH_TYPE, then convert back.  */
    t1 = fold_convert (TREE_TYPE (*expr_p),
                       fold_build2 (arith_code, arith_type,
                                    fold_convert (arith_type, lhs),
                                    fold_convert (arith_type, rhs)));

  if (postfix)
    {
      /* Emit the store now; hand back the saved pre-increment value.  */
      gimplify_assign (lvalue, t1, pre_p);
      gimplify_seq_add_seq (orig_post_p, post);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      /* Prefix: rewrite as an assignment and let the caller gimplify it.  */
      *expr_p = build2 (MODIFY_EXPR, TREE_TYPE (lvalue), lvalue, t1);
      return GS_OK;
    }
}

/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR.  */

static void
maybe_with_size_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size;

  /* If we've already wrapped this or the type is error_mark_node, we can't do
     anything.  */
  if (TREE_CODE (expr) == WITH_SIZE_EXPR
      || type == error_mark_node)
    return;

  /* If the size isn't known or is a constant, we have nothing to do.  */
  size = TYPE_SIZE_UNIT (type);
  if (!size || poly_int_tree_p (size))
    return;

  /* Otherwise, make a WITH_SIZE_EXPR.  The size may refer to the object
     itself via PLACEHOLDER_EXPRs, so substitute EXPR into it.  */
  size = unshare_expr (size);
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size);
}

/* Helper for gimplify_call_expr.  Gimplify a single argument *ARG_P
   Store any side-effects in PRE_P.  CALL_LOCATION is the location of
   the CALL_EXPR.  If ALLOW_SSA is set the actual parameter may be
   gimplified to an SSA name.
*/

enum gimplify_status
gimplify_arg (tree *arg_p, gimple_seq *pre_p, location_t call_location,
              bool allow_ssa)
{
  bool (*test) (tree);
  fallback_t fb;

  /* In general, we allow lvalues for function arguments to avoid
     extra overhead of copying large aggregates out of even larger
     aggregates into temporaries only to copy the temporaries to
     the argument list.  Make optimizers happy by pulling out to
     temporaries those types that fit in registers.  */
  if (is_gimple_reg_type (TREE_TYPE (*arg_p)))
    test = is_gimple_val, fb = fb_rvalue;
  else
    {
      test = is_gimple_lvalue, fb = fb_either;
      /* Also strip a TARGET_EXPR that would force an extra copy.  */
      if (TREE_CODE (*arg_p) == TARGET_EXPR)
        {
          tree init = TARGET_EXPR_INITIAL (*arg_p);
          if (init
              && !VOID_TYPE_P (TREE_TYPE (init)))
            *arg_p = init;
        }
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (arg_p);

  /* FIXME diagnostics: This will mess up gcc.dg/Warray-bounds.c.  */
  /* Make sure arguments have the same location as the function call
     itself.  */
  protected_set_expr_location (*arg_p, call_location);

  /* There is a sequence point before a function call.  Side effects in
     the argument list must occur before the actual call.  So, when
     gimplifying arguments, force gimplify_expr to use an internal
     post queue which is then appended to the end of PRE_P.  */
  return gimplify_expr (arg_p, pre_p, NULL, test, fb, allow_ssa);
}

/* Don't fold inside offloading or taskreg regions: it can break code by
   adding decl references that weren't in the source.  We'll do it during
   omplower pass instead.  */

static bool
maybe_fold_stmt (gimple_stmt_iterator *gsi)
{
  struct gimplify_omp_ctx *ctx;
  /* Walk up the enclosing OpenMP contexts; any offload/parallel/task
     (or host-teams) region disables folding here.  */
  for (ctx = gimplify_omp_ctxp; ctx; ctx = ctx->outer_context)
    if ((ctx->region_type & (ORT_TARGET | ORT_PARALLEL | ORT_TASK)) != 0)
      return false;
    else if ((ctx->region_type & ORT_HOST_TEAMS) == ORT_HOST_TEAMS)
      return false;
  /* Delay folding of builtins until the IL is in consistent state
     so the diagnostic machinery can do a better job.  */
  if (gimple_call_builtin_p (gsi_stmt (*gsi)))
    return false;

  return fold_stmt (gsi);
}

/* Gimplify the CALL_EXPR node *EXPR_P into the GIMPLE sequence PRE_P.
   WANT_VALUE is true if the result of the call is desired.  */

static enum gimplify_status
gimplify_call_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree fndecl, parms, p, fnptrtype;
  enum gimplify_status ret;
  int i, nargs;
  gcall *call;
  bool builtin_va_start_p = false;
  location_t loc = EXPR_LOCATION (*expr_p);

  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);

  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);

  /* Gimplify internal functions created in the FEs.  */
  if (CALL_EXPR_FN (*expr_p) == NULL_TREE)
    {
      /* Value-producing internal calls are handled elsewhere
         (via gimplify_modify_expr); nothing to do here.  */
      if (want_value)
        return GS_ALL_DONE;

      nargs = call_expr_nargs (*expr_p);
      enum internal_fn ifn = CALL_EXPR_IFN (*expr_p);
      auto_vec<tree> vargs (nargs);

      for (i = 0; i < nargs; i++)
        {
          gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
                        EXPR_LOCATION (*expr_p));
          vargs.quick_push (CALL_EXPR_ARG (*expr_p, i));
        }

      /* NOTE: this intentionally shadows the outer CALL variable.  */
      gcall *call = gimple_build_call_internal_vec (ifn, vargs);
      gimple_call_set_nothrow (call, TREE_NOTHROW (*expr_p));
      gimplify_seq_add_stmt (pre_p, call);
      return GS_ALL_DONE;
    }

  /* This may be a call to a builtin function.

     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.

     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.
*/
  fndecl = get_callee_fndecl (*expr_p);
  if (fndecl && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      CASE_BUILT_IN_ALLOCA:
        /* If the call has been built for a variable-sized object, then we
           want to restore the stack level when the enclosing BIND_EXPR is
           exited to reclaim the allocated space; otherwise, we precisely
           need to do the opposite and preserve the latest stack level.  */
        if (CALL_ALLOCA_FOR_VAR_P (*expr_p))
          gimplify_ctxp->save_stack = true;
        else
          gimplify_ctxp->keep_stack = true;
        break;

      case BUILT_IN_VA_START:
        {
          builtin_va_start_p = TRUE;
          if (call_expr_nargs (*expr_p) < 2)
            {
              error ("too few arguments to function %<va_start%>");
              *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
              return GS_OK;
            }

          if (fold_builtin_next_arg (*expr_p, true))
            {
              /* Bad second argument; replace the call with an empty
                 statement (the error has already been diagnosed).  */
              *expr_p = build_empty_stmt (EXPR_LOCATION (*expr_p));
              return GS_OK;
            }
          break;
        }

      case BUILT_IN_EH_RETURN:
        cfun->calls_eh_return = true;
        break;

      default:
        ;
      }
  if (fndecl && fndecl_built_in_p (fndecl))
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);
      if (new_tree && new_tree != *expr_p)
        {
          /* There was a transformation of this call which computes the
             same value, but in a more efficient way.  Return and try
             again.  */
          *expr_p = new_tree;
          return GS_OK;
        }
    }

  /* Remember the original function pointer type.  */
  fnptrtype = TREE_TYPE (CALL_EXPR_FN (*expr_p));

  if (flag_openmp
      && fndecl
      && cfun
      && (cfun->curr_properties & PROP_gimple_any) == 0)
    {
      /* Resolve "declare variant" to the selected variant, if any.  */
      tree variant = omp_resolve_declare_variant (fndecl);
      if (variant != fndecl)
        CALL_EXPR_FN (*expr_p) = build1 (ADDR_EXPR, fnptrtype, variant);
    }

  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
                       is_gimple_call_addr, fb_rvalue);

  nargs = call_expr_nargs (*expr_p);

  /* Get argument types for verification.  */
  fndecl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (fndecl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  else
    parms = TYPE_ARG_TYPES (TREE_TYPE (fnptrtype));

  if (fndecl && DECL_ARGUMENTS (fndecl))
    p = DECL_ARGUMENTS (fndecl);
  else if (parms)
    p = parms;
  else
    p = NULL_TREE;
  /* Advance P past the named parameters that are covered by the
     actual arguments; P == NULL afterwards means all were named.  */
  for (i = 0; i < nargs && p; i++, p = TREE_CHAIN (p))
    ;

  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);

      if (last_arg_fndecl
          && fndecl_built_in_p (last_arg_fndecl, BUILT_IN_VA_ARG_PACK))
        {
          tree call = *expr_p;

          --nargs;
          *expr_p = build_call_array_loc (loc, TREE_TYPE (call),
                                          CALL_EXPR_FN (call),
                                          nargs, CALL_EXPR_ARGP (call));

          /* Copy all CALL_EXPR flags, location and block, except
             CALL_EXPR_VA_ARG_PACK flag.  */
          CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
          CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
          CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
            = CALL_EXPR_RETURN_SLOT_OPT (call);
          CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
          SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (call));

          /* Set CALL_EXPR_VA_ARG_PACK.  */
          CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
        }
    }

  /* If the call returns twice then after building the CFG the call
     argument computations will no longer dominate the call because
     we add an abnormal incoming edge to the call.  So do not use SSA
     vars there.  */
  bool returns_twice = call_expr_flags (*expr_p) & ECF_RETURNS_TWICE;

  /* Gimplify the function arguments.  */
  if (nargs > 0)
    {
      for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
           PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
           PUSH_ARGS_REVERSED ? i-- : i++)
        {
          enum gimplify_status t;

          /* Avoid gimplifying the second argument to va_start, which needs
             to be the plain PARM_DECL.  */
          if ((i != 1) || !builtin_va_start_p)
            {
              t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p,
                                EXPR_LOCATION (*expr_p), ! returns_twice);

              if (t == GS_ERROR)
                ret = GS_ERROR;
            }
        }
    }

  /* Gimplify the static chain.  */
  if (CALL_EXPR_STATIC_CHAIN (*expr_p))
    {
      if (fndecl && !DECL_STATIC_CHAIN (fndecl))
        /* The callee does not use a static chain; drop it.  */
        CALL_EXPR_STATIC_CHAIN (*expr_p) = NULL;
      else
        {
          enum gimplify_status t;
          t = gimplify_arg (&CALL_EXPR_STATIC_CHAIN (*expr_p), pre_p,
                            EXPR_LOCATION (*expr_p), ! returns_twice);
          if (t == GS_ERROR)
            ret = GS_ERROR;
        }
    }

  /* Verify the function result.  */
  if (want_value && fndecl
      && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fnptrtype))))
    {
      error_at (loc, "using result of function returning %<void%>");
      ret = GS_ERROR;
    }

  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new_tree = fold_call_expr (input_location, *expr_p, !want_value);

      if (new_tree && new_tree != *expr_p)
        {
          /* There was a transformation of this call which computes the
             same value, but in a more efficient way.  Return and try
             again.  */
          *expr_p = new_tree;
          return GS_OK;
        }
    }
  else
    {
      *expr_p = error_mark_node;
      return GS_ERROR;
    }

  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR)
    {
      int flags = call_expr_flags (*expr_p);
      if (flags & (ECF_CONST | ECF_PURE)
          /* An infinite loop is considered a side effect.  */
          && !(flags & (ECF_LOOPING_CONST_OR_PURE)))
        TREE_SIDE_EFFECTS (*expr_p) = 0;
    }

  /* If the value is not needed by the caller, emit a new GIMPLE_CALL
     and clear *EXPR_P.  Otherwise, leave *EXPR_P in its gimplified
     form and delegate the creation of a GIMPLE_CALL to
     gimplify_modify_expr.  This is always possible because when
     WANT_VALUE is true, the caller wants the result of this call into
     a temporary, which means that we will emit an INIT_EXPR in
     internal_get_tmp_var which will then be handled by
     gimplify_modify_expr.
*/
  if (!want_value)
    {
      /* The CALL_EXPR in *EXPR_P is already in GIMPLE form, so all we
         have to do is replicate it as a GIMPLE_CALL tuple.  */
      gimple_stmt_iterator gsi;
      call = gimple_build_call_from_tree (*expr_p, fnptrtype);
      notice_special_calls (call);
      gimplify_seq_add_stmt (pre_p, call);
      gsi = gsi_last (*pre_p);
      maybe_fold_stmt (&gsi);
      *expr_p = NULL_TREE;
    }
  else
    /* Remember the original function type.  */
    CALL_EXPR_FN (*expr_p) = build1 (NOP_EXPR, fnptrtype,
                                     CALL_EXPR_FN (*expr_p));

  return ret;
}

/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
   rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.

   TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
   condition is true or false, respectively.  If null, we should generate
   our own to skip over the evaluation of this specific expression.

   LOCUS is the source location of the COND_EXPR.

   This function is the tree equivalent of do_jump.

   shortcut_cond_r should only be called by shortcut_cond_expr.  */

static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p,
                 location_t locus)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a && b) into

         if (a); else goto no;
         if (b) goto yes; else goto no;
         (no:) */

      if (false_label_p == NULL)
        false_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the && on the second 'if'.  */
      new_locus = rexpr_location (pred, locus);
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
                           new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      location_t new_locus;

      /* Turn if (a || b) into

         if (a) goto yes;
         if (b) goto yes; else goto no;
         (yes:) */

      if (true_label_p == NULL)
        true_label_p = &local_label;

      /* Keep the original source location on the first 'if'.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL, locus);
      append_to_statement_list (t, &expr);

      /* Set the source location of the || on the second 'if'.  */
      new_locus = rexpr_location (pred, locus);
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p, false_label_p,
                           new_locus);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR
           && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 1)))
           && !VOID_TYPE_P (TREE_TYPE (TREE_OPERAND (pred, 2))))
    {
      location_t new_locus;

      /* As long as we're messing with gotos, turn if (a ? b : c) into
         if (a)
           if (b) goto yes; else goto no;
         else
           if (c) goto yes; else goto no;

         Don't do this if one of the arms has void type, which can happen
         in C++ when the arm is throw.  */

      /* Keep the original source location on the first 'if'.  Set the source
         location of the ? on the second 'if'.  */
      new_locus = rexpr_location (pred, locus);
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
                     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
                                      false_label_p, locus),
                     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
                                      false_label_p, new_locus));
    }
  else
    {
      /* Simple predicate: emit "if (pred) goto yes; else goto no;".  */
      expr = build3 (COND_EXPR, void_type_node, pred,
                     build_and_jump (true_label_p),
                     build_and_jump (false_label_p));
      SET_EXPR_LOCATION (expr, locus);
    }

  /* If we created a fall-through label above, emit it here.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* If EXPR is a GOTO_EXPR, return it.
If it is a STATEMENT_LIST, skip any of its leading DEBUG_BEGIN_STMTS and recurse on the subsequent statement, if it is the last one. Otherwise, return NULL. */ static tree find_goto (tree expr) { if (!expr) return NULL_TREE; if (TREE_CODE (expr) == GOTO_EXPR) return expr; if (TREE_CODE (expr) != STATEMENT_LIST) return NULL_TREE; tree_stmt_iterator i = tsi_start (expr); while (!tsi_end_p (i) && TREE_CODE (tsi_stmt (i)) == DEBUG_BEGIN_STMT) tsi_next (&i); if (!tsi_one_before_end_p (i)) return NULL_TREE; return find_goto (tsi_stmt (i)); } /* Same as find_goto, except that it returns NULL if the destination is not a LABEL_DECL. */ static inline tree find_goto_label (tree expr) { tree dest = find_goto (expr); if (dest && TREE_CODE (GOTO_DESTINATION (dest)) == LABEL_DECL) return dest; return NULL_TREE; } /* Given a conditional expression EXPR with short-circuit boolean predicates using TRUTH_ANDIF_EXPR or TRUTH_ORIF_EXPR, break the predicate apart into the equivalent sequence of conditionals. */ static tree shortcut_cond_expr (tree expr) { tree pred = TREE_OPERAND (expr, 0); tree then_ = TREE_OPERAND (expr, 1); tree else_ = TREE_OPERAND (expr, 2); tree true_label, false_label, end_label, t; tree *true_label_p; tree *false_label_p; bool emit_end, emit_false, jump_over_else; bool then_se = then_ && TREE_SIDE_EFFECTS (then_); bool else_se = else_ && TREE_SIDE_EFFECTS (else_); /* First do simple transformations. */ if (!else_se) { /* If there is no 'else', turn if (a && b) then c into if (a) if (b) then c. */ while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR) { /* Keep the original source location on the first 'if'. */ location_t locus = EXPR_LOC_OR_LOC (expr, input_location); TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1); /* Set the source location of the && on the second 'if'. 
*/
          if (rexpr_has_location (pred))
            SET_EXPR_LOCATION (expr, rexpr_location (pred));
          then_ = shortcut_cond_expr (expr);
          then_se = then_ && TREE_SIDE_EFFECTS (then_);
          pred = TREE_OPERAND (pred, 0);
          expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
          SET_EXPR_LOCATION (expr, locus);
        }
    }

  if (!then_se)
    {
      /* If there is no 'then', turn
           if (a || b); else d
         into
           if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
        {
          /* Keep the original source location on the first 'if'.  */
          location_t locus = EXPR_LOC_OR_LOC (expr, input_location);
          TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
          /* Set the source location of the || on the second 'if'.  */
          if (rexpr_has_location (pred))
            SET_EXPR_LOCATION (expr, rexpr_location (pred));
          else_ = shortcut_cond_expr (expr);
          else_se = else_ && TREE_SIDE_EFFECTS (else_);
          pred = TREE_OPERAND (pred, 0);
          expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
          SET_EXPR_LOCATION (expr, locus);
        }
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */

  if (tree then_goto = find_goto_label (then_))
    {
      true_label = GOTO_DESTINATION (then_goto);
      then_ = NULL;
      then_se = false;
    }

  if (tree else_goto = find_goto_label (else_))
    {
      false_label = GOTO_DESTINATION (else_goto);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls
     through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting
     code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p,
                            EXPR_LOC_OR_LOC (expr, input_location));

  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    t = expr_last (else_);
  else if (then_se)
    t = expr_last (then_);
  else
    t = NULL;
  if (t && TREE_CODE (t) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (t);

  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;

  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);

  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);

  pred = shortcut_cond_r (pred, true_label_p, false_label_p,
                          EXPR_LOC_OR_LOC (expr, input_location));

  expr = NULL;
  append_to_statement_list (pred, &expr);

  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
        {
          tree last = expr_last (expr);
          t = build_and_jump (&end_label);
          if (rexpr_has_location (last))
            SET_EXPR_LOCATION (t, rexpr_location (last));
          append_to_statement_list (t, &expr);
        }
      if (emit_false)
        {
          t = build1 (LABEL_EXPR, void_type_node, false_label);
          append_to_statement_list (t, &expr);
        }
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}

/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE.  */

tree
gimple_boolify (tree expr)
{
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);

  if (TREE_CODE (expr) == NE_EXPR
      && TREE_CODE (TREE_OPERAND (expr, 0)) == CALL_EXPR
      && integer_zerop (TREE_OPERAND (expr, 1)))
    {
      tree call = TREE_OPERAND (expr, 0);
      tree fn = get_callee_fndecl (call);

      /* For __builtin_expect ((long) (x), y) recurse into x as well
         if x is truth_value_p.  */
      if (fn
          && fndecl_built_in_p (fn, BUILT_IN_EXPECT)
          && call_expr_nargs (call) == 2)
        {
          tree arg = CALL_EXPR_ARG (call, 0);
          if (arg)
            {
              /* Strip the (long) cast the FE wrapped around X.  */
              if (TREE_CODE (arg) == NOP_EXPR
                  && TREE_TYPE (arg) == TREE_TYPE (call))
                arg = TREE_OPERAND (arg, 0);
              if (truth_value_p (TREE_CODE (arg)))
                {
                  arg = gimple_boolify (arg);
                  CALL_EXPR_ARG (call, 0)
                    = fold_convert_loc (loc, TREE_TYPE (call), arg);
                }
            }
        }
    }

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Also boolify the arguments of truth exprs.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      /* FALLTHRU */

    case TRUTH_NOT_EXPR:
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

      /* These expressions always produce boolean results.
*/ if (TREE_CODE (type) != BOOLEAN_TYPE) TREE_TYPE (expr) = boolean_type_node; return expr; case ANNOTATE_EXPR: switch ((enum annot_expr_kind) TREE_INT_CST_LOW (TREE_OPERAND (expr, 1))) { case annot_expr_ivdep_kind: case annot_expr_unroll_kind: case annot_expr_no_vector_kind: case annot_expr_vector_kind: case annot_expr_parallel_kind: TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0)); if (TREE_CODE (type) != BOOLEAN_TYPE) TREE_TYPE (expr) = boolean_type_node; return expr; default: gcc_unreachable (); } default: if (COMPARISON_CLASS_P (expr)) { /* There expressions always prduce boolean results. */ if (TREE_CODE (type) != BOOLEAN_TYPE) TREE_TYPE (expr) = boolean_type_node; return expr; } /* Other expressions that get here must have boolean values, but might need to be converted to the appropriate mode. */ if (TREE_CODE (type) == BOOLEAN_TYPE) return expr; return fold_convert_loc (loc, boolean_type_node, expr); } } /* Given a conditional expression *EXPR_P without side effects, gimplify its operands. New statements are inserted to PRE_P. */ static enum gimplify_status gimplify_pure_cond_expr (tree *expr_p, gimple_seq *pre_p) { tree expr = *expr_p, cond; enum gimplify_status ret, tret; enum tree_code code; cond = gimple_boolify (COND_EXPR_COND (expr)); /* We need to handle && and || specially, as their gimplification creates pure cond_expr, thus leading to an infinite cycle otherwise. */ code = TREE_CODE (cond); if (code == TRUTH_ANDIF_EXPR) TREE_SET_CODE (cond, TRUTH_AND_EXPR); else if (code == TRUTH_ORIF_EXPR) TREE_SET_CODE (cond, TRUTH_OR_EXPR); ret = gimplify_expr (&cond, pre_p, NULL, is_gimple_condexpr, fb_rvalue); COND_EXPR_COND (*expr_p) = cond; tret = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL, is_gimple_val, fb_rvalue); ret = MIN (ret, tret); tret = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL, is_gimple_val, fb_rvalue); return MIN (ret, tret); } /* Return true if evaluating EXPR could trap. 
EXPR is GENERIC, while tree_could_trap_p can be called
   only on GIMPLE.  */

bool
generic_expr_could_trap_p (tree expr)
{
  unsigned i, n;

  /* Bare GIMPLE values (constants, decls, SSA names) never trap.  */
  if (!expr || is_gimple_val (expr))
    return false;

  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  /* Recurse: any trapping operand makes the whole expression trap.  */
  n = TREE_OPERAND_LENGTH (expr);
  for (i = 0; i < n; i++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, i)))
      return true;

  return false;
}

/*  Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
    into

    if (p)                      if (p)
      t1 = a;                     a;
    else                or      else
      t1 = b;                     b;
    t1;

    The second form is used when *EXPR_P is of type void.

    PRE_P points to the list where side effects that must happen before
      *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_cond_expr (tree *expr_p, gimple_seq *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  location_t loc = EXPR_LOCATION (expr);
  tree tmp, arm1, arm2;
  enum gimplify_status ret;
  tree label_true, label_false, label_cont;
  bool have_then_clause_p, have_else_clause_p;
  gcond *cond_stmt;
  enum tree_code pred_code;
  gimple_seq seq = NULL;

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (!VOID_TYPE_P (type))
    {
      tree then_ = TREE_OPERAND (expr, 1), else_ = TREE_OPERAND (expr, 2);
      tree result;

      /* If either an rvalue is ok or we do not require an lvalue, create the
         temporary.  But we cannot do that if the type is addressable.  */
      if (((fallback & fb_rvalue) || !(fallback & fb_lvalue))
          && !TREE_ADDRESSABLE (type))
        {
          if (gimplify_ctxp->allow_rhs_cond_expr
              /* If either branch has side effects or could trap, it can't be
                 evaluated unconditionally.  */
              && !TREE_SIDE_EFFECTS (then_)
              && !generic_expr_could_trap_p (then_)
              && !TREE_SIDE_EFFECTS (else_)
              && !generic_expr_could_trap_p (else_))
            return gimplify_pure_cond_expr (expr_p, pre_p);

          tmp = create_tmp_var (type, "iftmp");
          result = tmp;
        }

      /* Otherwise, only create and copy references to the values:
         store the address of each arm in a pointer temporary and
         dereference it as the result.  */
      else
        {
          type = build_pointer_type (type);

          if (!VOID_TYPE_P (TREE_TYPE (then_)))
            then_ = build_fold_addr_expr_loc (loc, then_);

          if (!VOID_TYPE_P (TREE_TYPE (else_)))
            else_ = build_fold_addr_expr_loc (loc, else_);

          expr
            = build3 (COND_EXPR, type, TREE_OPERAND (expr, 0), then_, else_);

          tmp = create_tmp_var (type, "iftmp");
          result = build_simple_mem_ref_loc (loc, tmp);
        }

      /* Build the new then clause, `tmp = then_;'.  But don't build the
         assignment if the value is void; in C++ it can be if it's a throw.  */
      if (!VOID_TYPE_P (TREE_TYPE (then_)))
        TREE_OPERAND (expr, 1) = build2 (INIT_EXPR, type, tmp, then_);

      /* Similarly, build the new else clause, `tmp = else_;'.  */
      if (!VOID_TYPE_P (TREE_TYPE (else_)))
        TREE_OPERAND (expr, 2) = build2 (INIT_EXPR, type, tmp, else_);

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_stmt (&expr, pre_p);

      *expr_p = result;
      return GS_ALL_DONE;
    }

  /* Remove any COMPOUND_EXPR so the following cases will be caught.  */
  STRIP_TYPE_NOPS (TREE_OPERAND (expr, 0));
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == COMPOUND_EXPR)
    gimplify_compound_expr (&TREE_OPERAND (expr, 0), pre_p, true);

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
        {
          *expr_p = expr;

          /* We can't rely on gimplify_expr to re-gimplify the expanded
             form properly, as cleanups might cause the target labels to be
             wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
             set up a conditional context.  */
          gimple_push_condition ();
          gimplify_stmt (expr_p, &seq);
          gimple_pop_condition (pre_p);
          gimple_seq_add_seq (pre_p, seq);

          return GS_ALL_DONE;
        }
    }

  /* Now do the normal gimplification.  */

  /* Gimplify condition.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
                       is_gimple_condexpr_for_cond, fb_rvalue);
  if (ret == GS_ERROR)
    return GS_ERROR;
  gcc_assert (TREE_OPERAND (expr, 0) != NULL_TREE);

  gimple_push_condition ();

  have_then_clause_p = have_else_clause_p = false;
  label_true = find_goto_label (TREE_OPERAND (expr, 1));
  if (label_true
      && DECL_CONTEXT (GOTO_DESTINATION (label_true)) == current_function_decl
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
         have different locations, otherwise we end up with incorrect
         location information on the branches.  */
      && (optimize
          || !EXPR_HAS_LOCATION (expr)
          || !rexpr_has_location (label_true)
          || EXPR_LOCATION (expr) == rexpr_location (label_true)))
    {
      have_then_clause_p = true;
      label_true = GOTO_DESTINATION (label_true);
    }
  else
    label_true = create_artificial_label (UNKNOWN_LOCATION);
  label_false = find_goto_label (TREE_OPERAND (expr, 2));
  if (label_false
      && DECL_CONTEXT (GOTO_DESTINATION (label_false)) == current_function_decl
      /* For -O0 avoid this optimization if the COND_EXPR and GOTO_EXPR
         have different locations, otherwise we end up with incorrect
         location information on the branches.  */
      && (optimize
          || !EXPR_HAS_LOCATION (expr)
          || !rexpr_has_location (label_false)
          || EXPR_LOCATION (expr) == rexpr_location (label_false)))
    {
      have_else_clause_p = true;
      label_false = GOTO_DESTINATION (label_false);
    }
  else
    label_false = create_artificial_label (UNKNOWN_LOCATION);

  gimple_cond_get_ops_from_tree (COND_EXPR_COND (expr), &pred_code, &arm1,
                                 &arm2);
  cond_stmt = gimple_build_cond (pred_code, arm1, arm2, label_true,
                                 label_false);
  gimple_set_no_warning (cond_stmt, TREE_NO_WARNING (COND_EXPR_COND (expr)));
  gimplify_seq_add_stmt (&seq, cond_stmt);
  gimple_stmt_iterator gsi = gsi_last (seq);
  maybe_fold_stmt (&gsi);

  label_cont = NULL_TREE;
  if (!have_then_clause_p)
    {
      /* For if (...) {} else { code; } put label_true after
         the else block.  */
      if (TREE_OPERAND (expr, 1) == NULL_TREE
          && !have_else_clause_p
          && TREE_OPERAND (expr, 2) != NULL_TREE)
        label_cont = label_true;
      else
        {
          gimplify_seq_add_stmt (&seq, gimple_build_label (label_true));
          have_then_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 1), &seq);
          /* For if (...) { code; } else {} or
             if (...) { code; } else goto label; or
             if (...) { code; return; } else { ... }
             label_cont isn't needed.  */
          if (!have_else_clause_p
              && TREE_OPERAND (expr, 2) != NULL_TREE
              && gimple_seq_may_fallthru (seq))
            {
              gimple *g;
              label_cont = create_artificial_label (UNKNOWN_LOCATION);

              g = gimple_build_goto (label_cont);

              /* GIMPLE_COND's are very low level; they have embedded
                 gotos.  This particular embedded goto should not be marked
                 with the location of the original COND_EXPR, as it would
                 correspond to the COND_EXPR's condition, not the ELSE or the
                 THEN arms.  To avoid marking it with the wrong location, flag
                 it as "no location".  */
              gimple_set_do_not_emit_location (g);

              gimplify_seq_add_stmt (&seq, g);
            }
        }
    }
  if (!have_else_clause_p)
    {
      gimplify_seq_add_stmt (&seq, gimple_build_label (label_false));
      have_else_clause_p = gimplify_stmt (&TREE_OPERAND (expr, 2), &seq);
    }
  if (label_cont)
    gimplify_seq_add_stmt (&seq, gimple_build_label (label_cont));

  gimple_pop_condition (pre_p);
  gimple_seq_add_seq (pre_p, seq);

  if (ret == GS_ERROR)
    ; /* Do nothing.  */
  else if (have_then_clause_p || have_else_clause_p)
    ret = GS_ALL_DONE;
  else
    {
      /* Both arms are empty; replace the COND_EXPR with its predicate.  */
      expr = TREE_OPERAND (expr, 0);
      gimplify_stmt (&expr, pre_p);
    }

  *expr_p = NULL;
  return ret;
}

/* Prepare the node pointed to by EXPR_P, an is_gimple_addressable expression,
   to be marked addressable.

   We cannot rely on such an expression being directly markable if a temporary
   has been created by the gimplification.  In this case, we create another
   temporary and initialize it with a copy, which will become a store after we
   mark it addressable.
This can happen if the front-end passed us something that it could not
   mark addressable yet, like a Fortran pass-by-reference parameter (int)
   floatvar.  */

/* Walk *EXPR_P down to its base object and, if that base is currently a
   gimple register, force it into a freshly initialized temporary (queued
   on SEQ_P) that is flagged as not being a gimple register, so that its
   address can be taken afterwards.  */

static void
prepare_gimple_addressable (tree *expr_p, gimple_seq *seq_p)
{
  /* Strip handled components (COMPONENT_REF, ARRAY_REF, ...) to reach
     the underlying base object.  */
  while (handled_component_p (*expr_p))
    expr_p = &TREE_OPERAND (*expr_p, 0);
  if (is_gimple_reg (*expr_p))
    {
      /* Do not allow an SSA name as the temporary.  */
      tree var = get_initialized_tmp_var (*expr_p, seq_p, NULL, false);
      DECL_NOT_GIMPLE_REG_P (var) = 1;
      *expr_p = var;
    }
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memcpy.  SIZE is the number of bytes to copy,
   WANT_VALUE is true when the value of the assignment is used, and
   emitted statements are appended to SEQ_P.  Always returns
   GS_ALL_DONE.  */

static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  tree t, to, to_ptr, from, from_ptr;
  gcall *gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  to = TREE_OPERAND (*expr_p, 0);
  from = TREE_OPERAND (*expr_p, 1);

  /* Mark the RHS addressable.  Beware that it may not be possible to do so
     directly if a temporary has been created by the gimplification.  */
  prepare_gimple_addressable (&from, seq_p);

  mark_addressable (from);
  from_ptr = build_fold_addr_expr_loc (loc, from);
  gimplify_arg (&from_ptr, seq_p, loc);

  mark_addressable (to);
  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);

  t = builtin_decl_implicit (BUILT_IN_MEMCPY);

  gs = gimple_build_call (t, 3, to_ptr, from_ptr, size);
  /* NOTE(review): this sets the CALL_ALLOCA_FOR_VAR_P flag, which is
     normally associated with allocas emitted for variable-sized objects,
     on a memcpy call -- presumably intentional to influence later passes;
     confirm against the flag's documentation in gimple.h.  */
  gimple_call_set_alloca_for_var (gs, true);

  if (want_value)
    {
      /* tmp = memcpy() */
      t = create_tmp_var (TREE_TYPE (to_ptr));
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* Dereference the result pointer so the expression's value reads
	 the copied bytes.  */
      *expr_p = build_simple_mem_ref (t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_modify_expr.  Replace a MODIFY_EXPR with
   a call to __builtin_memset.  In this case we know that the RHS is
   a CONSTRUCTOR with an empty element list.
*/

/* SIZE is the number of bytes to zero, WANT_VALUE is true when the value
   of the assignment is used, and emitted statements go to SEQ_P.  Always
   returns GS_ALL_DONE.  */

static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value,
				gimple_seq *seq_p)
{
  tree t, from, to, to_ptr;
  gcall *gs;
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Assert our assumptions, to abort instead of producing wrong code
     silently if they are not met.  Beware that the RHS CONSTRUCTOR might
     not be immediately exposed.  */
  from = TREE_OPERAND (*expr_p, 1);
  if (TREE_CODE (from) == WITH_SIZE_EXPR)
    from = TREE_OPERAND (from, 0);

  gcc_assert (TREE_CODE (from) == CONSTRUCTOR
	      && vec_safe_is_empty (CONSTRUCTOR_ELTS (from)));

  /* Now proceed.  */
  to = TREE_OPERAND (*expr_p, 0);

  to_ptr = build_fold_addr_expr_loc (loc, to);
  gimplify_arg (&to_ptr, seq_p, loc);
  t = builtin_decl_implicit (BUILT_IN_MEMSET);

  gs = gimple_build_call (t, 3, to_ptr, integer_zero_node, size);

  if (want_value)
    {
      /* tmp = memset() */
      t = create_tmp_var (TREE_TYPE (to_ptr));
      gimple_call_set_lhs (gs, t);
      gimplify_seq_add_stmt (seq_p, gs);

      /* The value of the expression is the zeroed target itself.  */
      *expr_p = build1 (INDIRECT_REF, TREE_TYPE (to), t);
      return GS_ALL_DONE;
    }

  gimplify_seq_add_stmt (seq_p, gs);
  *expr_p = NULL;
  return GS_ALL_DONE;
}

/* A subroutine of gimplify_init_ctor_preeval.  Called via walk_tree,
   determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
   assignment.  Return non-null if we detect a potential overlap.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  alias_set_type lhs_alias_set;
};

/* walk_tree callback for gimplify_init_ctor_preeval.  XDATA points to a
   gimplify_init_ctor_preeval_data describing the LHS being initialized.  */

static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *data
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree t = *tp;

  /* If we find the base object, obviously we have overlap.  */
  if (data->lhs_base_decl == t)
    return t;

  /* If the constructor component is indirect, determine if we have a
     potential overlap with the lhs.
     The only bits of information we have to go on at this point are
     addressability and alias sets.  */
  if ((INDIRECT_REF_P (t)
       || TREE_CODE (t) == MEM_REF)
      && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
      && alias_sets_conflict_p (data->lhs_alias_set, get_alias_set (t)))
    return t;

  /* If the constructor component is a call, determine if it can hide a
     potential overlap with the lhs through an INDIRECT_REF like above.
     ??? Ugh - this is completely broken.  In fact this whole analysis
     doesn't look conservative.  */
  if (TREE_CODE (t) == CALL_EXPR)
    {
      tree type, fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (t)));

      /* Report a potential overlap if any pointer parameter's pointee
	 alias set conflicts with the LHS alias set.  */
      for (type = TYPE_ARG_TYPES (fntype); type; type = TREE_CHAIN (type))
	if (POINTER_TYPE_P (TREE_VALUE (type))
	    && (!data->lhs_base_decl || TREE_ADDRESSABLE (data->lhs_base_decl))
	    && alias_sets_conflict_p (data->lhs_alias_set,
				      get_alias_set
					(TREE_TYPE (TREE_VALUE (type)))))
	  return t;
    }

  /* Types and decls carry no subtrees we need to inspect.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL;
}

/* A subroutine of gimplify_init_constructor.  Pre-evaluate EXPR,
   force values that overlap with the lhs (as described by *DATA)
   into temporaries.  */

static void
gimplify_init_ctor_preeval (tree *expr_p, gimple_seq *pre_p,
			    gimple_seq *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is constant, then there's nothing to pre-evaluate.  */
  if (TREE_CONSTANT (*expr_p))
    {
      /* Ensure it does not have side effects, it might contain a reference
	 to the object we're initializing.  */
      gcc_assert (!TREE_SIDE_EFFECTS (*expr_p));
      return;
    }

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.
*/
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (*expr_p);

      /* Pre-evaluate each element of the nested constructor in turn.  */
      FOR_EACH_VEC_SAFE_ELT (v, ix, ce)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);

      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the LHS is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}

/* A subroutine of gimplify_init_ctor_eval.  Create a loop for
   a RANGE_EXPR in a CONSTRUCTOR for an array.

     var = lower;
   loop_entry:
     object[var] = value;
     if (var == upper)
       goto loop_exit;
     var = var + 1;
     goto loop_entry;
   loop_exit:

   We increment var _after_ the loop exit check because we might otherwise
   fail if upper == TYPE_MAX_VALUE (type for upper).
   Note that we never have to deal with SAVE_EXPRs here, because this has
   already been taken care of for us, in gimplify_init_ctor_preeval().  */

static void gimplify_init_ctor_eval (tree, vec<constructor_elt, va_gc> *,
				     gimple_seq *, bool);

/* OBJECT is the array being initialized, [LOWER, UPPER] the index range
   receiving VALUE, ARRAY_ELT_TYPE the element type; statements are
   appended to PRE_P, and CLEARED says whether the whole object was
   block-cleared first.  */

static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       gimple_seq *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label, fall_thru_label;
  tree var, var_type, cref, tmp;

  loop_entry_label = create_artificial_label (UNKNOWN_LOCATION);
  loop_exit_label = create_artificial_label (UNKNOWN_LOCATION);
  fall_thru_label = create_artificial_label (UNKNOWN_LOCATION);

  /* Create and initialize the index variable.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type);
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, lower));

  /* Add the loop entry label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_entry_label));

  /* Build the reference.  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */

  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    gimplify_seq_add_stmt (pre_p, gimple_build_assign (cref, value));

  /* We exit the loop when the index var is equal to the upper bound.  */
  gimplify_seq_add_stmt (pre_p,
			 gimple_build_cond (EQ_EXPR, var, upper,
					    loop_exit_label, fall_thru_label));

  gimplify_seq_add_stmt (pre_p, gimple_build_label (fall_thru_label));

  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (var, tmp));

  /* ...and jump back to the loop entry.
*/
  gimplify_seq_add_stmt (pre_p, gimple_build_goto (loop_entry_label));

  /* Add the loop exit label.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_label (loop_exit_label));
}

/* Return true if FDECL is accessing a field that is zero sized.  */

static bool
zero_sized_field_decl (const_tree fdecl)
{
  if (TREE_CODE (fdecl) == FIELD_DECL && DECL_SIZE (fdecl)
      && integer_zerop (DECL_SIZE (fdecl)))
    return true;
  return false;
}

/* Return true if TYPE is zero sized.  */

static bool
zero_sized_type (const_tree type)
{
  if (AGGREGATE_TYPE_P (type) && TYPE_SIZE (type)
      && integer_zerop (TYPE_SIZE (type)))
    return true;
  return false;
}

/* A subroutine of gimplify_init_constructor.  Generate individual
   MODIFY_EXPRs for a CONSTRUCTOR.  OBJECT is the LHS against which the
   assignments should happen.  ELTS is the CONSTRUCTOR_ELTS of the
   CONSTRUCTOR.  CLEARED is true if the entire LHS object has been
   zeroed first.  */

static void
gimplify_init_ctor_eval (tree object, vec<constructor_elt, va_gc> *elts,
			 gimple_seq *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  /* For arrays, remember the element type for building ARRAY_REFs.  */
  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* Zero elements can be skipped when the whole object was
	 block-cleared beforehand.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (!
TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose
	      = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
			      purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      /* Recurse into nested aggregate constructors; vector constructors
	 are assigned as a whole by the branch below.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  tree init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	  /* The INIT_EXPR node itself is dead once gimplified.  */
	  ggc_free (init);
	}
    }
}

/* Return the appropriate RHS predicate for this LHS.  */

gimple_predicate
rhs_predicate_for (tree lhs)
{
  if (is_gimple_reg (lhs))
    return is_gimple_reg_rhs_or_call;
  else
    return is_gimple_mem_rhs_or_call;
}

/* Return the initial guess for an appropriate RHS predicate for this LHS,
   before the LHS has been gimplified.  */

static gimple_predicate
initial_rhs_predicate_for (tree lhs)
{
  if (is_gimple_reg_type (TREE_TYPE (lhs)))
    return is_gimple_reg_rhs_or_call;
  else
    return is_gimple_mem_rhs_or_call;
}

/* Gimplify a C99 compound literal expression.  This just means adding the
   DECL_EXPR before the current statement and using its anonymous decl
   instead.
*/

/* GIMPLE_TEST_F is the gimple predicate the caller will use to validate
   the resulting expression; FALLBACK describes the lvalue/rvalue context
   the expression appears in.  */

static enum gimplify_status
gimplify_compound_literal_expr (tree *expr_p, gimple_seq *pre_p,
				bool (*gimple_test_f) (tree),
				fallback_t fallback)
{
  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (*expr_p);
  tree decl = DECL_EXPR_DECL (decl_s);
  tree init = DECL_INITIAL (decl);
  /* Mark the decl as addressable if the compound literal
     expression is addressable now, otherwise it is marked too late
     after we gimplify the initialization expression.  */
  if (TREE_ADDRESSABLE (*expr_p))
    TREE_ADDRESSABLE (decl) = 1;
  /* Otherwise, if we don't need an lvalue and have a literal directly
     substitute it.  Check if it matches the gimple predicate, as
     otherwise we'd generate a new temporary, and we can as well just
     use the decl we already have.  */
  else if (!TREE_ADDRESSABLE (decl)
	   && !TREE_THIS_VOLATILE (decl)
	   && init
	   && (fallback & fb_lvalue) == 0
	   && gimple_test_f (init))
    {
      *expr_p = init;
      return GS_OK;
    }

  /* If the decl is not addressable, then it is being used in some
     expression or on the right hand side of a statement, and it can
     be put into a readonly data section.  */
  if (!TREE_ADDRESSABLE (decl) && (fallback & fb_lvalue) == 0)
    TREE_READONLY (decl) = 1;

  /* This decl isn't mentioned in the enclosing block, so add it to the
     list of temps.  FIXME it seems a bit of a kludge to say that
     anonymous artificial vars aren't pushed, but everything else is.  */
  if (DECL_NAME (decl) == NULL_TREE && !DECL_SEEN_IN_BIND_EXPR_P (decl))
    gimple_add_tmp_var (decl);

  gimplify_and_add (decl_s, pre_p);
  *expr_p = decl;
  return GS_OK;
}

/* Optimize embedded COMPOUND_LITERAL_EXPRs within a CONSTRUCTOR,
   return a new CONSTRUCTOR if something changed.
*/

static tree
optimize_compound_literals_in_ctor (tree orig_ctor)
{
  tree ctor = orig_ctor;
  vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ctor);
  unsigned int idx, num = vec_safe_length (elts);

  for (idx = 0; idx < num; idx++)
    {
      tree value = (*elts)[idx].value;
      tree newval = value;
      if (TREE_CODE (value) == CONSTRUCTOR)
	newval = optimize_compound_literals_in_ctor (value);
      else if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
	{
	  tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (value);
	  tree decl = DECL_EXPR_DECL (decl_s);
	  tree init = DECL_INITIAL (decl);

	  /* Only substitute the literal's initializer when neither the
	     literal nor its decl can have their address taken.  */
	  if (!TREE_ADDRESSABLE (value)
	      && !TREE_ADDRESSABLE (decl)
	      && init
	      && TREE_CODE (init) == CONSTRUCTOR)
	    newval = optimize_compound_literals_in_ctor (init);
	}
      if (newval == value)
	continue;

      /* Copy-on-write: only clone the constructor (and its element
	 vector) the first time a replacement is actually made.  */
      if (ctor == orig_ctor)
	{
	  ctor = copy_node (orig_ctor);
	  CONSTRUCTOR_ELTS (ctor) = vec_safe_copy (elts);
	  elts = CONSTRUCTOR_ELTS (ctor);
	}
      (*elts)[idx].value = newval;
    }
  return ctor;
}

/* A subroutine of gimplify_modify_expr.  Break out elements of a
   CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.

   Note that we still need to clear any elements that don't have explicit
   initializers, so if not all elements are initialized we keep the
   original MODIFY_EXPR, we just remove all of the constructor elements.

   If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
   GS_ERROR if we would have to create a temporary when gimplifying
   this constructor.  Otherwise, return GS_OK.

   If NOTIFY_TEMP_CREATION is false, just do the gimplification.
*/

static enum gimplify_status
gimplify_init_constructor (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
			   bool want_value, bool notify_temp_creation)
{
  tree object, ctor, type;
  enum gimplify_status ret;
  vec<constructor_elt, va_gc> *elts;

  gcc_assert (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == CONSTRUCTOR);

  /* Gimplify the LHS first (skipped in notification-only mode, which
     must not emit anything).  */
  if (!notify_temp_creation)
    {
      ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  object = TREE_OPERAND (*expr_p, 0);
  ctor = TREE_OPERAND (*expr_p, 1)
    = optimize_compound_literals_in_ctor (TREE_OPERAND (*expr_p, 1));
  type = TREE_TYPE (ctor);
  elts = CONSTRUCTOR_ELTS (ctor);
  ret = GS_ALL_DONE;

  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	/* Use readonly data for initializers of this or smaller size
	   regardless of the num_nonzero_elements / num_unique_nonzero_elements
	   ratio.  */
	const HOST_WIDE_INT min_unique_size = 64;
	/* If num_nonzero_elements / num_unique_nonzero_elements ratio
	   is smaller than this, use readonly data.  */
	const int unique_nonzero_ratio = 8;
	/* True if a single access of the object must be ensured.  This is the
	   case if the target is volatile, the type is non-addressable and more
	   than one field need to be assigned.  */
	const bool ensure_single_access
	  = TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && vec_safe_length (elts) > 1;
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_ctor_elements, num_nonzero_elements;
	HOST_WIDE_INT num_unique_nonzero_elements;
	bool cleared, complete_p, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (vec_safe_is_empty (elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }

	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_unique_nonzero_elements,
				      &num_ctor_elements, &complete_p);

	/* If a const aggregate variable is being initialized, then it
	   should never be a lose to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && VAR_P (object)
	    && !DECL_REGISTER (object)
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object))
	    /* For ctors that have many repeated nonzero elements
	       represented through RANGE_EXPRs, prefer initializing
	       those through runtime loops over copies of large amounts
	       of data from readonly data section.  */
	    && (num_unique_nonzero_elements
		> num_nonzero_elements / unique_nonzero_ratio
		|| ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)
		    <= (unsigned HOST_WIDE_INT) min_unique_size)))
	  {
	    /* Promotion would avoid any temporary; in notification-only
	       mode report that no temporary is needed.  */
	    if (notify_temp_creation)
	      return GS_ERROR;

	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks at FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.
*/
	/* TODO.  There's code in cp/typeck.c to do this.  */

	/* Decide whether to block-clear the whole object before assigning
	   the individual nonzero elements.  */
	if (int_size_in_bytes (TREE_TYPE (ctor)) < 0)
	  /* store_constructor will ignore the clearing of variable-sized
	     objects.  Initializers for such objects must explicitly set
	     every field that needs to be set.  */
	  cleared = false;
	else if (!complete_p)
	  /* If the constructor isn't complete, clear the whole object
	     beforehand, unless CONSTRUCTOR_NO_CLEARING is set on it.

	     ??? This ought not to be needed.  For any element not present
	     in the initializer, we should simply set them to zero.  Except
	     we'd need to *find* the elements that are not present, and that
	     requires trickery to avoid quadratic compile-time behavior in
	     large cases or excessive memory use in small cases.  */
	  cleared = !CONSTRUCTOR_NO_CLEARING (ctor);
	else if (num_ctor_elements - num_nonzero_elements
		 > CLEAR_RATIO (optimize_function_for_speed_p (cfun))
		 && num_nonzero_elements < num_ctor_elements / 4)
	  /* If there are "lots" of zeros, it's more efficient to clear
	     the memory and then set the nonzero elements.  */
	  cleared = true;
	else if (ensure_single_access && num_nonzero_elements == 0)
	  /* If a single access to the target must be ensured and all elements
	     are zero, then it's optimal to clear whatever their number.  */
	  cleared = true;
	else
	  cleared = false;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  Also don't do this for small
	   all-zero initializers (which aren't big enough to merit
	   clearing), and don't try to make bitwise copies of
	   TREE_ADDRESSABLE types.  */
	if (valid_const_initializer
	    && complete_p
	    && !(cleared || num_nonzero_elements == 0)
	    && !TREE_ADDRESSABLE (type))
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ???
We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    /* Do a block move either if the size is so small as to make
	       each individual move a sub-unit move on average, or if it
	       is so large as to make individual moves inefficient.  */
	    if (size > 0
		&& num_nonzero_elements > 1
		/* For ctors that have many repeated nonzero elements
		   represented through RANGE_EXPRs, prefer initializing
		   those through runtime loops over copies of large amounts
		   of data from readonly data section.  */
		&& (num_unique_nonzero_elements
		    > num_nonzero_elements / unique_nonzero_ratio
		    || size <= min_unique_size)
		&& (size < num_nonzero_elements
		    || !can_move_by_pieces (size, align)))
	      {
		/* Dropping the initializer to static storage counts as
		   creating a temporary.  */
		if (notify_temp_creation)
		  return GS_ERROR;

		walk_tree (&ctor, force_labels_r, NULL, NULL);
		ctor = tree_output_constant_def (ctor);
		if (!useless_type_conversion_p (type, TREE_TYPE (ctor)))
		  ctor = build1 (VIEW_CONVERT_EXPR, type, ctor);
		TREE_OPERAND (*expr_p, 1) = ctor;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If a single access to the target must be ensured and there are
	   nonzero elements or the zero elements are not assigned en masse,
	   initialize the target from a temporary.
*/
	if (ensure_single_access && (num_nonzero_elements > 0 || !cleared))
	  {
	    if (notify_temp_creation)
	      return GS_ERROR;

	    /* Build the value in a temporary, then store the temporary
	       into the target with a single assignment.  */
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type));
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }

	if (notify_temp_creation)
	  return GS_OK;

	/* If there are nonzero elements and if needed, pre-evaluate to
	   capture elements overlapping with the lhs into temporaries.
	   We must do this before clearing to fetch the values before
	   they are zeroed-out.  */
	if (num_nonzero_elements > 0 && TREE_CODE (*expr_p) != INIT_EXPR)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	bool ctor_has_side_effects_p
	  = TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 1));

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    TREE_SIDE_EFFECTS (ctor) = 0;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, or if the constructor has side effects,
	   add assignments to the individual scalar fields of the object.  */
	if (!cleared
	    || num_nonzero_elements > 0
	    || ctor_has_side_effects_p)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	if (notify_temp_creation)
	  return GS_OK;

	/* Extract the real and imaginary parts out of the ctor.
*/
	gcc_assert (elts->length () == 2);
	r = (*elts)[0].value;
	i = (*elts)[1].value;
	/* Missing parts default to zero.  */
	if (r == NULL || i == NULL)
	  {
	    tree zero = build_zero_cst (TREE_TYPE (type));
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1),
				 pre_p, post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	if (notify_temp_creation)
	  return GS_OK;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		TREE_OPERAND (*expr_p, 1) = build_vector_from_ctor (type, elts);
		break;
	      }

	    TREE_CONSTANT (ctor) = 0;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	   compilation as a general initializer.  */
	FOR_EACH_VEC_SAFE_ELT (elts, ix, ce)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p,
				  is_gimple_val, fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	    /* An element that is no longer an initializer constant spoils
	       the constructor's eligibility for static storage.  */
	    else if (TREE_STATIC (ctor)
		     && !initializer_constant_valid_p (ce->value,
						       TREE_TYPE (ce->value)))
	      TREE_STATIC (ctor) = 0;
	  }
	if (!is_gimple_reg (TREE_OPERAND (*expr_p, 0)))
	  TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?
*/
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  /* If we have gimplified both sides of the initializer but have not
     emitted an assignment, do so now.  */
  if (*expr_p)
    {
      tree lhs = TREE_OPERAND (*expr_p, 0);
      tree rhs = TREE_OPERAND (*expr_p, 1);
      /* Unshare the LHS when it is also about to be returned as the
	 expression's value below.  */
      if (want_value && object == lhs)
	lhs = unshare_expr (lhs);
      gassign *init = gimple_build_assign (lhs, rhs);
      gimplify_seq_add_stmt (pre_p, init);
    }
  if (want_value)
    {
      *expr_p = object;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}

/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */

static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  /* Thin wrapper: all of the work is done by gimple_fold_indirect_ref.  */
  return gimple_fold_indirect_ref (t);
}

/* Subroutine of gimplify_modify_expr to do simplifications of
   MODIFY_EXPRs based on the code of the RHS.  We loop for as long as
   something changes.  */

static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p,
			  gimple_seq *pre_p, gimple_seq *post_p,
			  bool want_value)
{
  enum gimplify_status ret = GS_UNHANDLED;
  bool changed;

  do
    {
      changed = false;
      switch (TREE_CODE (*from_p))
	{
	case VAR_DECL:
	  /* If we're assigning from a read-only variable initialized with
	     a constructor and not volatile, do the direct assignment from
	     the constructor, but only if the target is not volatile either
	     since this latter assignment might end up being done on a
	     per field basis.  However, if the target is volatile and the
	     type is aggregate and non-addressable,
	     gimplify_init_constructor knows that it needs to ensure a
	     single access to the target and it will return GS_OK only
	     in this case.
*/ if (TREE_READONLY (*from_p) && DECL_INITIAL (*from_p) && TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR && !TREE_THIS_VOLATILE (*from_p) && (!TREE_THIS_VOLATILE (*to_p) || (AGGREGATE_TYPE_P (TREE_TYPE (*to_p)) && !TREE_ADDRESSABLE (TREE_TYPE (*to_p))))) { tree old_from = *from_p; enum gimplify_status subret; /* Move the constructor into the RHS. */ *from_p = unshare_expr (DECL_INITIAL (*from_p)); /* Let's see if gimplify_init_constructor will need to put it in memory. */ subret = gimplify_init_constructor (expr_p, NULL, NULL, false, true); if (subret == GS_ERROR) { /* If so, revert the change. */ *from_p = old_from; } else { ret = GS_OK; changed = true; } } break; case INDIRECT_REF: { /* If we have code like *(const A*)(A*)&x where the type of "x" is a (possibly cv-qualified variant of "A"), treat the entire expression as identical to "x". This kind of code arises in C++ when an object is bound to a const reference, and if "x" is a TARGET_EXPR we want to take advantage of the optimization below. */ bool volatile_p = TREE_THIS_VOLATILE (*from_p); tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0)); if (t) { if (TREE_THIS_VOLATILE (t) != volatile_p) { if (DECL_P (t)) t = build_simple_mem_ref_loc (EXPR_LOCATION (*from_p), build_fold_addr_expr (t)); if (REFERENCE_CLASS_P (t)) TREE_THIS_VOLATILE (t) = volatile_p; } *from_p = t; ret = GS_OK; changed = true; } break; } case TARGET_EXPR: { /* If we are initializing something from a TARGET_EXPR, strip the TARGET_EXPR and initialize it directly, if possible. This can't be done if the initializer is void, since that implies that the temporary is set in some non-trivial way. ??? What about code that pulls out the temp and uses it elsewhere? I think that such code never uses the TARGET_EXPR as an initializer. If I'm wrong, we'll die because the temp won't have any RTL. In that case, I guess we'll need to replace references somehow. 
*/ tree init = TARGET_EXPR_INITIAL (*from_p); if (init && (TREE_CODE (*expr_p) != MODIFY_EXPR || !TARGET_EXPR_NO_ELIDE (*from_p)) && !VOID_TYPE_P (TREE_TYPE (init))) { *from_p = init; ret = GS_OK; changed = true; } } break; case COMPOUND_EXPR: /* Remove any COMPOUND_EXPR in the RHS so the following cases will be caught. */ gimplify_compound_expr (from_p, pre_p, true); ret = GS_OK; changed = true; break; case CONSTRUCTOR: /* If we already made some changes, let the front end have a crack at this before we break it down. */ if (ret != GS_UNHANDLED) break; /* If we're initializing from a CONSTRUCTOR, break this into individual MODIFY_EXPRs. */ return gimplify_init_constructor (expr_p, pre_p, post_p, want_value, false); case COND_EXPR: /* If we're assigning to a non-register type, push the assignment down into the branches. This is mandatory for ADDRESSABLE types, since we cannot generate temporaries for such, but it saves a copy in other cases as well. */ if (!is_gimple_reg_type (TREE_TYPE (*from_p))) { /* This code should mirror the code in gimplify_cond_expr. */ enum tree_code code = TREE_CODE (*expr_p); tree cond = *from_p; tree result = *to_p; ret = gimplify_expr (&result, pre_p, post_p, is_gimple_lvalue, fb_lvalue); if (ret != GS_ERROR) ret = GS_OK; /* If we are going to write RESULT more than once, clear TREE_READONLY flag, otherwise we might incorrectly promote the variable to static const and initialize it at compile time in one of the branches. 
*/ if (VAR_P (result) && TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node && TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node) TREE_READONLY (result) = 0; if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node) TREE_OPERAND (cond, 1) = build2 (code, void_type_node, result, TREE_OPERAND (cond, 1)); if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node) TREE_OPERAND (cond, 2) = build2 (code, void_type_node, unshare_expr (result), TREE_OPERAND (cond, 2)); TREE_TYPE (cond) = void_type_node; recalculate_side_effects (cond); if (want_value) { gimplify_and_add (cond, pre_p); *expr_p = unshare_expr (result); } else *expr_p = cond; return ret; } break; case CALL_EXPR: /* For calls that return in memory, give *to_p as the CALL_EXPR's return slot so that we don't generate a temporary. */ if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p) && aggregate_value_p (*from_p, *from_p)) { bool use_target; if (!(rhs_predicate_for (*to_p))(*from_p)) /* If we need a temporary, *to_p isn't accurate. */ use_target = false; /* It's OK to use the return slot directly unless it's an NRV. */ else if (TREE_CODE (*to_p) == RESULT_DECL && DECL_NAME (*to_p) == NULL_TREE && needs_to_live_in_memory (*to_p)) use_target = true; else if (is_gimple_reg_type (TREE_TYPE (*to_p)) || (DECL_P (*to_p) && DECL_REGISTER (*to_p))) /* Don't force regs into memory. */ use_target = false; else if (TREE_CODE (*expr_p) == INIT_EXPR) /* It's OK to use the target directly if it's being initialized. */ use_target = true; else if (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (*to_p))) != INTEGER_CST) /* Always use the target and thus RSO for variable-sized types. GIMPLE cannot deal with a variable-sized assignment embedded in a call statement. 
*/ use_target = true; else if (TREE_CODE (*to_p) != SSA_NAME && (!is_gimple_variable (*to_p) || needs_to_live_in_memory (*to_p))) /* Don't use the original target if it's already addressable; if its address escapes, and the called function uses the NRV optimization, a conforming program could see *to_p change before the called function returns; see c++/19317. When optimizing, the return_slot pass marks more functions as safe after we have escape info. */ use_target = false; else use_target = true; if (use_target) { CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1; mark_addressable (*to_p); } } break; case WITH_SIZE_EXPR: /* Likewise for calls that return an aggregate of non-constant size, since we would not be able to generate a temporary at all. */ if (TREE_CODE (TREE_OPERAND (*from_p, 0)) == CALL_EXPR) { *from_p = TREE_OPERAND (*from_p, 0); /* We don't change ret in this case because the WITH_SIZE_EXPR might have been added in gimplify_modify_expr, so returning GS_OK would lead to an infinite loop. */ changed = true; } break; /* If we're initializing from a container, push the initialization inside it. */ case CLEANUP_POINT_EXPR: case BIND_EXPR: case STATEMENT_LIST: { tree wrap = *from_p; tree t; ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_min_lval, fb_lvalue); if (ret != GS_ERROR) ret = GS_OK; t = voidify_wrapper_expr (wrap, *expr_p); gcc_assert (t == *expr_p); if (want_value) { gimplify_and_add (wrap, pre_p); *expr_p = unshare_expr (*to_p); } else *expr_p = wrap; return GS_OK; } case COMPOUND_LITERAL_EXPR: { tree complit = TREE_OPERAND (*expr_p, 1); tree decl_s = COMPOUND_LITERAL_EXPR_DECL_EXPR (complit); tree decl = DECL_EXPR_DECL (decl_s); tree init = DECL_INITIAL (decl); /* struct T x = (struct T) { 0, 1, 2 } can be optimized into struct T x = { 0, 1, 2 } if the address of the compound literal has never been taken. 
  */
	  /* Safe only if neither the literal nor the target variable has
	     had its address taken, so no aliasing path can observe the
	     intermediate compound-literal object.  */
	  if (!TREE_ADDRESSABLE (complit)
	      && !TREE_ADDRESSABLE (decl)
	      && init)
	    {
	      *expr_p = copy_node (*expr_p);
	      TREE_OPERAND (*expr_p, 1) = init;
	      return GS_OK;
	    }
	}

      default:
	break;
      }
    }
  /* Keep re-examining the (possibly rewritten) RHS until no case above
     makes further progress.  */
  while (changed);

  return ret;
}

/* Return true if T looks like a valid GIMPLE statement.  */

static bool
is_gimple_stmt (tree t)
{
  const enum tree_code code = TREE_CODE (t);

  switch (code)
    {
    case NOP_EXPR:
      /* The only valid NOP_EXPR is the empty statement.  */
      return IS_EMPTY_STMT (t);

    case BIND_EXPR:
    case COND_EXPR:
      /* These are only valid if they're void.  */
      return TREE_TYPE (t) == NULL || VOID_TYPE_P (TREE_TYPE (t));

    case SWITCH_EXPR:
    case GOTO_EXPR:
    case RETURN_EXPR:
    case LABEL_EXPR:
    case CASE_LABEL_EXPR:
    case TRY_CATCH_EXPR:
    case TRY_FINALLY_EXPR:
    case EH_FILTER_EXPR:
    case CATCH_EXPR:
    case ASM_EXPR:
    case STATEMENT_LIST:
    case OACC_PARALLEL:
    case OACC_KERNELS:
    case OACC_SERIAL:
    case OACC_DATA:
    case OACC_HOST_DATA:
    case OACC_DECLARE:
    case OACC_UPDATE:
    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
    case OACC_CACHE:
    case OMP_PARALLEL:
    case OMP_FOR:
    case OMP_SIMD:
    case OMP_DISTRIBUTE:
    case OMP_LOOP:
    case OACC_LOOP:
    case OMP_SCAN:
    case OMP_SECTIONS:
    case OMP_SECTION:
    case OMP_SINGLE:
    case OMP_MASTER:
    case OMP_TASKGROUP:
    case OMP_ORDERED:
    case OMP_CRITICAL:
    case OMP_TASK:
    case OMP_TARGET:
    case OMP_TARGET_DATA:
    case OMP_TARGET_UPDATE:
    case OMP_TARGET_ENTER_DATA:
    case OMP_TARGET_EXIT_DATA:
    case OMP_TASKLOOP:
    case OMP_TEAMS:
      /* These are always void.  */
      return true;

    case CALL_EXPR:
    case MODIFY_EXPR:
    case PREDICT_EXPR:
      /* These are valid regardless of their type.  */
      return true;

    default:
      return false;
    }
}

/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a gimple register.

   IMPORTANT NOTE: This promotion is performed by introducing a load of the
   other, unmodified part of the complex object just before the total store.
   As a consequence, if the object is still uninitialized, an undefined value
   will be loaded into a register, which may result in a spurious exception
   if the register is floating-point and the value happens to be a signaling
   NaN for example.  Then the fully-fledged complex operations lowering pass
   followed by a DCE pass are necessary in order to fix things up.  */

static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, gimple_seq *pre_p,
				   bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = TREE_OPERAND (*expr_p, 0);
  rhs = TREE_OPERAND (*expr_p, 1);
  code = TREE_CODE (lhs);
  /* Strip the REAL/IMAGPART_EXPR to get at the underlying complex
     register.  */
  lhs = TREE_OPERAND (lhs, 0);

  /* Build an artificial load of the part NOT being assigned.  */
  ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  /* The artificial load may read an uninitialized part; don't warn.  */
  TREE_NO_WARNING (other) = 1;
  other = get_formal_tmp_var (other, pre_p);

  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ? other : rhs;

  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  /* Emit the total store and replace the original expression with the
     RHS value (if wanted) as a GENERIC value.  */
  gimplify_seq_add_stmt (pre_p, gimple_build_assign (lhs, new_rhs));
  *expr_p = (want_value) ? rhs : NULL_TREE;

  return GS_ALL_DONE;
}

/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.

      modify_expr
	      : varname '=' rhs
	      | '*' ID '=' rhs

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.

    WANT_VALUE is nonzero iff we want to use the value of this expression
	in another expression.
  */

static enum gimplify_status
gimplify_modify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
		      bool want_value)
{
  tree *from_p = &TREE_OPERAND (*expr_p, 1);
  tree *to_p = &TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;
  gimple *assign;
  location_t loc = EXPR_LOCATION (*expr_p);
  gimple_stmt_iterator gsi;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* Trying to simplify a clobber using normal logic doesn't work,
     so handle it here.  */
  if (TREE_CLOBBER_P (*from_p))
    {
      ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
      gcc_assert (!want_value);
      /* A clobber target must be a variable or a MEM_REF; otherwise take
	 its address into a temporary and clobber through that.  */
      if (!VAR_P (*to_p) && TREE_CODE (*to_p) != MEM_REF)
	{
	  tree addr = get_initialized_tmp_var (build_fold_addr_expr (*to_p),
					       pre_p, post_p);
	  *to_p = build_simple_mem_ref_loc (EXPR_LOCATION (*to_p), addr);
	}
      gimplify_seq_add_stmt (pre_p, gimple_build_assign (*to_p, *from_p));
      *expr_p = NULL;
      return GS_ALL_DONE;
    }

  /* Insert pointer conversions required by the middle-end that are not
     required by the frontend.  This fixes middle-end type checking for
     for example gcc.dg/redecl-6.c.  */
  if (POINTER_TYPE_P (TREE_TYPE (*to_p)))
    {
      STRIP_USELESS_TYPE_CONVERSION (*from_p);
      if (!useless_type_conversion_p (TREE_TYPE (*to_p), TREE_TYPE (*from_p)))
	*from_p = fold_convert_loc (loc, TREE_TYPE (*to_p), *from_p);
    }

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p))
      && !want_value
      /* Don't do this for calls that return addressable types, expand_call
	 relies on those having a lhs.  */
      && !(TREE_ADDRESSABLE (TREE_TYPE (*from_p))
	   && TREE_CODE (*from_p) == CALL_EXPR))
    {
      gimplify_stmt (from_p, pre_p);
      gimplify_stmt (to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.   Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must do here.  */
  maybe_with_size_expr (from_p);

  /* As a special case, we have to temporarily allow for assignments
     with a CALL_EXPR on the RHS.  Since in GIMPLE a function call is
     a toplevel statement, when gimplifying the GENERIC expression
     MODIFY_EXPR <a, CALL_EXPR <foo>>, we cannot create the tuple
     GIMPLE_ASSIGN <a, GIMPLE_CALL <foo>>.

     Instead, we need to create the tuple GIMPLE_CALL <a, foo>.  To prevent
     gimplify_expr from trying to create a new temporary for foo's LHS, we
     tell it that it should only gimplify until it reaches the CALL_EXPR.
     On return from gimplify_expr, the newly created GIMPLE_CALL <foo> will
     be the last statement in *PRE_P and all we need to do here is set 'a'
     to be its LHS.  */

  /* Gimplify the RHS first for C++17 and bug 71104.  */
  gimple_predicate initial_pred = initial_rhs_predicate_for (*to_p);
  ret = gimplify_expr (from_p, pre_p, post_p, initial_pred, fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Then gimplify the LHS.  */
  /* If we gimplified the RHS to a CALL_EXPR and that call may return
     twice we have to make sure to gimplify into non-SSA as otherwise
     the abnormal edge added later will make those defs not dominate
     their uses.
     ???  Technically this applies only to the registers used in the
     resulting non-register *TO_P.  */
  bool saved_into_ssa = gimplify_ctxp->into_ssa;
  if (saved_into_ssa
      && TREE_CODE (*from_p) == CALL_EXPR
      && call_expr_flags (*from_p) & ECF_RETURNS_TWICE)
    gimplify_ctxp->into_ssa = false;
  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  gimplify_ctxp->into_ssa = saved_into_ssa;
  if (ret == GS_ERROR)
    return ret;

  /* Now that the LHS is gimplified, re-gimplify the RHS if our initial
     guess for the predicate was wrong.  */
  gimple_predicate final_pred = rhs_predicate_for (*to_p);
  if (final_pred != initial_pred)
    {
      ret = gimplify_expr (from_p, pre_p, post_p, final_pred, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }

  /* In case of va_arg internal fn wrapped in a WITH_SIZE_EXPR, add the type
     size as argument to the call.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree call = TREE_OPERAND (*from_p, 0);
      tree vlasize = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (call) == CALL_EXPR
	  && CALL_EXPR_IFN (call) == IFN_VA_ARG)
	{
	  int nargs = call_expr_nargs (call);
	  tree type = TREE_TYPE (call);
	  tree ap = CALL_EXPR_ARG (call, 0);
	  tree tag = CALL_EXPR_ARG (call, 1);
	  tree aptag = CALL_EXPR_ARG (call, 2);
	  /* Rebuild the IFN_VA_ARG call with the VLA size appended as an
	     extra trailing argument.  */
	  tree newcall = build_call_expr_internal_loc (EXPR_LOCATION (call),
						       IFN_VA_ARG, type,
						       nargs + 1, ap, tag,
						       aptag, vlasize);
	  TREE_OPERAND (*from_p, 0) = newcall;
	}
    }

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value, pre_p);

      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value,
						 pre_p);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  /* Try to alleviate the effects of the gimplification creating artificial
     temporaries (see for example is_gimple_reg_rhs) on the debug info, but
     make sure not to create DECL_DEBUG_EXPR links across functions.  */
  if (!gimplify_ctxp->into_ssa
      && VAR_P (*from_p)
      && DECL_IGNORED_P (*from_p)
      && DECL_P (*to_p)
      && !DECL_IGNORED_P (*to_p)
      && decl_function_context (*to_p) == current_function_decl
      && decl_function_context (*from_p) == current_function_decl)
    {
      /* Give the artificial temporary a debug-friendly name derived from
	 the user variable it stands in for.  */
      if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
	DECL_NAME (*from_p)
	  = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
      DECL_HAS_DEBUG_EXPR_P (*from_p) = 1;
      SET_DECL_DEBUG_EXPR (*from_p, *to_p);
    }

  /* If we will return the value and the LHS is volatile, evaluate the RHS
     into a temporary first so the value isn't re-read from the volatile
     location.  */
  if (want_value && TREE_THIS_VOLATILE (*to_p))
    *from_p = get_initialized_tmp_var (*from_p, pre_p, post_p);

  if (TREE_CODE (*from_p) == CALL_EXPR)
    {
      /* Since the RHS is a CALL_EXPR, we need to create a GIMPLE_CALL
	 instead of a GIMPLE_ASSIGN.  */
      gcall *call_stmt;
      if (CALL_EXPR_FN (*from_p) == NULL_TREE)
	{
	  /* Gimplify internal functions created in the FEs.  */
	  int nargs = call_expr_nargs (*from_p), i;
	  enum internal_fn ifn = CALL_EXPR_IFN (*from_p);
	  auto_vec<tree> vargs (nargs);

	  for (i = 0; i < nargs; i++)
	    {
	      gimplify_arg (&CALL_EXPR_ARG (*from_p, i), pre_p,
			    EXPR_LOCATION (*from_p));
	      vargs.quick_push (CALL_EXPR_ARG (*from_p, i));
	    }
	  call_stmt = gimple_build_call_internal_vec (ifn, vargs);
	  gimple_call_set_nothrow (call_stmt, TREE_NOTHROW (*from_p));
	  gimple_set_location (call_stmt, EXPR_LOCATION (*expr_p));
	}
      else
	{
	  tree fnptrtype = TREE_TYPE (CALL_EXPR_FN (*from_p));
	  CALL_EXPR_FN (*from_p) = TREE_OPERAND (CALL_EXPR_FN (*from_p), 0);
	  STRIP_USELESS_TYPE_CONVERSION (CALL_EXPR_FN (*from_p));
	  tree fndecl = get_callee_fndecl (*from_p);
	  /* Lower __builtin_expect to the internal function so later
	     passes can consume it uniformly.  */
	  if (fndecl
	      && fndecl_built_in_p (fndecl, BUILT_IN_EXPECT)
	      && call_expr_nargs (*from_p) == 3)
	    call_stmt = gimple_build_call_internal (IFN_BUILTIN_EXPECT, 3,
						    CALL_EXPR_ARG (*from_p, 0),
						    CALL_EXPR_ARG (*from_p, 1),
						    CALL_EXPR_ARG (*from_p, 2));
	  else
	    {
	      call_stmt = gimple_build_call_from_tree (*from_p, fnptrtype);
	    }
	}
      notice_special_calls (call_stmt);
      if (!gimple_call_noreturn_p (call_stmt) || !should_remove_lhs_p (*to_p))
	gimple_call_set_lhs (call_stmt, *to_p);
      else if (TREE_CODE (*to_p) == SSA_NAME)
	/* The above is somewhat premature, avoid ICEing later for a
	   SSA name w/o a definition.  We may have uses in the GIMPLE IL.
	   ???  This doesn't make it a default-def.  */
	SSA_NAME_DEF_STMT (*to_p) = gimple_build_nop ();
      assign = call_stmt;
    }
  else
    {
      assign = gimple_build_assign (*to_p, *from_p);
      gimple_set_location (assign, EXPR_LOCATION (*expr_p));
      if (COMPARISON_CLASS_P (*from_p))
	gimple_set_no_warning (assign, TREE_NO_WARNING (*from_p));
    }

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* We should have got an SSA name from the start.  */
      gcc_assert (TREE_CODE (*to_p) == SSA_NAME
		  || ! gimple_in_ssa_p (cfun));
    }

  gimplify_seq_add_stmt (pre_p, assign);
  gsi = gsi_last (*pre_p);
  /* Give folding a chance at the statement we just emitted.  */
  maybe_fold_stmt (&gsi);

  if (want_value)
    {
      *expr_p = TREE_THIS_VOLATILE (*to_p) ? *from_p : unshare_expr (*to_p);
      return GS_OK;
    }
  else
    *expr_p = NULL;

  return GS_ALL_DONE;
}

/* Gimplify a comparison between two variable-sized objects.  Do this
   with a call to BUILT_IN_MEMCMP.  */

static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);
  tree t, arg, dest, src, expr;

  /* Compute the byte size of OP0, resolving PLACEHOLDER_EXPRs against
     the object itself.  */
  arg = TYPE_SIZE_UNIT (TREE_TYPE (op0));
  arg = unshare_expr (arg);
  arg = SUBSTITUTE_PLACEHOLDER_IN_EXPR (arg, op0);
  src = build_fold_addr_expr_loc (loc, op1);
  dest = build_fold_addr_expr_loc (loc, op0);
  t = builtin_decl_implicit (BUILT_IN_MEMCMP);
  t = build_call_expr_loc (loc, t, 3, dest, src, arg);

  /* Compare memcmp's result against zero with the original comparison
     code (EQ/NE).  */
  expr
    = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p), t, integer_zero_node);
  SET_EXPR_LOCATION (expr, loc);
  *expr_p = expr;

  return GS_OK;
}

/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */

static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  location_t loc = EXPR_LOCATION (*expr_p);
  tree op0 = TREE_OPERAND (*expr_p, 0);
  tree op1 = TREE_OPERAND (*expr_p, 1);

  tree type = TREE_TYPE (op0);
  /* Get the unsigned integer type with the same machine mode.  */
  tree scalar_type = lang_hooks.types.type_for_mode (TYPE_MODE (type), 1);

  op0 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op0);
  op1 = fold_build1_loc (loc, VIEW_CONVERT_EXPR, scalar_type, op1);

  *expr_p
    = fold_build2_loc (loc, TREE_CODE (*expr_p), TREE_TYPE (*expr_p), op0, op1);

  return GS_OK;
}

/* Gimplify an expression sequence.  This function gimplifies each
   expression and rewrites the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the
       expressions in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.
  */

static enum gimplify_status
gimplify_compound_expr (tree *expr_p, gimple_seq *pre_p, bool want_value)
{
  tree t = *expr_p;

  /* Walk down the right spine of nested COMPOUND_EXPRs, emitting each
     left-hand operand as a statement.  */
  do
    {
      tree *sub_p = &TREE_OPERAND (t, 0);

      if (TREE_CODE (*sub_p) == COMPOUND_EXPR)
	gimplify_compound_expr (sub_p, pre_p, false);
      else
	gimplify_stmt (sub_p, pre_p);

      t = TREE_OPERAND (t, 1);
    }
  while (TREE_CODE (t) == COMPOUND_EXPR);

  /* T is now the last (rightmost) expression of the sequence.  */
  *expr_p = t;
  if (want_value)
    return GS_OK;
  else
    {
      gimplify_stmt (expr_p, pre_p);
      return GS_ALL_DONE;
    }
}

/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
      *EXPR_P should be stored.  */

static enum gimplify_status
gimplify_save_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  enum gimplify_status ret = GS_ALL_DONE;
  tree val;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  val = TREE_OPERAND (*expr_p, 0);

  /* If the SAVE_EXPR has not been resolved, then evaluate it once.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      /* The operand may be a void-valued expression.  It is
	 being executed only for its side-effects.  */
      if (TREE_TYPE (val) == void_type_node)
	{
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_stmt, fb_none);
	  val = NULL;
	}
      else
	/* The temporary may not be an SSA name as later abnormal and EH
	   control flow may invalidate use/def domination.  When in SSA
	   form then assume there are no such issues and SAVE_EXPRs only
	   appear via GENERIC foldings.  */
	val = get_initialized_tmp_var (val, pre_p, post_p,
				       gimple_in_ssa_p (cfun));

      /* Mark resolved so later gimplifications of the same (shared)
	 SAVE_EXPR reuse the single evaluation.  */
      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;

  return ret;
}

/* Rewrite the ADDR_EXPR node pointed to by EXPR_P

      unary_expr
	      : ...
	      | '&' varname
	      ...

    PRE_P points to the list where side effects that must happen before
	*EXPR_P should be stored.

    POST_P points to the list where side effects that must happen after
	*EXPR_P should be stored.  */

static enum gimplify_status
gimplify_addr_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;
  location_t loc = EXPR_LOCATION (*expr_p);

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

	if (!useless_type_conversion_p (t_expr, t_op00))
	  op00 = fold_convert_loc (loc, TREE_TYPE (expr), op00);
	*expr_p = op00;
	ret = GS_OK;
      }
      break;

    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.

	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */

      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);

      *expr_p = fold_convert_loc (loc, TREE_TYPE (expr),
				  build_fold_addr_expr_loc (loc,
							TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;

    case MEM_REF:
      /* A MEM_REF at offset zero is equivalent to an INDIRECT_REF for
	 the purposes of '&*ptr' folding.  */
      if (integer_zerop (TREE_OPERAND (op0, 1)))
	goto do_indirect_ref;

      /* fall through */

    default:
      /* If we see a call to a declared builtin or see its address
	 being taken (we can unify those cases here) then we can mark
	 the builtin for implicit generation by GCC.  */
      if (TREE_CODE (op0) == FUNCTION_DECL
	  && fndecl_built_in_p (op0, BUILT_IN_NORMAL)
	  && builtin_decl_declared_p (DECL_FUNCTION_CODE (op0)))
	set_builtin_decl_implicit_p (DECL_FUNCTION_CODE (op0), true);

      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */

      /* Make the operand addressable.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret == GS_ERROR)
	break;

      /* Then mark it.  Beware that it may not be possible to do so directly
	 if a temporary has been created by the gimplification.  */
      prepare_gimple_addressable (&TREE_OPERAND (expr, 0), pre_p);

      op0 = TREE_OPERAND (expr, 0);

      /* For various reasons, the gimplification of the expression
	 may have made a new INDIRECT_REF.  */
      if (TREE_CODE (op0) == INDIRECT_REF
	  || (TREE_CODE (op0) == MEM_REF
	      && integer_zerop (TREE_OPERAND (op0, 1))))
	goto do_indirect_ref;

      mark_addressable (TREE_OPERAND (expr, 0));

      /* The FEs may end up building ADDR_EXPRs early on a decl with
	 an incomplete type.  Re-build ADDR_EXPRs in canonical form
	 here.  */
      if (!types_compatible_p (TREE_TYPE (op0), TREE_TYPE (TREE_TYPE (expr))))
	*expr_p = build_fold_addr_expr (op0);

      /* Make sure TREE_CONSTANT and TREE_SIDE_EFFECTS are set properly.  */
      recompute_tree_invariant_for_addr_expr (*expr_p);

      /* If we re-built the ADDR_EXPR add a conversion to the original type
	 if required.  */
      if (!useless_type_conversion_p (TREE_TYPE (expr), TREE_TYPE (*expr_p)))
	*expr_p = fold_convert (TREE_TYPE (expr), *expr_p);

      break;
    }

  return ret;
}

/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.
  */

static enum gimplify_status
gimplify_asm_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p)
{
  tree expr;
  int noutputs;
  const char **oconstraints;
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;
  gasm *stmt;
  vec<tree, va_gc> *inputs;
  vec<tree, va_gc> *outputs;
  vec<tree, va_gc> *clobbers;
  vec<tree, va_gc> *labels;
  tree link_next;

  expr = *expr_p;
  noutputs = list_length (ASM_OUTPUTS (expr));
  oconstraints = (const char **) alloca ((noutputs) * sizeof (const char *));

  inputs = NULL;
  outputs = NULL;
  clobbers = NULL;
  labels = NULL;

  ret = GS_ALL_DONE;
  link_next = NULL_TREE;
  /* First pass: gimplify the output operands.  I numbers operands across
     the output and input lists, as required for matching constraints.  */
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = link_next)
    {
      bool ok;
      size_t constraint_len;

      link_next = TREE_CHAIN (link);

      oconstraints[i]
	= constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      ok = parse_output_constraint (&constraint, i, 0, 0,
				    &allows_mem, &allows_reg, &is_inout);
      if (!ok)
	{
	  ret = GS_ERROR;
	  is_inout = false;
	}

      /* If we can't make copies, we can only accept memory.
	 Similarly for VLAs.  */
      tree outtype = TREE_TYPE (TREE_VALUE (link));
      if (outtype != error_mark_node
	  && (TREE_ADDRESSABLE (outtype)
	      || !COMPLETE_TYPE_P (outtype)
	      || !tree_fits_poly_uint64_p (TYPE_SIZE_UNIT (outtype))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory output %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      if (!allows_reg && allows_mem)
	mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in %<asm%> output %d", i);
	  ret = tret;
	}

      /* If the constraint does not allow memory make sure we gimplify
         it to a register if it is not already but its base is.  This
	 happens for complex and vector components.  */
      if (!allows_mem)
	{
	  tree op = TREE_VALUE (link);
	  if (! is_gimple_val (op)
	      && is_gimple_reg_type (TREE_TYPE (op))
	      && is_gimple_reg (get_base_address (op)))
	    {
	      /* Route the output through a fresh register temporary and
		 copy it back to OP after the asm (and pre-load it for
		 in/out operands).  */
	      tree tem = create_tmp_reg (TREE_TYPE (op));
	      tree ass;
	      if (is_inout)
		{
		  ass = build2 (MODIFY_EXPR, TREE_TYPE (tem),
				tem, unshare_expr (op));
		  gimplify_and_add (ass, pre_p);
		}
	      ass = build2 (MODIFY_EXPR, TREE_TYPE (tem), op, tem);
	      gimplify_and_add (ass, post_p);

	      TREE_VALUE (link) = tem;
	      tret = GS_OK;
	    }
	}

      vec_safe_push (outputs, link);
      TREE_CHAIN (link) = NULL_TREE;

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
 	     operands.  */
	  tree input;
	  /* Buffer big enough to format a 32-bit UINT_MAX into.  */
	  char buf[11];

	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%u", i);

	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  /* First pass over the alternatives: compute an upper
		     bound on the rewritten constraint's length.  */
		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  str = (char *) alloca (len);
		  /* Second pass: copy each alternative, substituting the
		     operand number for those that allow a register.  */
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      /* Temporarily prefix this alternative with '=' so
			 parse_output_constraint accepts it in isolation.  */
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);

		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    input = build_string (constraint_len - 1, constraint + 1);

	  free (p);

	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  link_next = NULL_TREE;
  /* Second pass: gimplify the input operands.  Note I keeps counting
     past the outputs, including the inputs synthesized above for in/out
     operands.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      tree intype = TREE_TYPE (TREE_VALUE (link));
      if (intype != error_mark_node
	  && (TREE_ADDRESSABLE (intype)
	      || !COMPLETE_TYPE_P (intype)
	      || !tree_fits_poly_uint64_p (TYPE_SIZE_UNIT (intype))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory input %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tree inputv = TREE_VALUE (link);
	  STRIP_NOPS (inputv);
	  if (TREE_CODE (inputv) == PREDECREMENT_EXPR
	      || TREE_CODE (inputv) == PREINCREMENT_EXPR
	      || TREE_CODE (inputv) == POSTDECREMENT_EXPR
	      || TREE_CODE (inputv) == POSTINCREMENT_EXPR
	      || TREE_CODE (inputv) == MODIFY_EXPR)
	    TREE_VALUE (link) = error_mark_node;
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  if (tret != GS_ERROR)
	    {
	      /* Unlike output operands, memory inputs are not guaranteed
		 to be lvalues by the FE, and while the expressions are
		 marked addressable there, if it is e.g. a statement
		 expression, temporaries in it might not end up being
		 addressable.  They might be already used in the IL and thus
		 it is too late to make them addressable now though.  */
	      tree x = TREE_VALUE (link);
	      while (handled_component_p (x))
		x = TREE_OPERAND (x, 0);
	      if (TREE_CODE (x) == MEM_REF
		  && TREE_CODE (TREE_OPERAND (x, 0)) == ADDR_EXPR)
		x = TREE_OPERAND (TREE_OPERAND (x, 0), 0);
	      if ((VAR_P (x)
		   || TREE_CODE (x) == PARM_DECL
		   || TREE_CODE (x) == RESULT_DECL)
		  && !TREE_ADDRESSABLE (x)
		  && is_gimple_reg (x))
		{
		  warning_at (EXPR_LOC_OR_LOC (TREE_VALUE (link),
					       input_location), 0,
			      "memory input %d is not directly addressable",
			      i);
		  prepare_gimple_addressable (&TREE_VALUE (link), pre_p);
		}
	    }
	  mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error_at (EXPR_LOC_OR_LOC (TREE_VALUE (link), input_location),
			"memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}

      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (inputs, link);
    }

  link_next = NULL_TREE;
  for (link = ASM_CLOBBERS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (clobbers, link);
    }

  link_next = NULL_TREE;
  for (link = ASM_LABELS (expr); link; ++i, link = link_next)
    {
      link_next = TREE_CHAIN (link);
      TREE_CHAIN (link) = NULL_TREE;
      vec_safe_push (labels, link);
    }

  /* Do not add ASMs with errors to the gimple IL stream.  */
  if (ret != GS_ERROR)
    {
      stmt = gimple_build_asm_vec (TREE_STRING_POINTER (ASM_STRING (expr)),
				   inputs, outputs, clobbers, labels);

      /* An asm without outputs is always treated as volatile.  */
      gimple_asm_set_volatile (stmt, ASM_VOLATILE_P (expr) || noutputs == 0);
      gimple_asm_set_input (stmt, ASM_INPUT_P (expr));
      gimple_asm_set_inline (stmt, ASM_INLINE_P (expr));

      gimplify_seq_add_stmt (pre_p, stmt);
    }

  return ret;
}

/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   GIMPLE_WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */

static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, gimple_seq *pre_p)
{
  gimple_stmt_iterator iter;
  gimple_seq body_sequence = NULL;

  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  gimple_seq old_cleanups = gimplify_ctxp->conditional_cleanups;
  bool old_in_cleanup_point_expr = gimplify_ctxp->in_cleanup_point_expr;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL;
  gimplify_ctxp->in_cleanup_point_expr = true;

  gimplify_stmt (&TREE_OPERAND (*expr_p, 0), &body_sequence);

  /* Restore the saved conditional context.  */
  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;
  gimplify_ctxp->in_cleanup_point_expr = old_in_cleanup_point_expr;

  /* Convert the WCE markers emitted during the body's gimplification into
     real try/finally (or try/catch) regions covering the rest of the
     sequence.  */
  for (iter = gsi_start (body_sequence); !gsi_end_p (iter); )
    {
      gimple *wce = gsi_stmt (iter);

      if (gimple_code (wce) == GIMPLE_WITH_CLEANUP_EXPR)
	{
	  if (gsi_one_before_end_p (iter))
	    {
	      /* Note that gsi_insert_seq_before and gsi_remove do not
		 scan operands, unlike some other sequence mutators.  */
	      if (!gimple_wce_cleanup_eh_only (wce))
		gsi_insert_seq_before_without_update (&iter,
						      gimple_wce_cleanup (wce),
						      GSI_SAME_STMT);
	      gsi_remove (&iter, true);
	      break;
	    }
	  else
	    {
	      gtry *gtry;
	      gimple_seq seq;
	      enum gimple_try_flags kind;

	      if (gimple_wce_cleanup_eh_only (wce))
		kind = GIMPLE_TRY_CATCH;
	      else
		kind = GIMPLE_TRY_FINALLY;
	      seq = gsi_split_seq_after (iter);

	      gtry = gimple_build_try (seq, gimple_wce_cleanup (wce), kind);
	      /* Do not use gsi_replace here, as it may scan operands.
		 We want to do a simple structural modification only.  */
	      gsi_set_stmt (&iter, gtry);
	      iter = gsi_start (gtry->eval);
	    }
	}
      else
	gsi_next (&iter);
    }

  gimplify_seq_add_seq (pre_p, body_sequence);
  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }
  else
    {
      *expr_p = NULL;
      return GS_ALL_DONE;
    }
}

/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  EH_ONLY is true if the cleanup should
   only be executed if an exception is thrown, not on normal exit.
   If FORCE_UNCOND is true perform the cleanup unconditionally;  this is
   only valid for clobbers.
*/ static void gimple_push_cleanup (tree var, tree cleanup, bool eh_only, gimple_seq *pre_p, bool force_uncond = false) { gimple *wce; gimple_seq cleanup_stmts = NULL; /* Errors can result in improperly nested cleanups. Which results in confusion when trying to resolve the GIMPLE_WITH_CLEANUP_EXPR. */ if (seen_error ()) return; if (gimple_conditional_context ()) { /* If we're in a conditional context, this is more complex. We only want to run the cleanup if we actually ran the initialization that necessitates it, but we want to run it after the end of the conditional context. So we wrap the try/finally around the condition and use a flag to determine whether or not to actually run the destructor. Thus test ? f(A()) : 0 becomes (approximately) flag = 0; try { if (test) { A::A(temp); flag = 1; val = f(temp); } else { val = 0; } } finally { if (flag) A::~A(temp); } val */ if (force_uncond) { gimplify_stmt (&cleanup, &cleanup_stmts); wce = gimple_build_wce (cleanup_stmts); gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce); } else { tree flag = create_tmp_var (boolean_type_node, "cleanup"); gassign *ffalse = gimple_build_assign (flag, boolean_false_node); gassign *ftrue = gimple_build_assign (flag, boolean_true_node); cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL); gimplify_stmt (&cleanup, &cleanup_stmts); wce = gimple_build_wce (cleanup_stmts); gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, ffalse); gimplify_seq_add_stmt (&gimplify_ctxp->conditional_cleanups, wce); gimplify_seq_add_stmt (pre_p, ftrue); /* Because of this manipulation, and the EH edges that jump threading cannot redirect, the temporary (VAR) will appear to be used uninitialized. Don't warn. 
*/ TREE_NO_WARNING (var) = 1; } } else { gimplify_stmt (&cleanup, &cleanup_stmts); wce = gimple_build_wce (cleanup_stmts); gimple_wce_set_cleanup_eh_only (wce, eh_only); gimplify_seq_add_stmt (pre_p, wce); } } /* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR. */ static enum gimplify_status gimplify_target_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p) { tree targ = *expr_p; tree temp = TARGET_EXPR_SLOT (targ); tree init = TARGET_EXPR_INITIAL (targ); enum gimplify_status ret; bool unpoison_empty_seq = false; gimple_stmt_iterator unpoison_it; if (init) { tree cleanup = NULL_TREE; /* TARGET_EXPR temps aren't part of the enclosing block, so add it to the temps list. Handle also variable length TARGET_EXPRs. */ if (!poly_int_tree_p (DECL_SIZE (temp))) { if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp))) gimplify_type_sizes (TREE_TYPE (temp), pre_p); gimplify_vla_decl (temp, pre_p); } else { /* Save location where we need to place unpoisoning. It's possible that a variable will be converted to needs_to_live_in_memory. */ unpoison_it = gsi_last (*pre_p); unpoison_empty_seq = gsi_end_p (unpoison_it); gimple_add_tmp_var (temp); } /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the expression is supposed to initialize the slot. */ if (VOID_TYPE_P (TREE_TYPE (init))) ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none); else { tree init_expr = build2 (INIT_EXPR, void_type_node, temp, init); init = init_expr; ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none); init = NULL; ggc_free (init_expr); } if (ret == GS_ERROR) { /* PR c++/28266 Make sure this is expanded only once. */ TARGET_EXPR_INITIAL (targ) = NULL_TREE; return GS_ERROR; } if (init) gimplify_and_add (init, pre_p); /* If needed, push the cleanup for the temp. 
*/ if (TARGET_EXPR_CLEANUP (targ)) { if (CLEANUP_EH_ONLY (targ)) gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ), CLEANUP_EH_ONLY (targ), pre_p); else cleanup = TARGET_EXPR_CLEANUP (targ); } /* Add a clobber for the temporary going out of scope, like gimplify_bind_expr. */ if (gimplify_ctxp->in_cleanup_point_expr && needs_to_live_in_memory (temp)) { if (flag_stack_reuse == SR_ALL) { tree clobber = build_clobber (TREE_TYPE (temp)); clobber = build2 (MODIFY_EXPR, TREE_TYPE (temp), temp, clobber); gimple_push_cleanup (temp, clobber, false, pre_p, true); } if (asan_poisoned_variables && DECL_ALIGN (temp) <= MAX_SUPPORTED_STACK_ALIGNMENT && !TREE_STATIC (temp) && dbg_cnt (asan_use_after_scope) && !gimplify_omp_ctxp) { tree asan_cleanup = build_asan_poison_call_expr (temp); if (asan_cleanup) { if (unpoison_empty_seq) unpoison_it = gsi_start (*pre_p); asan_poison_variable (temp, false, &unpoison_it, unpoison_empty_seq); gimple_push_cleanup (temp, asan_cleanup, false, pre_p); } } } if (cleanup) gimple_push_cleanup (temp, cleanup, false, pre_p); /* Only expand this once. */ TREE_OPERAND (targ, 3) = init; TARGET_EXPR_INITIAL (targ) = NULL_TREE; } else /* We should have expanded this before. */ gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp)); *expr_p = temp; return GS_OK; } /* Gimplification of expression trees. */ /* Gimplify an expression which appears at statement context. The corresponding GIMPLE statements are added to *SEQ_P. If *SEQ_P is NULL, a new sequence is allocated. Return true if we actually added a statement to the queue. */ bool gimplify_stmt (tree *stmt_p, gimple_seq *seq_p) { gimple_seq_node last; last = gimple_seq_last (*seq_p); gimplify_expr (stmt_p, seq_p, NULL, is_gimple_stmt, fb_none); return last != gimple_seq_last (*seq_p); } /* Add FIRSTPRIVATE entries for DECL in the OpenMP the surrounding parallels to CTX. If entries already exist, force them to be some flavor of private. If there is no enclosing parallel, do nothing. 
 */

void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  splay_tree_node n;

  if (decl == NULL || !DECL_P (decl) || ctx->region_type == ORT_NONE)
    return;

  /* Walk outward through the enclosing OMP contexts.  */
  do
    {
      n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
      if (n != NULL)
	{
	  if (n->value & GOVD_SHARED)
	    n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
	  else if (n->value & GOVD_MAP)
	    n->value |= GOVD_MAP_TO_ONLY;
	  else
	    /* Already some flavor of private; nothing more to force,
	       and no need to look further out.  */
	    return;
	}
      else if ((ctx->region_type & ORT_TARGET) != 0)
	{
	  if (ctx->defaultmap[GDMK_SCALAR] & GOVD_FIRSTPRIVATE)
	    omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);
	  else
	    omp_add_variable (ctx, decl, GOVD_MAP | GOVD_MAP_TO_ONLY);
	}
      else if (ctx->region_type != ORT_WORKSHARE
	       && ctx->region_type != ORT_TASKGROUP
	       && ctx->region_type != ORT_SIMD
	       && ctx->region_type != ORT_ACC
	       && !(ctx->region_type & ORT_TARGET_DATA))
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);

      ctx = ctx->outer_context;
    }
  while (ctx);
}

/* Similarly for each of the type sizes of TYPE.  */

static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid re-processing a type already seen in this context.  */
  if (ctx->privatized_types->add (type))
    return;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
      break;

    case ARRAY_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	  if (TREE_CODE (field) == FIELD_DECL)
	    {
	      omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	    }
      }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      break;

    default:
      break;
    }

  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}

/* Add an entry for DECL in the OMP context CTX with FLAGS.  */

static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (error_operand_p (decl) || ctx->region_type == ORT_NONE)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  Exception is a shared clause,
     there is nothing privatized in that case.  */
  if ((flags & GOVD_SHARED) == 0
      && (TREE_ADDRESSABLE (TREE_TYPE (decl))
	  || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl))))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      nflags = n->value | flags;
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  However, OpenACC permits
	 reduction variables to be used in data sharing clauses.  */
      gcc_assert ((ctx->region_type & ORT_ACC) != 0
		  || ((nflags & GOVD_DATA_SHARE_CLASS)
		      == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE))
		  || (flags & GOVD_DATA_SHARE_CLASS) == 0);
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL) && ctx->region_type != ORT_TASKGROUP)
	{
	  if (flags & GOVD_MAP)
	    nflags = GOVD_MAP | GOVD_MAP_TO_ONLY | GOVD_EXPLICIT;
	  else if (flags & GOVD_PRIVATE)
	    nflags = GOVD_PRIVATE;
	  else if (((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0
		    && (flags & GOVD_FIRSTPRIVATE))
		   || (ctx->region_type == ORT_TARGET_DATA
		       && (flags & GOVD_DATA_SHARE_CLASS) == 0))
	    nflags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  else
	    nflags = GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  /* A VLA's DECL_VALUE_EXPR is *ptr; register that pointer
	     replacement variable in the context too.  */
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_SHARED | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & (GOVD_LOCAL | GOVD_MAP))
	       && DECL_P (TYPE_SIZE_UNIT (TREE_TYPE (decl))))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if ((flags & (GOVD_MAP | GOVD_LOCAL)) == 0
	   && lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (DECL_P (t))
	    omp_notice_variable (ctx, t, true);
	}
    }

  /* Record or merge the flags for DECL in this context.  */
  if (n != NULL)
    n->value |= flags;
  else
    splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);

  /* For reductions clauses in OpenACC loop directives, by default create a
     copy clause on the enclosing parallel construct for carrying back the
     results.  */
  if (ctx->region_type == ORT_ACC && (flags & GOVD_REDUCTION))
    {
      struct gimplify_omp_ctx *outer_ctx = ctx->outer_context;
      while (outer_ctx)
	{
	  n = splay_tree_lookup (outer_ctx->variables, (splay_tree_key)decl);
	  if (n != NULL)
	    {
	      /* Ignore local variables and explicitly declared clauses.  */
	      if (n->value & (GOVD_LOCAL | GOVD_EXPLICIT))
		break;
	      else if (outer_ctx->region_type == ORT_ACC_KERNELS)
		{
		  /* According to the OpenACC spec, such a reduction variable
		     should already have a copy map on a kernels construct,
		     verify that here.  */
		  gcc_assert (!(n->value & GOVD_FIRSTPRIVATE)
			      && (n->value & GOVD_MAP));
		}
	      else if (outer_ctx->region_type == ORT_ACC_PARALLEL)
		{
		  /* Remove firstprivate and make it a copy map.  */
		  n->value &= ~GOVD_FIRSTPRIVATE;
		  n->value |= GOVD_MAP;
		}
	    }
	  else if (outer_ctx->region_type == ORT_ACC_PARALLEL)
	    {
	      splay_tree_insert (outer_ctx->variables, (splay_tree_key)decl,
				 GOVD_MAP | GOVD_SEEN);
	      break;
	    }
	  outer_ctx = outer_ctx->outer_context;
	}
    }
}

/* Notice a threadprivate variable DECL used in OMP context CTX.
   This just prints out diagnostics about threadprivate variable uses
   in untied tasks.  If DECL2 is non-NULL, prevent this warning
   on that variable.
 */

static bool
omp_notice_threadprivate_variable (struct gimplify_omp_ctx *ctx, tree decl,
				   tree decl2)
{
  splay_tree_node n;
  struct gimplify_omp_ctx *octx;

  /* Diagnose uses inside target regions or order(concurrent) regions;
     inserting DECL with value 0 suppresses repeated diagnostics.  */
  for (octx = ctx; octx; octx = octx->outer_context)
    if ((octx->region_type & ORT_TARGET) != 0
	|| octx->order_concurrent)
      {
	n = splay_tree_lookup (octx->variables, (splay_tree_key)decl);
	if (n == NULL)
	  {
	    if (octx->order_concurrent)
	      {
		error ("threadprivate variable %qE used in a region with"
		       " %<order(concurrent)%> clause", DECL_NAME (decl));
		inform (octx->location, "enclosing region");
	      }
	    else
	      {
		error ("threadprivate variable %qE used in target region",
		       DECL_NAME (decl));
		inform (octx->location, "enclosing target region");
	      }
	    splay_tree_insert (octx->variables, (splay_tree_key)decl, 0);
	  }
	if (decl2)
	  splay_tree_insert (octx->variables, (splay_tree_key)decl2, 0);
      }

  /* Only untied tasks get the additional diagnostic below.  */
  if (ctx->region_type != ORT_UNTIED_TASK)
    return false;
  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      error ("threadprivate variable %qE used in untied task",
	     DECL_NAME (decl));
      inform (ctx->location, "enclosing task");
      splay_tree_insert (ctx->variables, (splay_tree_key)decl, 0);
    }
  if (decl2)
    splay_tree_insert (ctx->variables, (splay_tree_key)decl2, 0);
  return false;
}

/* Return true if global var DECL is device resident.  */

static bool
device_resident_p (tree decl)
{
  tree attr = lookup_attribute ("oacc declare target", DECL_ATTRIBUTES (decl));

  if (!attr)
    return false;

  /* Walk the clause chain hanging off the attribute looking for a
     GOMP_MAP_DEVICE_RESIDENT map kind.  */
  for (tree t = TREE_VALUE (attr); t; t = TREE_PURPOSE (t))
    {
      tree c = TREE_VALUE (t);
      if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DEVICE_RESIDENT)
	return true;
    }

  return false;
}

/* Return true if DECL has an ACC DECLARE attribute.  */

static bool
is_oacc_declared (tree decl)
{
  tree t = TREE_CODE (decl) == MEM_REF ? TREE_OPERAND (decl, 0) : decl;
  tree declared = lookup_attribute ("oacc declare target", DECL_ATTRIBUTES (t));
  return declared != NULL_TREE;
}

/* Determine outer default flags for DECL mentioned in an OMP region
   but not declared in an enclosing clause.

   ??? Some compiler-generated variables (like SAVE_EXPRs) could be
   remapped firstprivate instead of shared.  To some extent this is
   addressed in omp_firstprivatize_type_sizes, but not effectively.  */

static unsigned
omp_default_clause (struct gimplify_omp_ctx *ctx, tree decl,
		    bool in_code, unsigned flags)
{
  enum omp_clause_default_kind default_kind = ctx->default_kind;
  enum omp_clause_default_kind kind;

  kind = lang_hooks.decls.omp_predetermined_sharing (decl);
  if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    default_kind = kind;
  else if (VAR_P (decl) && TREE_STATIC (decl) && DECL_IN_CONSTANT_POOL (decl))
    default_kind = OMP_CLAUSE_DEFAULT_SHARED;

  switch (default_kind)
    {
    case OMP_CLAUSE_DEFAULT_NONE:
      {
	const char *rtype;

	if (ctx->region_type & ORT_PARALLEL)
	  rtype = "parallel";
	else if ((ctx->region_type & ORT_TASKLOOP) == ORT_TASKLOOP)
	  rtype = "taskloop";
	else if (ctx->region_type & ORT_TASK)
	  rtype = "task";
	else if (ctx->region_type & ORT_TEAMS)
	  rtype = "teams";
	else
	  gcc_unreachable ();

	error ("%qE not specified in enclosing %qs",
	       DECL_NAME (lang_hooks.decls.omp_report_decl (decl)), rtype);
	inform (ctx->location, "enclosing %qs", rtype);
      }
      /* After diagnosing, fall through and treat the decl as shared so
	 compilation can proceed.  */
      /* FALLTHRU */
    case OMP_CLAUSE_DEFAULT_SHARED:
      flags |= GOVD_SHARED;
      break;
    case OMP_CLAUSE_DEFAULT_PRIVATE:
      flags |= GOVD_PRIVATE;
      break;
    case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
      flags |= GOVD_FIRSTPRIVATE;
      break;
    case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
      /* decl will be either GOVD_FIRSTPRIVATE or GOVD_SHARED.  */
      gcc_assert ((ctx->region_type & ORT_TASK) != 0);
      if (struct gimplify_omp_ctx *octx = ctx->outer_context)
	{
	  omp_notice_variable (octx, decl, in_code);
	  for (; octx; octx = octx->outer_context)
	    {
	      splay_tree_node n2;

	      n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl);
	      if ((octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)) != 0
		  && (n2 == NULL || (n2->value & GOVD_DATA_SHARE_CLASS) == 0))
		continue;
	      if (n2 && (n2->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED)
		{
		  flags |= GOVD_FIRSTPRIVATE;
		  goto found_outer;
		}
	      if ((octx->region_type & (ORT_PARALLEL | ORT_TEAMS)) != 0)
		{
		  flags |= GOVD_SHARED;
		  goto found_outer;
		}
	    }
	}

      if (TREE_CODE (decl) == PARM_DECL
	  || (!is_global_var (decl)
	      && DECL_CONTEXT (decl) == current_function_decl))
	flags |= GOVD_FIRSTPRIVATE;
      else
	flags |= GOVD_SHARED;
    found_outer:
      break;

    default:
      gcc_unreachable ();
    }

  return flags;
}

/* Determine outer default flags for DECL mentioned in an OACC region
   but not declared in an enclosing clause.  */

static unsigned
oacc_default_clause (struct gimplify_omp_ctx *ctx, tree decl, unsigned flags)
{
  const char *rkind;
  bool on_device = false;
  bool is_private = false;
  bool declared = is_oacc_declared (decl);
  tree type = TREE_TYPE (decl);

  if (lang_hooks.decls.omp_privatize_by_reference (decl))
    type = TREE_TYPE (type);

  /* For Fortran COMMON blocks, only used variables in those blocks are
     transfered and remapped.  The block itself will have a private clause to
     avoid transfering the data twice.
     The hook evaluates to false by default.  For a variable in Fortran's COMMON
     or EQUIVALENCE block, returns 'true' (as we have shared=false) - as only
     the variables in such a COMMON/EQUIVALENCE block shall be privatized not
     the whole block.  For C++ and Fortran, it can also be true under certain
     other conditions, if DECL_HAS_VALUE_EXPR.  */
  if (RECORD_OR_UNION_TYPE_P (type))
    is_private = lang_hooks.decls.omp_disregard_value_expr (decl, false);

  if ((ctx->region_type & (ORT_ACC_PARALLEL | ORT_ACC_KERNELS)) != 0
      && is_global_var (decl)
      && device_resident_p (decl)
      && !is_private)
    {
      on_device = true;
      flags |= GOVD_MAP_TO_ONLY;
    }

  switch (ctx->region_type)
    {
    case ORT_ACC_KERNELS:
      rkind = "kernels";

      if (is_private)
	flags |= GOVD_FIRSTPRIVATE;
      else if (AGGREGATE_TYPE_P (type))
	{
	  /* Aggregates default to 'present_or_copy', or 'present'.  */
	  if (ctx->default_kind != OMP_CLAUSE_DEFAULT_PRESENT)
	    flags |= GOVD_MAP;
	  else
	    flags |= GOVD_MAP | GOVD_MAP_FORCE_PRESENT;
	}
      else
	/* Scalars default to 'copy'.  */
	flags |= GOVD_MAP | GOVD_MAP_FORCE;

      break;

    case ORT_ACC_PARALLEL:
    case ORT_ACC_SERIAL:
      rkind = ctx->region_type == ORT_ACC_PARALLEL ? "parallel" : "serial";

      if (is_private)
	flags |= GOVD_FIRSTPRIVATE;
      else if (on_device || declared)
	flags |= GOVD_MAP;
      else if (AGGREGATE_TYPE_P (type))
	{
	  /* Aggregates default to 'present_or_copy', or 'present'.  */
	  if (ctx->default_kind != OMP_CLAUSE_DEFAULT_PRESENT)
	    flags |= GOVD_MAP;
	  else
	    flags |= GOVD_MAP | GOVD_MAP_FORCE_PRESENT;
	}
      else
	/* Scalars default to 'firstprivate'.  */
	flags |= GOVD_FIRSTPRIVATE;

      break;

    default:
      gcc_unreachable ();
    }

  if (DECL_ARTIFICIAL (decl))
    ; /* We can get compiler-generated decls, and should not complain
	 about them.  */
  else if (ctx->default_kind == OMP_CLAUSE_DEFAULT_NONE)
    {
      error ("%qE not specified in enclosing OpenACC %qs construct",
	     DECL_NAME (lang_hooks.decls.omp_report_decl (decl)), rkind);
      inform (ctx->location, "enclosing OpenACC %qs construct", rkind);
    }
  else if (ctx->default_kind == OMP_CLAUSE_DEFAULT_PRESENT)
    ; /* Handled above.  */
  else
    gcc_checking_assert (ctx->default_kind == OMP_CLAUSE_DEFAULT_SHARED);

  return flags;
}

/* Record the fact that DECL was used within the OMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.
   Return true if DECL is going to be remapped and thus DECL shouldn't
   be gimplified into its DECL_VALUE_EXPR (if any).  */

static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (error_operand_p (decl))
    return false;

  if (ctx->region_type == ORT_NONE)
    return lang_hooks.decls.omp_disregard_value_expr (decl, false);

  if (is_global_var (decl))
    {
      /* Threadprivate variables are predetermined.  */
      if (DECL_THREAD_LOCAL_P (decl))
	return omp_notice_threadprivate_variable (ctx, decl, NULL_TREE);

      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  if (ctx->region_type & ORT_ACC)
	    /* For OpenACC, defer expansion of value to avoid transfering
	       privatized common block data instead of im-/explicitly
	       transfered variables which are in common blocks.  */
	    ;
	  else
	    {
	      tree value = get_base_address (DECL_VALUE_EXPR (decl));

	      if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
		return omp_notice_threadprivate_variable (ctx, decl, value);
	    }
	}

      if (gimplify_omp_ctxp->outer_context == NULL
	  && VAR_P (decl)
	  && oacc_get_fn_attrib (current_function_decl))
	{
	  location_t loc = DECL_SOURCE_LOCATION (decl);

	  if (lookup_attribute ("omp declare target link",
				DECL_ATTRIBUTES (decl)))
	    {
	      error_at (loc,
			"%qE with %<link%> clause used in %<routine%> function",
			DECL_NAME (decl));
	      return false;
	    }
	  else if (!lookup_attribute ("omp declare target",
				      DECL_ATTRIBUTES (decl)))
	    {
	      error_at (loc,
			"%qE requires a %<declare%> directive for use "
			"in a %<routine%> function", DECL_NAME (decl));
	      return false;
	    }
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if ((ctx->region_type & ORT_TARGET) != 0)
    {
      if (ctx->region_type & ORT_ACC)
	/* For OpenACC, as remarked above, defer expansion.  */
	shared = false;
      else
	shared = true;

      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      if (n == NULL)
	{
	  /* DECL is not yet known in this target context: compute its
	     implicit data-mapping flags.  */
	  unsigned nflags = flags;
	  if ((ctx->region_type & ORT_ACC) == 0)
	    {
	      bool is_declare_target = false;
	      if (is_global_var (decl)
		  && varpool_node::get_create (decl)->offloadable)
		{
		  struct gimplify_omp_ctx *octx;
		  for (octx = ctx->outer_context;
		       octx; octx = octx->outer_context)
		    {
		      n = splay_tree_lookup (octx->variables,
					     (splay_tree_key)decl);
		      if (n
			  && (n->value & GOVD_DATA_SHARE_CLASS) != GOVD_SHARED
			  && (n->value & GOVD_DATA_SHARE_CLASS) != 0)
			break;
		    }
		  is_declare_target = octx == NULL;
		}
	      if (!is_declare_target)
		{
		  int gdmk;
		  enum omp_clause_defaultmap_kind kind;
		  /* Classify the decl for the defaultmap category.  */
		  if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
		      || (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
			  && (TREE_CODE (TREE_TYPE (TREE_TYPE (decl)))
			      == POINTER_TYPE)))
		    gdmk = GDMK_POINTER;
		  else if (lang_hooks.decls.omp_scalar_p (decl))
		    gdmk = GDMK_SCALAR;
		  else
		    gdmk = GDMK_AGGREGATE;
		  kind = lang_hooks.decls.omp_predetermined_mapping (decl);
		  if (kind != OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED)
		    {
		      if (kind == OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE)
			nflags |= GOVD_FIRSTPRIVATE;
		      else if (kind == OMP_CLAUSE_DEFAULTMAP_TO)
			nflags |= GOVD_MAP | GOVD_MAP_TO_ONLY;
		      else
			gcc_unreachable ();
		    }
		  else if (ctx->defaultmap[gdmk] == 0)
		    {
		      /* defaultmap(none) for this category: the decl must
			 be listed explicitly.  */
		      tree d = lang_hooks.decls.omp_report_decl (decl);
		      error ("%qE not specified in enclosing %<target%>",
			     DECL_NAME (d));
		      inform (ctx->location, "enclosing %<target%>");
		    }
		  else if (ctx->defaultmap[gdmk]
			   & (GOVD_MAP_0LEN_ARRAY | GOVD_FIRSTPRIVATE))
		    nflags |= ctx->defaultmap[gdmk];
		  else
		    {
		      gcc_assert (ctx->defaultmap[gdmk] & GOVD_MAP);
		      nflags |= ctx->defaultmap[gdmk] & ~GOVD_MAP;
		    }
		}
	    }

	  struct gimplify_omp_ctx *octx = ctx->outer_context;
	  if ((ctx->region_type & ORT_ACC) && octx)
	    {
	      /* Look in outer OpenACC contexts, to see if there's a
		 data attribute for this variable.  */
	      omp_notice_variable (octx, decl, in_code);

	      for (; octx; octx = octx->outer_context)
		{
		  if (!(octx->region_type & (ORT_TARGET_DATA | ORT_TARGET)))
		    break;
		  splay_tree_node n2
		    = splay_tree_lookup (octx->variables,
					 (splay_tree_key) decl);
		  if (n2)
		    {
		      if (octx->region_type == ORT_ACC_HOST_DATA)
			error ("variable %qE declared in enclosing "
			       "%<host_data%> region", DECL_NAME (decl));
		      nflags |= GOVD_MAP;
		      if (octx->region_type == ORT_ACC_DATA
			  && (n2->value & GOVD_MAP_0LEN_ARRAY))
			nflags |= GOVD_MAP_0LEN_ARRAY;
		      goto found_outer;
		    }
		}
	    }

	  if ((nflags & ~(GOVD_MAP_TO_ONLY | GOVD_MAP_FROM_ONLY
			  | GOVD_MAP_ALLOC_ONLY)) == flags)
	    {
	      /* No mapping was determined above; fall back to the default
		 clause handling (and check mappability).  */
	      tree type = TREE_TYPE (decl);

	      if (gimplify_omp_ctxp->target_firstprivatize_array_bases
		  && lang_hooks.decls.omp_privatize_by_reference (decl))
		type = TREE_TYPE (type);
	      if (!lang_hooks.types.omp_mappable_type (type))
		{
		  error ("%qD referenced in target region does not have "
			 "a mappable type", decl);
		  nflags |= GOVD_MAP | GOVD_EXPLICIT;
		}
	      else
		{
		  if ((ctx->region_type & ORT_ACC) != 0)
		    nflags = oacc_default_clause (ctx, decl, flags);
		  else
		    nflags |= GOVD_MAP;
		}
	    }
	found_outer:
	  omp_add_variable (ctx, decl, nflags);
	}
      else
	{
	  /* If nothing changed, there's nothing left to do.  */
	  if ((n->value & flags) == flags)
	    return ret;
	  flags |= n->value;
	  n->value = flags;
	}
      goto do_outer;
    }

  if (n == NULL)
    {
      if (ctx->region_type == ORT_WORKSHARE
	  || ctx->region_type == ORT_TASKGROUP
	  || ctx->region_type == ORT_SIMD
	  || ctx->region_type == ORT_ACC
	  || (ctx->region_type & ORT_TARGET_DATA) != 0)
	goto do_outer;

      flags = omp_default_clause (ctx, decl, in_code, flags);

      if ((flags & GOVD_PRIVATE)
	  && lang_hooks.decls.omp_private_outer_ref (decl))
	flags |= GOVD_PRIVATE_OUTER_REF;

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  /* First real (in_code) sighting of a decl already in the table: make
     sure its variable-sized bits are noticed too.  */
  if ((n->value & (GOVD_SEEN | GOVD_LOCAL)) == 0
      && (flags & (GOVD_SEEN | GOVD_LOCAL)) == GOVD_SEEN
      && DECL_SIZE (decl))
    {
      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	{
	  splay_tree_node n2;
	  tree t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	  n2->value |= GOVD_SEEN;
	}
      else if (lang_hooks.decls.omp_privatize_by_reference (decl)
	       && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
	       && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		   != INTEGER_CST))
	{
	  splay_tree_node n2;
	  tree t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  gcc_assert (DECL_P (t));
	  n2 = splay_tree_lookup (ctx->variables, (splay_tree_key) t);
	  if (n2)
	    omp_notice_variable (ctx, t, true);
	}
    }

  if (ctx->region_type & ORT_ACC)
    /* For OpenACC, as remarked above, defer expansion.  */
    shared = false;
  else
    shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if ((flags & GOVD_PRIVATE) && !(flags & GOVD_PRIVATE_OUTER_REF))
    return ret;
  if ((flags & (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
      == (GOVD_LINEAR | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
    return ret;
  if ((flags & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
		| GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
      == (GOVD_LASTPRIVATE | GOVD_LINEAR_LASTPRIVATE_NO_OUTER))
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}

/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.  */

static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl, int simd)
{
  splay_tree_node n;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      if (n->value & GOVD_SHARED)
	{
	  if (ctx == gimplify_omp_ctxp)
	    {
	      if (simd)
		error ("iteration variable %qE is predetermined linear",
		       DECL_NAME (decl));
	      else
		error ("iteration variable %qE should be private",
		       DECL_NAME (decl));
	      /* Force the iteration variable private so compilation can
		 continue after the diagnostic.  */
	      n->value = GOVD_PRIVATE;
	      return true;
	    }
	  else
	    return false;
	}
      else if ((n->value & GOVD_EXPLICIT) != 0
	       && (ctx == gimplify_omp_ctxp
		   || (ctx->region_type == ORT_COMBINED_PARALLEL
		       && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qE should not be firstprivate",
		   DECL_NAME (decl));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qE should not be reduction",
		   DECL_NAME (decl));
	  else if (simd != 1 && (n->value & GOVD_LINEAR) != 0)
	    error ("iteration variable %qE should not be linear",
		   DECL_NAME (decl));
	}
      return (ctx == gimplify_omp_ctxp
	      || (ctx->region_type == ORT_COMBINED_PARALLEL
		  && gimplify_omp_ctxp->outer_context == ctx));
    }

  if (ctx->region_type != ORT_WORKSHARE
      && ctx->region_type != ORT_TASKGROUP
      && ctx->region_type != ORT_SIMD
      && ctx->region_type != ORT_ACC)
    return false;
  else if (ctx->outer_context)
    return omp_is_private (ctx->outer_context, decl, simd);
  return false;
}

/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause.  */

static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl, bool copyprivate)
{
  splay_tree_node n;

  do
    {
      ctx = ctx->outer_context;
      if (ctx == NULL)
	{
	  if (is_global_var (decl))
	    return false;

	  /* References might be private, but might be shared too,
	     when checking for copyprivate, assume they might be
	     private, otherwise assume they might be shared.  */
	  if (copyprivate)
	    return true;

	  if (lang_hooks.decls.omp_privatize_by_reference (decl))
	    return false;

	  /* Treat C++ privatized non-static data members outside
	     of the privatization the same.  */
	  if (omp_member_access_dummy_var (decl))
	    return false;

	  return true;
	}

      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);

      if ((ctx->region_type & (ORT_TARGET | ORT_TARGET_DATA)) != 0
	  && (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0))
	continue;

      if (n != NULL)
	{
	  if ((n->value & GOVD_LOCAL) != 0
	      && omp_member_access_dummy_var (decl))
	    return false;
	  return (n->value & GOVD_SHARED) == 0;
	}
    }
  while (ctx->region_type == ORT_WORKSHARE
	 || ctx->region_type == ORT_TASKGROUP
	 || ctx->region_type == ORT_SIMD
	 || ctx->region_type == ORT_ACC);

  return false;
}

/* Callback for walk_tree to find a DECL_EXPR for the given DECL.  */

static tree
find_decl_expr (tree *tp, int *walk_subtrees, void *data)
{
  tree t = *tp;

  /* Stop the walk and report T if it is the DECL_EXPR declaring DATA.  */
  if (TREE_CODE (t) == DECL_EXPR && DECL_EXPR_DECL (t) == (tree) data)
    return t;

  /* Do not walk into types or other decls.  */
  if (IS_TYPE_OR_DECL_P (t))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* If *LIST_P contains any OpenMP depend clauses with iterators,
   lower all the depend clauses by populating corresponding depend
   array.  Returns 0 if there are no such depend clauses, or
   2 if all depend clauses should be removed, 1 otherwise.
*/

static int
gimplify_omp_depend (tree *list_p, gimple_seq *pre_p)
{
  tree c;
  gimple *g;
  /* Per-kind clause totals; index 0 = out/inout, 1 = mutexinoutset,
     2 = in, 3 = depobj (same mapping as the switch below).  */
  size_t n[4] = { 0, 0, 0, 0 };
  bool unused[4];
  tree counts[4] = { NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE };
  tree last_iter = NULL_TREE, last_count = NULL_TREE;
  size_t i, j;
  location_t first_loc = UNKNOWN_LOCATION;

  /* First pass: compute, per depend kind, how many addresses will be
     stored in the depend array, expanding iterator ranges into counts.  */
  for (c = *list_p; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
      {
	switch (OMP_CLAUSE_DEPEND_KIND (c))
	  {
	  case OMP_CLAUSE_DEPEND_IN:
	    i = 2;
	    break;
	  case OMP_CLAUSE_DEPEND_OUT:
	  case OMP_CLAUSE_DEPEND_INOUT:
	    i = 0;
	    break;
	  case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	    i = 1;
	    break;
	  case OMP_CLAUSE_DEPEND_DEPOBJ:
	    i = 3;
	    break;
	  case OMP_CLAUSE_DEPEND_SOURCE:
	  case OMP_CLAUSE_DEPEND_SINK:
	    continue;
	  default:
	    gcc_unreachable ();
	  }
	tree t = OMP_CLAUSE_DECL (c);
	if (first_loc == UNKNOWN_LOCATION)
	  first_loc = OMP_CLAUSE_LOCATION (c);
	/* A TREE_LIST with a TREE_VEC purpose is a depend clause with
	   iterators; each TREE_VEC is (var, begin, end, step, orig_step,
	   block).  */
	if (TREE_CODE (t) == TREE_LIST
	    && TREE_PURPOSE (t)
	    && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
	  {
	    if (TREE_PURPOSE (t) != last_iter)
	      {
		tree tcnt = size_one_node;
		for (tree it = TREE_PURPOSE (t); it; it = TREE_CHAIN (it))
		  {
		    if (gimplify_expr (&TREE_VEC_ELT (it, 1), pre_p, NULL,
				       is_gimple_val, fb_rvalue) == GS_ERROR
			|| gimplify_expr (&TREE_VEC_ELT (it, 2), pre_p, NULL,
					  is_gimple_val, fb_rvalue) == GS_ERROR
			|| gimplify_expr (&TREE_VEC_ELT (it, 3), pre_p, NULL,
					  is_gimple_val, fb_rvalue) == GS_ERROR
			|| (gimplify_expr (&TREE_VEC_ELT (it, 4), pre_p, NULL,
					   is_gimple_val, fb_rvalue)
			    == GS_ERROR))
		      return 2;
		    tree var = TREE_VEC_ELT (it, 0);
		    tree begin = TREE_VEC_ELT (it, 1);
		    tree end = TREE_VEC_ELT (it, 2);
		    tree step = TREE_VEC_ELT (it, 3);
		    tree orig_step = TREE_VEC_ELT (it, 4);
		    tree type = TREE_TYPE (var);
		    tree stype = TREE_TYPE (step);
		    location_t loc = DECL_SOURCE_LOCATION (var);
		    tree endmbegin;
		    /* Compute count for this iterator as
		       orig_step > 0
		       ? (begin < end ? (end - begin + (step - 1)) / step : 0)
		       : (begin > end ? (end - begin + (step + 1)) / step : 0)
		       and compute product of those for the entire depend
		       clause.  */
		    if (POINTER_TYPE_P (type))
		      endmbegin = fold_build2_loc (loc, POINTER_DIFF_EXPR,
						   stype, end, begin);
		    else
		      endmbegin = fold_build2_loc (loc, MINUS_EXPR, type,
						   end, begin);
		    tree stepm1 = fold_build2_loc (loc, MINUS_EXPR, stype,
						   step,
						   build_int_cst (stype, 1));
		    tree stepp1 = fold_build2_loc (loc, PLUS_EXPR, stype, step,
						   build_int_cst (stype, 1));
		    tree pos = fold_build2_loc (loc, PLUS_EXPR, stype,
						unshare_expr (endmbegin),
						stepm1);
		    pos = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype,
					   pos, step);
		    tree neg = fold_build2_loc (loc, PLUS_EXPR, stype,
						endmbegin, stepp1);
		    if (TYPE_UNSIGNED (stype))
		      {
			/* For unsigned step type, negate both operands so the
			   truncating division rounds the right way.  */
			neg = fold_build1_loc (loc, NEGATE_EXPR, stype, neg);
			step = fold_build1_loc (loc, NEGATE_EXPR, stype, step);
		      }
		    neg = fold_build2_loc (loc, TRUNC_DIV_EXPR, stype,
					   neg, step);
		    step = NULL_TREE;
		    tree cond = fold_build2_loc (loc, LT_EXPR,
						 boolean_type_node,
						 begin, end);
		    pos = fold_build3_loc (loc, COND_EXPR, stype, cond, pos,
					   build_int_cst (stype, 0));
		    cond = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
					    end, begin);
		    neg = fold_build3_loc (loc, COND_EXPR, stype, cond, neg,
					   build_int_cst (stype, 0));
		    tree osteptype = TREE_TYPE (orig_step);
		    cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
					    orig_step,
					    build_int_cst (osteptype, 0));
		    tree cnt = fold_build3_loc (loc, COND_EXPR, stype,
						cond, pos, neg);
		    cnt = fold_convert_loc (loc, sizetype, cnt);
		    if (gimplify_expr (&cnt, pre_p, NULL, is_gimple_val,
				       fb_rvalue) == GS_ERROR)
		      return 2;
		    tcnt = size_binop_loc (loc, MULT_EXPR, tcnt, cnt);
		  }
		if (gimplify_expr (&tcnt, pre_p, NULL, is_gimple_val,
				   fb_rvalue) == GS_ERROR)
		  return 2;
		last_iter = TREE_PURPOSE (t);
		last_count = tcnt;
	      }
	    if (counts[i] == NULL_TREE)
	      counts[i] = last_count;
	    else
	      counts[i] = size_binop_loc (OMP_CLAUSE_LOCATION (c),
					  PLUS_EXPR, counts[i], last_count);
	  }
	else
	  n[i]++;
      }
  /* If no clause used iterators, there is nothing to lower.  */
  for (i = 0; i < 4; i++)
    if (counts[i])
      break;
  if (i == 4)
    return 0;

  tree total = size_zero_node;
  for (i = 0; i < 4; i++)
    {
      unused[i] = counts[i] == NULL_TREE && n[i] == 0;
      if (counts[i] == NULL_TREE)
	counts[i] = size_zero_node;
      if (n[i])
	counts[i] = size_binop (PLUS_EXPR, counts[i], size_int (n[i]));
      if (gimplify_expr (&counts[i], pre_p, NULL, is_gimple_val,
			 fb_rvalue) == GS_ERROR)
	return 2;
      total = size_binop (PLUS_EXPR, total, counts[i]);
    }

  if (gimplify_expr (&total, pre_p, NULL, is_gimple_val, fb_rvalue)
      == GS_ERROR)
    return 2;

  /* is_old selects the short array layout used when only in/out-style
     kinds appear (no mutexinoutset and no depobj); the new layout
     reserves extra leading slots for the per-kind counts.  */
  bool is_old = unused[1] && unused[3];
  tree totalpx = size_binop (PLUS_EXPR, unshare_expr (total),
			     size_int (is_old ? 1 : 4));
  tree type = build_array_type (ptr_type_node, build_index_type (totalpx));
  tree array = create_tmp_var_raw (type);
  TREE_ADDRESSABLE (array) = 1;
  if (!poly_int_tree_p (totalpx))
    {
      /* Variable-length array: gimplify its size and register it as a VLA,
	 adding it to the innermost context that determines data sharing.  */
      if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (array)))
	gimplify_type_sizes (TREE_TYPE (array), pre_p);
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  while (ctx
		 && (ctx->region_type == ORT_WORKSHARE
		     || ctx->region_type == ORT_TASKGROUP
		     || ctx->region_type == ORT_SIMD
		     || ctx->region_type == ORT_ACC))
	    ctx = ctx->outer_context;
	  if (ctx)
	    omp_add_variable (ctx, array, GOVD_LOCAL | GOVD_SEEN);
	}
      gimplify_vla_decl (array, pre_p);
    }
  else
    gimple_add_tmp_var (array);
  /* Fill in the array header: old layout stores total at [0] and
     counts[0] at [1]; new layout stores 0 at [0], total at [1] and the
     first three per-kind counts at [2..4].  */
  tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
		   NULL_TREE);
  tree tem;
  if (!is_old)
    {
      tem = build2 (MODIFY_EXPR, void_type_node, r,
		    build_int_cst (ptr_type_node, 0));
      gimplify_and_add (tem, pre_p);
      r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
		  NULL_TREE);
    }
  tem = build2 (MODIFY_EXPR, void_type_node, r,
		fold_convert (ptr_type_node, total));
  gimplify_and_add (tem, pre_p);
  for (i = 1; i < (is_old ? 2 : 4); i++)
    {
      r = build4 (ARRAY_REF, ptr_type_node, array, size_int (i + !is_old),
		  NULL_TREE, NULL_TREE);
      tem = build2 (MODIFY_EXPR, void_type_node, r, counts[i - 1]);
      gimplify_and_add (tem, pre_p);
    }

  /* cnts[i] is the running store index into the array for kind I;
     kinds are stored consecutively after the header.  */
  tree cnts[4];
  for (j = 4; j; j--)
    if (!unused[j - 1])
      break;
  for (i = 0; i < 4; i++)
    {
      if (i && (i >= j || unused[i - 1]))
	{
	  cnts[i] = cnts[i - 1];
	  continue;
	}
      cnts[i] = create_tmp_var (sizetype);
      if (i == 0)
	g = gimple_build_assign (cnts[i], size_int (is_old ? 2 : 5));
      else
	{
	  tree t;
	  if (is_old)
	    t = size_binop (PLUS_EXPR, counts[0], size_int (2));
	  else
	    t = size_binop (PLUS_EXPR, cnts[i - 1], counts[i - 1]);
	  if (gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue)
	      == GS_ERROR)
	    return 2;
	  g = gimple_build_assign (cnts[i], t);
	}
      gimple_seq_add_stmt (pre_p, g);
    }

  /* Second pass: emit code that stores the address of every depended-on
     object into the array, expanding iterator clauses into loops.  */
  last_iter = NULL_TREE;
  tree last_bind = NULL_TREE;
  tree *last_body = NULL;
  for (c = *list_p; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
      {
	switch (OMP_CLAUSE_DEPEND_KIND (c))
	  {
	  case OMP_CLAUSE_DEPEND_IN:
	    i = 2;
	    break;
	  case OMP_CLAUSE_DEPEND_OUT:
	  case OMP_CLAUSE_DEPEND_INOUT:
	    i = 0;
	    break;
	  case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	    i = 1;
	    break;
	  case OMP_CLAUSE_DEPEND_DEPOBJ:
	    i = 3;
	    break;
	  case OMP_CLAUSE_DEPEND_SOURCE:
	  case OMP_CLAUSE_DEPEND_SINK:
	    continue;
	  default:
	    gcc_unreachable ();
	  }
	tree t = OMP_CLAUSE_DECL (c);
	if (TREE_CODE (t) == TREE_LIST
	    && TREE_PURPOSE (t)
	    && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
	  {
	    /* Iterator clause: (re)build the loop nest for this iterator
	       vector unless it is shared with the previous clause.  */
	    if (TREE_PURPOSE (t) != last_iter)
	      {
		if (last_bind)
		  gimplify_and_add (last_bind, pre_p);
		tree block = TREE_VEC_ELT (TREE_PURPOSE (t), 5);
		last_bind = build3 (BIND_EXPR, void_type_node,
				    BLOCK_VARS (block), NULL, block);
		TREE_SIDE_EFFECTS (last_bind) = 1;
		SET_EXPR_LOCATION (last_bind, OMP_CLAUSE_LOCATION (c));
		tree *p = &BIND_EXPR_BODY (last_bind);
		for (tree it = TREE_PURPOSE (t); it; it = TREE_CHAIN (it))
		  {
		    tree var = TREE_VEC_ELT (it, 0);
		    tree begin = TREE_VEC_ELT (it, 1);
		    tree end = TREE_VEC_ELT (it, 2);
		    tree step = TREE_VEC_ELT (it, 3);
		    tree orig_step = TREE_VEC_ELT (it, 4);
		    tree type = TREE_TYPE (var);
		    location_t loc = DECL_SOURCE_LOCATION (var);
		    /* Emit:
		       var = begin;
		       goto cond_label;
		       beg_label:
		       ...
		       var = var + step;
		       cond_label:
		       if (orig_step > 0) {
			 if (var < end) goto beg_label;
		       } else {
			 if (var > end) goto beg_label;
		       }
		       for each iterator, with inner iterators added to
		       the ... above.  */
		    tree beg_label = create_artificial_label (loc);
		    tree cond_label = NULL_TREE;
		    tem = build2_loc (loc, MODIFY_EXPR, void_type_node,
				      var, begin);
		    append_to_statement_list_force (tem, p);
		    tem = build_and_jump (&cond_label);
		    append_to_statement_list_force (tem, p);
		    tem = build1 (LABEL_EXPR, void_type_node, beg_label);
		    append_to_statement_list (tem, p);
		    tree bind = build3 (BIND_EXPR, void_type_node, NULL_TREE,
					NULL_TREE, NULL_TREE);
		    TREE_SIDE_EFFECTS (bind) = 1;
		    SET_EXPR_LOCATION (bind, loc);
		    append_to_statement_list_force (bind, p);
		    if (POINTER_TYPE_P (type))
		      tem = build2_loc (loc, POINTER_PLUS_EXPR, type,
					var,
					fold_convert_loc (loc, sizetype,
							  step));
		    else
		      tem = build2_loc (loc, PLUS_EXPR, type, var, step);
		    tem = build2_loc (loc, MODIFY_EXPR, void_type_node,
				      var, tem);
		    append_to_statement_list_force (tem, p);
		    tem = build1 (LABEL_EXPR, void_type_node, cond_label);
		    append_to_statement_list (tem, p);
		    tree cond = fold_build2_loc (loc, LT_EXPR,
						 boolean_type_node,
						 var, end);
		    tree pos
		      = fold_build3_loc (loc, COND_EXPR, void_type_node,
					 cond, build_and_jump (&beg_label),
					 void_node);
		    cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
					    var, end);
		    tree neg
		      = fold_build3_loc (loc, COND_EXPR, void_type_node,
					 cond, build_and_jump (&beg_label),
					 void_node);
		    tree osteptype = TREE_TYPE (orig_step);
		    cond = fold_build2_loc (loc, GT_EXPR, boolean_type_node,
					    orig_step,
					    build_int_cst (osteptype, 0));
		    tem = fold_build3_loc (loc, COND_EXPR, void_type_node,
					   cond, pos, neg);
		    append_to_statement_list_force (tem, p);
		    /* Inner iterators nest inside this BIND_EXPR.  */
		    p = &BIND_EXPR_BODY (bind);
		  }
		last_body = p;
	      }
	    last_iter = TREE_PURPOSE (t);
	    if (TREE_CODE (TREE_VALUE (t)) == COMPOUND_EXPR)
	      {
		append_to_statement_list (TREE_OPERAND (TREE_VALUE (t), 0),
					  last_body);
		TREE_VALUE (t) = TREE_OPERAND (TREE_VALUE (t), 1);
	      }
	    if (error_operand_p (TREE_VALUE (t)))
	      return 2;
	    TREE_VALUE (t) = build_fold_addr_expr (TREE_VALUE (t));
	    r = build4 (ARRAY_REF, ptr_type_node, array, cnts[i],
			NULL_TREE, NULL_TREE);
	    tem = build2_loc (OMP_CLAUSE_LOCATION (c), MODIFY_EXPR,
			      void_type_node, r, TREE_VALUE (t));
	    append_to_statement_list_force (tem, last_body);
	    tem = build2_loc (OMP_CLAUSE_LOCATION (c), MODIFY_EXPR,
			      void_type_node, cnts[i],
			      size_binop (PLUS_EXPR, cnts[i], size_int (1)));
	    append_to_statement_list_force (tem, last_body);
	    TREE_VALUE (t) = null_pointer_node;
	  }
	else
	  {
	    /* Non-iterator clause: store its address directly.  */
	    if (last_bind)
	      {
		gimplify_and_add (last_bind, pre_p);
		last_bind = NULL_TREE;
	      }
	    if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPOUND_EXPR)
	      {
		gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), pre_p,
			       NULL, is_gimple_val, fb_rvalue);
		OMP_CLAUSE_DECL (c) = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
	      }
	    if (error_operand_p (OMP_CLAUSE_DECL (c)))
	      return 2;
	    OMP_CLAUSE_DECL (c) = build_fold_addr_expr (OMP_CLAUSE_DECL (c));
	    if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL,
			       is_gimple_val, fb_rvalue) == GS_ERROR)
	      return 2;
	    r = build4 (ARRAY_REF, ptr_type_node, array, cnts[i],
			NULL_TREE, NULL_TREE);
	    tem = build2 (MODIFY_EXPR, void_type_node, r, OMP_CLAUSE_DECL (c));
	    gimplify_and_add (tem, pre_p);
	    g = gimple_build_assign (cnts[i],
				     size_binop (PLUS_EXPR, cnts[i],
						 size_int (1)));
	    gimple_seq_add_stmt (pre_p, g);
	  }
      }
  if (last_bind)
    gimplify_and_add (last_bind, pre_p);
  /* Emit a runtime consistency check: trap if the final store indexes do
     not match the counts computed in the first pass.  */
  tree cond = boolean_false_node;
  if (is_old)
    {
      if (!unused[0])
	cond = build2_loc (first_loc, NE_EXPR, boolean_type_node, cnts[0],
			   size_binop_loc (first_loc, PLUS_EXPR, counts[0],
					   size_int (2)));
      if (!unused[2])
	cond = build2_loc (first_loc, TRUTH_OR_EXPR, boolean_type_node, cond,
			   build2_loc (first_loc, NE_EXPR, boolean_type_node,
				       cnts[2],
				       size_binop_loc (first_loc, PLUS_EXPR,
						       totalpx,
						       size_int (1))));
    }
  else
    {
      tree prev = size_int (5);
      for (i = 0; i < 4; i++)
	{
	  if (unused[i])
	    continue;
	  prev = size_binop_loc (first_loc, PLUS_EXPR, counts[i], prev);
	  cond = build2_loc (first_loc, TRUTH_OR_EXPR, boolean_type_node, cond,
			     build2_loc (first_loc, NE_EXPR, boolean_type_node,
					 cnts[i], unshare_expr (prev)));
	}
    }
  tem = build3_loc (first_loc, COND_EXPR, void_type_node, cond,
		    build_call_expr_loc (first_loc,
					 builtin_decl_explicit (BUILT_IN_TRAP),
					 0), void_node);
  gimplify_and_add (tem, pre_p);
  /* Replace the lowered clauses with a single OMP_CLAUSE_DEPEND_LAST
     clause pointing at the populated array.  */
  c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DEPEND_KIND (c) = OMP_CLAUSE_DEPEND_LAST;
  OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
  OMP_CLAUSE_CHAIN (c) = *list_p;
  *list_p = c;
  return 1;
}

/* Insert a GOMP_MAP_ALLOC or GOMP_MAP_RELEASE node following a
   GOMP_MAP_STRUCT mapping.  C is an always_pointer mapping.  STRUCT_NODE is
   the struct node to insert the new mapping after (when the struct node is
   initially created).  PREV_NODE is the first of two or three mappings for a
   pointer, and is either:
     - the node before C, when a pair of mappings is used, e.g. for a C/C++
       array section.
     - not the node before C.  This is true when we have a reference-to-pointer
       type (with a mapping for the reference and for the pointer), or for
       Fortran derived-type mappings with a GOMP_MAP_TO_PSET.
   If SCP is non-null, the new node is inserted before *SCP.
   if SCP is null, the new node is inserted before PREV_NODE.
   The return type is:
     - PREV_NODE, if SCP is non-null.
     - The newly-created ALLOC or RELEASE node, if SCP is null.
     - The second newly-created ALLOC or RELEASE node, if we are mapping a
       reference to a pointer.  */

static tree
insert_struct_comp_map (enum tree_code code, tree c, tree struct_node,
			tree prev_node, tree *scp)
{
  /* On exit-data constructs release the mapping instead of allocating.  */
  enum gomp_map_kind mkind
    = (code == OMP_TARGET_EXIT_DATA || code == OACC_EXIT_DATA)
      ? GOMP_MAP_RELEASE : GOMP_MAP_ALLOC;
  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
  tree cl = scp ? prev_node : c2;
  OMP_CLAUSE_SET_MAP_KIND (c2, mkind);
  OMP_CLAUSE_DECL (c2) = unshare_expr (OMP_CLAUSE_DECL (c));
  OMP_CLAUSE_CHAIN (c2) = scp ? *scp : prev_node;
  /* For Fortran GOMP_MAP_TO_PSET mappings, copy the descriptor size;
     otherwise the new node covers just a pointer.  */
  if (OMP_CLAUSE_CHAIN (prev_node) != c
      && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
      && (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
	  == GOMP_MAP_TO_PSET))
    OMP_CLAUSE_SIZE (c2) = OMP_CLAUSE_SIZE (OMP_CLAUSE_CHAIN (prev_node));
  else
    OMP_CLAUSE_SIZE (c2) = TYPE_SIZE_UNIT (ptr_type_node);
  if (struct_node)
    OMP_CLAUSE_CHAIN (struct_node) = c2;

  /* We might need to create an additional mapping if we have a reference to a
     pointer (in C++).  Don't do this if we have something other than a
     GOMP_MAP_ALWAYS_POINTER though, i.e. a GOMP_MAP_TO_PSET.  */
  if (OMP_CLAUSE_CHAIN (prev_node) != c
      && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (prev_node)) == OMP_CLAUSE_MAP
      && ((OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
	   == GOMP_MAP_ALWAYS_POINTER)
	  || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (prev_node))
	      == GOMP_MAP_ATTACH_DETACH)))
    {
      tree c4 = OMP_CLAUSE_CHAIN (prev_node);
      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (c3, mkind);
      OMP_CLAUSE_DECL (c3) = unshare_expr (OMP_CLAUSE_DECL (c4));
      OMP_CLAUSE_SIZE (c3) = TYPE_SIZE_UNIT (ptr_type_node);
      OMP_CLAUSE_CHAIN (c3) = prev_node;
      if (!scp)
	OMP_CLAUSE_CHAIN (c2) = c3;
      else
	cl = c3;
    }

  if (scp)
    *scp = c2;

  return cl;
}

/* Strip ARRAY_REFS or an indirect ref off BASE, find the containing object,
   and set *BITPOSP and *POFFSETP to the bit offset of the access.
   If BASE_REF is non-NULL and the containing object is a reference, set
   *BASE_REF to that reference before dereferencing the object.
   If BASE_REF is NULL, check that the containing object is a COMPONENT_REF or
   has array type, else return NULL.  
*/

static tree
extract_base_bit_offset (tree base, tree *base_ref, poly_int64 *bitposp,
			 poly_offset_int *poffsetp)
{
  tree offset;
  poly_int64 bitsize, bitpos;
  machine_mode mode;
  int unsignedp, reversep, volatilep = 0;
  poly_offset_int poffset;

  if (base_ref)
    {
      *base_ref = NULL_TREE;

      /* Strip array accesses and an outer dereference to reach the
	 underlying object.  */
      while (TREE_CODE (base) == ARRAY_REF)
	base = TREE_OPERAND (base, 0);

      if (TREE_CODE (base) == INDIRECT_REF)
	base = TREE_OPERAND (base, 0);
    }
  else
    {
      /* Without BASE_REF, only accept a COMPONENT_REF of array type under
	 the ARRAY_REFs, or a dereferenced reference to a COMPONENT_REF.  */
      if (TREE_CODE (base) == ARRAY_REF)
	{
	  while (TREE_CODE (base) == ARRAY_REF)
	    base = TREE_OPERAND (base, 0);
	  if (TREE_CODE (base) != COMPONENT_REF
	      || TREE_CODE (TREE_TYPE (base)) != ARRAY_TYPE)
	    return NULL_TREE;
	}
      else if (TREE_CODE (base) == INDIRECT_REF
	       && TREE_CODE (TREE_OPERAND (base, 0)) == COMPONENT_REF
	       && (TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0)))
		   == REFERENCE_TYPE))
	base = TREE_OPERAND (base, 0);
    }

  /* Decompose the access into its innermost object plus bit position
     and (possibly variable) byte offset.  */
  base = get_inner_reference (base, &bitsize, &bitpos, &offset, &mode,
			      &unsignedp, &reversep, &volatilep);

  tree orig_base = base;

  /* Look through a dereference of a reference-typed DECL.  */
  if ((TREE_CODE (base) == INDIRECT_REF
       || (TREE_CODE (base) == MEM_REF
	   && integer_zerop (TREE_OPERAND (base, 1))))
      && DECL_P (TREE_OPERAND (base, 0))
      && TREE_CODE (TREE_TYPE (TREE_OPERAND (base, 0))) == REFERENCE_TYPE)
    base = TREE_OPERAND (base, 0);

  gcc_assert (offset == NULL_TREE || poly_int_tree_p (offset));

  if (offset)
    poffset = wi::to_poly_offset (offset);
  else
    poffset = 0;

  /* Fold whole bytes of the bit position into the byte offset.  */
  if (maybe_ne (bitpos, 0))
    poffset += bits_to_bytes_round_down (bitpos);

  *bitposp = bitpos;
  *poffsetp = poffset;

  /* Set *BASE_REF if BASE was a dereferenced reference variable.  */
  if (base_ref && orig_base != base)
    *base_ref = orig_base;

  return base;
}

/* Scan the OMP clauses in *LIST_P, installing mappings into a new
   and previous omp contexts.  
*/ static void gimplify_scan_omp_clauses (tree *list_p, gimple_seq *pre_p, enum omp_region_type region_type, enum tree_code code) { struct gimplify_omp_ctx *ctx, *outer_ctx; tree c; hash_map<tree, tree> *struct_map_to_clause = NULL; hash_set<tree> *struct_deref_set = NULL; tree *prev_list_p = NULL, *orig_list_p = list_p; int handled_depend_iterators = -1; int nowait = -1; ctx = new_omp_context (region_type); ctx->code = code; outer_ctx = ctx->outer_context; if (code == OMP_TARGET) { if (!lang_GNU_Fortran ()) ctx->defaultmap[GDMK_POINTER] = GOVD_MAP | GOVD_MAP_0LEN_ARRAY; ctx->defaultmap[GDMK_SCALAR] = GOVD_FIRSTPRIVATE; } if (!lang_GNU_Fortran ()) switch (code) { case OMP_TARGET: case OMP_TARGET_DATA: case OMP_TARGET_ENTER_DATA: case OMP_TARGET_EXIT_DATA: case OACC_DECLARE: case OACC_HOST_DATA: case OACC_PARALLEL: case OACC_KERNELS: ctx->target_firstprivatize_array_bases = true; default: break; } while ((c = *list_p) != NULL) { bool remove = false; bool notice_outer = true; const char *check_non_private = NULL; unsigned int flags; tree decl; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_PRIVATE: flags = GOVD_PRIVATE | GOVD_EXPLICIT; if (lang_hooks.decls.omp_private_outer_ref (OMP_CLAUSE_DECL (c))) { flags |= GOVD_PRIVATE_OUTER_REF; OMP_CLAUSE_PRIVATE_OUTER_REF (c) = 1; } else notice_outer = false; goto do_add; case OMP_CLAUSE_SHARED: flags = GOVD_SHARED | GOVD_EXPLICIT; goto do_add; case OMP_CLAUSE_FIRSTPRIVATE: flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT; check_non_private = "firstprivate"; goto do_add; case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)) switch (code) { case OMP_DISTRIBUTE: error_at (OMP_CLAUSE_LOCATION (c), "conditional %<lastprivate%> clause on " "%qs construct", "distribute"); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0; break; case OMP_TASKLOOP: error_at (OMP_CLAUSE_LOCATION (c), "conditional %<lastprivate%> clause on " "%qs construct", "taskloop"); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0; break; default: break; } flags = 
GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT; if (code != OMP_LOOP) check_non_private = "lastprivate"; decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) goto do_add; if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) && !lang_hooks.decls.omp_scalar_p (decl)) { error_at (OMP_CLAUSE_LOCATION (c), "non-scalar variable %qD in conditional " "%<lastprivate%> clause", decl); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 0; } if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)) flags |= GOVD_LASTPRIVATE_CONDITIONAL; if (outer_ctx && (outer_ctx->region_type == ORT_COMBINED_PARALLEL || ((outer_ctx->region_type & ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS)) && splay_tree_lookup (outer_ctx->variables, (splay_tree_key) decl) == NULL) { omp_add_variable (outer_ctx, decl, GOVD_SHARED | GOVD_SEEN); if (outer_ctx->outer_context) omp_notice_variable (outer_ctx->outer_context, decl, true); } else if (outer_ctx && (outer_ctx->region_type & ORT_TASK) != 0 && outer_ctx->combined_loop && splay_tree_lookup (outer_ctx->variables, (splay_tree_key) decl) == NULL) { omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN); if (outer_ctx->outer_context) omp_notice_variable (outer_ctx->outer_context, decl, true); } else if (outer_ctx && (outer_ctx->region_type == ORT_WORKSHARE || outer_ctx->region_type == ORT_ACC) && outer_ctx->combined_loop && splay_tree_lookup (outer_ctx->variables, (splay_tree_key) decl) == NULL && !omp_check_private (outer_ctx, decl, false)) { omp_add_variable (outer_ctx, decl, GOVD_LASTPRIVATE | GOVD_SEEN); if (outer_ctx->outer_context && (outer_ctx->outer_context->region_type == ORT_COMBINED_PARALLEL) && splay_tree_lookup (outer_ctx->outer_context->variables, (splay_tree_key) decl) == NULL) { struct gimplify_omp_ctx *octx = outer_ctx->outer_context; omp_add_variable (octx, decl, GOVD_SHARED | GOVD_SEEN); if (octx->outer_context) { octx = octx->outer_context; if (octx->region_type == ORT_WORKSHARE && octx->combined_loop && splay_tree_lookup (octx->variables, (splay_tree_key) decl) 
== NULL && !omp_check_private (octx, decl, false)) { omp_add_variable (octx, decl, GOVD_LASTPRIVATE | GOVD_SEEN); octx = octx->outer_context; if (octx && ((octx->region_type & ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS) && (splay_tree_lookup (octx->variables, (splay_tree_key) decl) == NULL)) { omp_add_variable (octx, decl, GOVD_SHARED | GOVD_SEEN); octx = octx->outer_context; } } if (octx) omp_notice_variable (octx, decl, true); } } else if (outer_ctx->outer_context) omp_notice_variable (outer_ctx->outer_context, decl, true); } goto do_add; case OMP_CLAUSE_REDUCTION: if (OMP_CLAUSE_REDUCTION_TASK (c)) { if (region_type == ORT_WORKSHARE) { if (nowait == -1) nowait = omp_find_clause (*list_p, OMP_CLAUSE_NOWAIT) != NULL_TREE; if (nowait && (outer_ctx == NULL || outer_ctx->region_type != ORT_COMBINED_PARALLEL)) { error_at (OMP_CLAUSE_LOCATION (c), "%<task%> reduction modifier on a construct " "with a %<nowait%> clause"); OMP_CLAUSE_REDUCTION_TASK (c) = 0; } } else if ((region_type & ORT_PARALLEL) != ORT_PARALLEL) { error_at (OMP_CLAUSE_LOCATION (c), "invalid %<task%> reduction modifier on construct " "other than %<parallel%>, %<for%> or %<sections%>"); OMP_CLAUSE_REDUCTION_TASK (c) = 0; } } if (OMP_CLAUSE_REDUCTION_INSCAN (c)) switch (code) { case OMP_SECTIONS: error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause on " "%qs construct", "sections"); OMP_CLAUSE_REDUCTION_INSCAN (c) = 0; break; case OMP_PARALLEL: error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause on " "%qs construct", "parallel"); OMP_CLAUSE_REDUCTION_INSCAN (c) = 0; break; case OMP_TEAMS: error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause on " "%qs construct", "teams"); OMP_CLAUSE_REDUCTION_INSCAN (c) = 0; break; case OMP_TASKLOOP: error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause on " "%qs construct", "taskloop"); OMP_CLAUSE_REDUCTION_INSCAN (c) = 0; break; default: break; } /* FALLTHRU */ case OMP_CLAUSE_IN_REDUCTION: case 
OMP_CLAUSE_TASK_REDUCTION: flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT; /* OpenACC permits reductions on private variables. */ if (!(region_type & ORT_ACC) /* taskgroup is actually not a worksharing region. */ && code != OMP_TASKGROUP) check_non_private = omp_clause_code_name[OMP_CLAUSE_CODE (c)]; decl = OMP_CLAUSE_DECL (c); if (TREE_CODE (decl) == MEM_REF) { tree type = TREE_TYPE (decl); if (gimplify_expr (&TYPE_MAX_VALUE (TYPE_DOMAIN (type)), pre_p, NULL, is_gimple_val, fb_rvalue, false) == GS_ERROR) { remove = true; break; } tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type)); if (DECL_P (v)) { omp_firstprivatize_variable (ctx, v); omp_notice_variable (ctx, v, true); } decl = TREE_OPERAND (decl, 0); if (TREE_CODE (decl) == POINTER_PLUS_EXPR) { if (gimplify_expr (&TREE_OPERAND (decl, 1), pre_p, NULL, is_gimple_val, fb_rvalue, false) == GS_ERROR) { remove = true; break; } v = TREE_OPERAND (decl, 1); if (DECL_P (v)) { omp_firstprivatize_variable (ctx, v); omp_notice_variable (ctx, v, true); } decl = TREE_OPERAND (decl, 0); } if (TREE_CODE (decl) == ADDR_EXPR || TREE_CODE (decl) == INDIRECT_REF) decl = TREE_OPERAND (decl, 0); } goto do_add_decl; case OMP_CLAUSE_LINEAR: if (gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) { remove = true; break; } else { if (code == OMP_SIMD && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { struct gimplify_omp_ctx *octx = outer_ctx; if (octx && octx->region_type == ORT_WORKSHARE && octx->combined_loop && !octx->distribute) { if (octx->outer_context && (octx->outer_context->region_type == ORT_COMBINED_PARALLEL)) octx = octx->outer_context->outer_context; else octx = octx->outer_context; } if (octx && octx->region_type == ORT_WORKSHARE && octx->combined_loop && octx->distribute) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause for variable other than " "loop iterator specified on construct " "combined with %<distribute%>"); remove = true; break; } } /* For combined #pragma omp parallel for 
simd, need to put lastprivate and perhaps firstprivate too on the parallel. Similarly for #pragma omp for simd. */ struct gimplify_omp_ctx *octx = outer_ctx; decl = NULL_TREE; do { if (OMP_CLAUSE_LINEAR_NO_COPYIN (c) && OMP_CLAUSE_LINEAR_NO_COPYOUT (c)) break; decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) { decl = NULL_TREE; break; } flags = GOVD_SEEN; if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)) flags |= GOVD_FIRSTPRIVATE; if (!OMP_CLAUSE_LINEAR_NO_COPYOUT (c)) flags |= GOVD_LASTPRIVATE; if (octx && octx->region_type == ORT_WORKSHARE && octx->combined_loop) { if (octx->outer_context && (octx->outer_context->region_type == ORT_COMBINED_PARALLEL)) octx = octx->outer_context; else if (omp_check_private (octx, decl, false)) break; } else if (octx && (octx->region_type & ORT_TASK) != 0 && octx->combined_loop) ; else if (octx && octx->region_type == ORT_COMBINED_PARALLEL && ctx->region_type == ORT_WORKSHARE && octx == outer_ctx) flags = GOVD_SEEN | GOVD_SHARED; else if (octx && ((octx->region_type & ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS)) flags = GOVD_SEEN | GOVD_SHARED; else if (octx && octx->region_type == ORT_COMBINED_TARGET) { flags &= ~GOVD_LASTPRIVATE; if (flags == GOVD_SEEN) break; } else break; splay_tree_node on = splay_tree_lookup (octx->variables, (splay_tree_key) decl); if (on && (on->value & GOVD_DATA_SHARE_CLASS) != 0) { octx = NULL; break; } omp_add_variable (octx, decl, flags); if (octx->outer_context == NULL) break; octx = octx->outer_context; } while (1); if (octx && decl && (!OMP_CLAUSE_LINEAR_NO_COPYIN (c) || !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))) omp_notice_variable (octx, decl, true); } flags = GOVD_LINEAR | GOVD_EXPLICIT; if (OMP_CLAUSE_LINEAR_NO_COPYIN (c) && OMP_CLAUSE_LINEAR_NO_COPYOUT (c)) { notice_outer = false; flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER; } goto do_add; case OMP_CLAUSE_MAP: decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) remove = true; switch (code) { case OMP_TARGET: break; case OACC_DATA: if (TREE_CODE (TREE_TYPE 
(decl)) != ARRAY_TYPE) break; /* FALLTHRU */ case OMP_TARGET_DATA: case OMP_TARGET_ENTER_DATA: case OMP_TARGET_EXIT_DATA: case OACC_ENTER_DATA: case OACC_EXIT_DATA: case OACC_HOST_DATA: if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)) /* For target {,enter ,exit }data only the array slice is mapped, but not the pointer to it. */ remove = true; break; default: break; } /* For Fortran, not only the pointer to the data is mapped but also the address of the pointer, the array descriptor etc.; for 'exit data' - and in particular for 'delete:' - having an 'alloc:' does not make sense. Likewise, for 'update' only transferring the data itself is needed as the rest has been handled in previous directives. However, for 'exit data', the array descriptor needs to be delete; hence, we turn the MAP_TO_PSET into a MAP_DELETE. NOTE: Generally, it is not safe to perform "enter data" operations on arrays where the data *or the descriptor* may go out of scope before a corresponding "exit data" operation -- and such a descriptor may be synthesized temporarily, e.g. to pass an explicit-shape array to a function expecting an assumed-shape argument. Performing "enter data" inside the called function would thus be problematic. */ if (code == OMP_TARGET_EXIT_DATA && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_TO_PSET) OMP_CLAUSE_SET_MAP_KIND (c, OMP_CLAUSE_MAP_KIND (*prev_list_p) == GOMP_MAP_DELETE ? 
GOMP_MAP_DELETE : GOMP_MAP_RELEASE); else if ((code == OMP_TARGET_EXIT_DATA || code == OMP_TARGET_UPDATE) && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_TO_PSET)) remove = true; if (remove) break; if (DECL_P (decl) && outer_ctx && (region_type & ORT_ACC)) { struct gimplify_omp_ctx *octx; for (octx = outer_ctx; octx; octx = octx->outer_context) { if (octx->region_type != ORT_ACC_HOST_DATA) break; splay_tree_node n2 = splay_tree_lookup (octx->variables, (splay_tree_key) decl); if (n2) error_at (OMP_CLAUSE_LOCATION (c), "variable %qE " "declared in enclosing %<host_data%> region", DECL_NAME (decl)); } } if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? DECL_SIZE_UNIT (decl) : TYPE_SIZE_UNIT (TREE_TYPE (decl)); if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) { remove = true; break; } else if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)) && TREE_CODE (OMP_CLAUSE_SIZE (c)) != INTEGER_CST) { OMP_CLAUSE_SIZE (c) = get_initialized_tmp_var (OMP_CLAUSE_SIZE (c), pre_p, NULL, false); omp_add_variable (ctx, OMP_CLAUSE_SIZE (c), GOVD_FIRSTPRIVATE | GOVD_SEEN); } if (!DECL_P (decl)) { tree d = decl, *pd; if (TREE_CODE (d) == ARRAY_REF) { while (TREE_CODE (d) == ARRAY_REF) d = TREE_OPERAND (d, 0); if (TREE_CODE (d) == COMPONENT_REF && TREE_CODE (TREE_TYPE (d)) == ARRAY_TYPE) decl = d; } pd = &OMP_CLAUSE_DECL (c); if (d == decl && TREE_CODE (decl) == INDIRECT_REF && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0))) == REFERENCE_TYPE)) { pd = &TREE_OPERAND (decl, 0); decl = TREE_OPERAND (decl, 0); } bool indir_p = false; tree orig_decl = decl; tree decl_ref = NULL_TREE; if ((region_type & ORT_ACC) != 0 && TREE_CODE (*pd) == COMPONENT_REF && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH && code != OACC_UPDATE) { while (TREE_CODE (decl) == 
COMPONENT_REF) { decl = TREE_OPERAND (decl, 0); if ((TREE_CODE (decl) == MEM_REF && integer_zerop (TREE_OPERAND (decl, 1))) || INDIRECT_REF_P (decl)) { indir_p = true; decl = TREE_OPERAND (decl, 0); } if (TREE_CODE (decl) == INDIRECT_REF && DECL_P (TREE_OPERAND (decl, 0)) && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0))) == REFERENCE_TYPE)) { decl_ref = decl; decl = TREE_OPERAND (decl, 0); } } } else if (TREE_CODE (decl) == COMPONENT_REF) { while (TREE_CODE (decl) == COMPONENT_REF) decl = TREE_OPERAND (decl, 0); if (TREE_CODE (decl) == INDIRECT_REF && DECL_P (TREE_OPERAND (decl, 0)) && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0))) == REFERENCE_TYPE)) decl = TREE_OPERAND (decl, 0); } if (decl != orig_decl && DECL_P (decl) && indir_p) { gomp_map_kind k = (code == OACC_EXIT_DATA) ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH; /* We have a dereference of a struct member. Make this an attach/detach operation, and ensure the base pointer is mapped as a FIRSTPRIVATE_POINTER. */ OMP_CLAUSE_SET_MAP_KIND (c, k); flags = GOVD_MAP | GOVD_SEEN | GOVD_EXPLICIT; tree next_clause = OMP_CLAUSE_CHAIN (c); if (k == GOMP_MAP_ATTACH && code != OACC_ENTER_DATA && (!next_clause || (OMP_CLAUSE_CODE (next_clause) != OMP_CLAUSE_MAP) || (OMP_CLAUSE_MAP_KIND (next_clause) != GOMP_MAP_POINTER) || OMP_CLAUSE_DECL (next_clause) != decl) && (!struct_deref_set || !struct_deref_set->contains (decl))) { if (!struct_deref_set) struct_deref_set = new hash_set<tree> (); /* As well as the attach, we also need a FIRSTPRIVATE_POINTER clause to properly map the pointer to the struct base. */ tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_ALLOC); OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c2) = 1; tree charptr_zero = build_int_cst (build_pointer_type (char_type_node), 0); OMP_CLAUSE_DECL (c2) = build2 (MEM_REF, char_type_node, decl_ref ? 
decl_ref : decl, charptr_zero); OMP_CLAUSE_SIZE (c2) = size_zero_node; tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (c3, GOMP_MAP_FIRSTPRIVATE_POINTER); OMP_CLAUSE_DECL (c3) = decl; OMP_CLAUSE_SIZE (c3) = size_zero_node; tree mapgrp = *prev_list_p; *prev_list_p = c2; OMP_CLAUSE_CHAIN (c3) = mapgrp; OMP_CLAUSE_CHAIN (c2) = c3; struct_deref_set->add (decl); } goto do_add_decl; } /* An "attach/detach" operation on an update directive should behave as a GOMP_MAP_ALWAYS_POINTER. Beware that unlike attach or detach map kinds, GOMP_MAP_ALWAYS_POINTER depends on the previous mapping. */ if (code == OACC_UPDATE && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH) OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_ALWAYS_POINTER); if (gimplify_expr (pd, pre_p, NULL, is_gimple_lvalue, fb_lvalue) == GS_ERROR) { remove = true; break; } if (DECL_P (decl) && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_DETACH && code != OACC_UPDATE) { if (error_operand_p (decl)) { remove = true; break; } tree stype = TREE_TYPE (decl); if (TREE_CODE (stype) == REFERENCE_TYPE) stype = TREE_TYPE (stype); if (TYPE_SIZE_UNIT (stype) == NULL || TREE_CODE (TYPE_SIZE_UNIT (stype)) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "mapping field %qE of variable length " "structure", OMP_CLAUSE_DECL (c)); remove = true; break; } if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH) { /* Error recovery. 
*/ if (prev_list_p == NULL) { remove = true; break; } if (OMP_CLAUSE_CHAIN (*prev_list_p) != c) { tree ch = OMP_CLAUSE_CHAIN (*prev_list_p); if (ch == NULL_TREE || OMP_CLAUSE_CHAIN (ch) != c) { remove = true; break; } } } poly_offset_int offset1; poly_int64 bitpos1; tree base_ref; tree base = extract_base_bit_offset (OMP_CLAUSE_DECL (c), &base_ref, &bitpos1, &offset1); gcc_assert (base == decl); splay_tree_node n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl); bool ptr = (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER); bool attach_detach = (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH); bool attach = OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH; bool has_attachments = false; /* For OpenACC, pointers in structs should trigger an attach action. */ if (attach_detach && (region_type & ORT_ACC) != 0) { /* Turn a GOMP_MAP_ATTACH_DETACH clause into a GOMP_MAP_ATTACH or GOMP_MAP_DETACH clause after we have detected a case that needs a GOMP_MAP_STRUCT mapping added. */ gomp_map_kind k = (code == OACC_EXIT_DATA) ? GOMP_MAP_DETACH : GOMP_MAP_ATTACH; OMP_CLAUSE_SET_MAP_KIND (c, k); has_attachments = true; } if (n == NULL || (n->value & GOVD_MAP) == 0) { tree l = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); gomp_map_kind k = attach ? GOMP_MAP_FORCE_PRESENT : GOMP_MAP_STRUCT; OMP_CLAUSE_SET_MAP_KIND (l, k); if (base_ref) OMP_CLAUSE_DECL (l) = unshare_expr (base_ref); else OMP_CLAUSE_DECL (l) = decl; OMP_CLAUSE_SIZE (l) = (!attach ? size_int (1) : DECL_P (OMP_CLAUSE_DECL (l)) ? 
DECL_SIZE_UNIT (OMP_CLAUSE_DECL (l)) : TYPE_SIZE_UNIT (TREE_TYPE (OMP_CLAUSE_DECL (l)))); if (struct_map_to_clause == NULL) struct_map_to_clause = new hash_map<tree, tree>; struct_map_to_clause->put (decl, l); if (ptr || attach_detach) { insert_struct_comp_map (code, c, l, *prev_list_p, NULL); *prev_list_p = l; prev_list_p = NULL; } else { OMP_CLAUSE_CHAIN (l) = c; *list_p = l; list_p = &OMP_CLAUSE_CHAIN (l); } if (base_ref && code == OMP_TARGET) { tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); enum gomp_map_kind mkind = GOMP_MAP_FIRSTPRIVATE_REFERENCE; OMP_CLAUSE_SET_MAP_KIND (c2, mkind); OMP_CLAUSE_DECL (c2) = decl; OMP_CLAUSE_SIZE (c2) = size_zero_node; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (l); OMP_CLAUSE_CHAIN (l) = c2; } flags = GOVD_MAP | GOVD_EXPLICIT; if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) || ptr || attach_detach) flags |= GOVD_SEEN; if (has_attachments) flags |= GOVD_MAP_HAS_ATTACHMENTS; goto do_add_decl; } else if (struct_map_to_clause) { tree *osc = struct_map_to_clause->get (decl); tree *sc = NULL, *scp = NULL; if (GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) || ptr || attach_detach) n->value |= GOVD_SEEN; sc = &OMP_CLAUSE_CHAIN (*osc); if (*sc != c && (OMP_CLAUSE_MAP_KIND (*sc) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)) sc = &OMP_CLAUSE_CHAIN (*sc); /* Here "prev_list_p" is the end of the inserted alloc/release nodes after the struct node, OSC. 
*/ for (; *sc != c; sc = &OMP_CLAUSE_CHAIN (*sc)) if ((ptr || attach_detach) && sc == prev_list_p) break; else if (TREE_CODE (OMP_CLAUSE_DECL (*sc)) != COMPONENT_REF && (TREE_CODE (OMP_CLAUSE_DECL (*sc)) != INDIRECT_REF) && (TREE_CODE (OMP_CLAUSE_DECL (*sc)) != ARRAY_REF)) break; else { tree sc_decl = OMP_CLAUSE_DECL (*sc); poly_offset_int offsetn; poly_int64 bitposn; tree base = extract_base_bit_offset (sc_decl, NULL, &bitposn, &offsetn); if (base != decl) break; if (scp) continue; tree d1 = OMP_CLAUSE_DECL (*sc); tree d2 = OMP_CLAUSE_DECL (c); while (TREE_CODE (d1) == ARRAY_REF) d1 = TREE_OPERAND (d1, 0); while (TREE_CODE (d2) == ARRAY_REF) d2 = TREE_OPERAND (d2, 0); if (TREE_CODE (d1) == INDIRECT_REF) d1 = TREE_OPERAND (d1, 0); if (TREE_CODE (d2) == INDIRECT_REF) d2 = TREE_OPERAND (d2, 0); while (TREE_CODE (d1) == COMPONENT_REF) if (TREE_CODE (d2) == COMPONENT_REF && TREE_OPERAND (d1, 1) == TREE_OPERAND (d2, 1)) { d1 = TREE_OPERAND (d1, 0); d2 = TREE_OPERAND (d2, 0); } else break; if (d1 == d2) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once in map " "clauses", OMP_CLAUSE_DECL (c)); remove = true; break; } if (maybe_lt (offset1, offsetn) || (known_eq (offset1, offsetn) && maybe_lt (bitpos1, bitposn))) { if (ptr || attach_detach) scp = sc; else break; } } if (remove) break; if (!attach) OMP_CLAUSE_SIZE (*osc) = size_binop (PLUS_EXPR, OMP_CLAUSE_SIZE (*osc), size_one_node); if (ptr || attach_detach) { tree cl = insert_struct_comp_map (code, c, NULL, *prev_list_p, scp); if (sc == prev_list_p) { *sc = cl; prev_list_p = NULL; } else { *prev_list_p = OMP_CLAUSE_CHAIN (c); list_p = prev_list_p; prev_list_p = NULL; OMP_CLAUSE_CHAIN (c) = *sc; *sc = cl; continue; } } else if (*sc != c) { *list_p = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = *sc; *sc = c; continue; } } } if (!remove && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ATTACH_DETACH && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_TO_PSET && OMP_CLAUSE_CHAIN 
(c) && OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c)) == OMP_CLAUSE_MAP && ((OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c)) == GOMP_MAP_ALWAYS_POINTER) || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c)) == GOMP_MAP_ATTACH_DETACH) || (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c)) == GOMP_MAP_TO_PSET))) prev_list_p = list_p; break; } flags = GOVD_MAP | GOVD_EXPLICIT; if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TO || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TOFROM) flags |= GOVD_MAP_ALWAYS_TO; goto do_add; case OMP_CLAUSE_DEPEND: if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) { tree deps = OMP_CLAUSE_DECL (c); while (deps && TREE_CODE (deps) == TREE_LIST) { if (TREE_CODE (TREE_PURPOSE (deps)) == TRUNC_DIV_EXPR && DECL_P (TREE_OPERAND (TREE_PURPOSE (deps), 1))) gimplify_expr (&TREE_OPERAND (TREE_PURPOSE (deps), 1), pre_p, NULL, is_gimple_val, fb_rvalue); deps = TREE_CHAIN (deps); } break; } else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE) break; if (handled_depend_iterators == -1) handled_depend_iterators = gimplify_omp_depend (list_p, pre_p); if (handled_depend_iterators) { if (handled_depend_iterators == 2) remove = true; break; } if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPOUND_EXPR) { gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), pre_p, NULL, is_gimple_val, fb_rvalue); OMP_CLAUSE_DECL (c) = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1); } if (error_operand_p (OMP_CLAUSE_DECL (c))) { remove = true; break; } OMP_CLAUSE_DECL (c) = build_fold_addr_expr (OMP_CLAUSE_DECL (c)); if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) { remove = true; break; } break; case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) { remove = true; break; } if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = DECL_P (decl) ? 
DECL_SIZE_UNIT (decl) : TYPE_SIZE_UNIT (TREE_TYPE (decl)); if (gimplify_expr (&OMP_CLAUSE_SIZE (c), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) { remove = true; break; } if (!DECL_P (decl)) { if (gimplify_expr (&OMP_CLAUSE_DECL (c), pre_p, NULL, is_gimple_lvalue, fb_lvalue) == GS_ERROR) { remove = true; break; } break; } goto do_notice; case OMP_CLAUSE_USE_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_ADDR: flags = GOVD_EXPLICIT; goto do_add; case OMP_CLAUSE_IS_DEVICE_PTR: flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT; goto do_add; do_add: decl = OMP_CLAUSE_DECL (c); do_add_decl: if (error_operand_p (decl)) { remove = true; break; } if (DECL_NAME (decl) == NULL_TREE && (flags & GOVD_SHARED) == 0) { tree t = omp_member_access_dummy_var (decl); if (t) { tree v = DECL_VALUE_EXPR (decl); DECL_NAME (decl) = DECL_NAME (TREE_OPERAND (v, 1)); if (outer_ctx) omp_notice_variable (outer_ctx, t, true); } } if (code == OACC_DATA && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) flags |= GOVD_MAP_0LEN_ARRAY; omp_add_variable (ctx, decl, flags); if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) { omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c), GOVD_LOCAL | GOVD_SEEN); if (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) && walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c), find_decl_expr, OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c), NULL) == NULL_TREE) omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c), GOVD_LOCAL | GOVD_SEEN); gimplify_omp_ctxp = ctx; push_gimplify_context (); OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL; OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL; gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), &OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c)); pop_gimplify_context (gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))); push_gimplify_context (); gimplify_and_add 
(OMP_CLAUSE_REDUCTION_MERGE (c), &OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c)); pop_gimplify_context (gimple_seq_first_stmt (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c))); OMP_CLAUSE_REDUCTION_INIT (c) = NULL_TREE; OMP_CLAUSE_REDUCTION_MERGE (c) = NULL_TREE; gimplify_omp_ctxp = outer_ctx; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_LASTPRIVATE_STMT (c)) { gimplify_omp_ctxp = ctx; push_gimplify_context (); if (TREE_CODE (OMP_CLAUSE_LASTPRIVATE_STMT (c)) != BIND_EXPR) { tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = OMP_CLAUSE_LASTPRIVATE_STMT (c); OMP_CLAUSE_LASTPRIVATE_STMT (c) = bind; } gimplify_and_add (OMP_CLAUSE_LASTPRIVATE_STMT (c), &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c)); pop_gimplify_context (gimple_seq_first_stmt (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))); OMP_CLAUSE_LASTPRIVATE_STMT (c) = NULL_TREE; gimplify_omp_ctxp = outer_ctx; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_LINEAR_STMT (c)) { gimplify_omp_ctxp = ctx; push_gimplify_context (); if (TREE_CODE (OMP_CLAUSE_LINEAR_STMT (c)) != BIND_EXPR) { tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = OMP_CLAUSE_LINEAR_STMT (c); OMP_CLAUSE_LINEAR_STMT (c) = bind; } gimplify_and_add (OMP_CLAUSE_LINEAR_STMT (c), &OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c)); pop_gimplify_context (gimple_seq_first_stmt (OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))); OMP_CLAUSE_LINEAR_STMT (c) = NULL_TREE; gimplify_omp_ctxp = outer_ctx; } if (notice_outer) goto do_notice; break; case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_COPYPRIVATE: decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) { remove = true; break; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_COPYPRIVATE && !remove && !omp_check_private (ctx, decl, true)) { remove = true; if (is_global_var (decl)) { if (DECL_THREAD_LOCAL_P (decl)) remove = false; else if (DECL_HAS_VALUE_EXPR_P (decl)) { tree value = get_base_address 
(DECL_VALUE_EXPR (decl)); if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value)) remove = false; } } if (remove) error_at (OMP_CLAUSE_LOCATION (c), "copyprivate variable %qE is not threadprivate" " or private in outer context", DECL_NAME (decl)); } do_notice: if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE) && outer_ctx && ((region_type & ORT_TASKLOOP) == ORT_TASKLOOP || (region_type == ORT_WORKSHARE && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && (OMP_CLAUSE_REDUCTION_INSCAN (c) || code == OMP_LOOP))) && (outer_ctx->region_type == ORT_COMBINED_PARALLEL || (code == OMP_LOOP && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && ((outer_ctx->region_type & ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS)))) { splay_tree_node on = splay_tree_lookup (outer_ctx->variables, (splay_tree_key)decl); if (on == NULL || (on->value & GOVD_DATA_SHARE_CLASS) == 0) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF && (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE || (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE && (TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)))) omp_firstprivatize_variable (outer_ctx, decl); else { omp_add_variable (outer_ctx, decl, GOVD_SEEN | GOVD_SHARED); if (outer_ctx->outer_context) omp_notice_variable (outer_ctx->outer_context, decl, true); } } } if (outer_ctx) omp_notice_variable (outer_ctx, decl, true); if (check_non_private && region_type == ORT_WORKSHARE && (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION || decl == OMP_CLAUSE_DECL (c) || (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF && (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0)) == ADDR_EXPR || (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0)) == POINTER_PLUS_EXPR && (TREE_CODE (TREE_OPERAND (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0), 0)) == ADDR_EXPR))))) && omp_check_private (ctx, decl, false)) { error ("%s variable %qE is private in outer context", 
check_non_private, DECL_NAME (decl)); remove = true; } break; case OMP_CLAUSE_IF: if (OMP_CLAUSE_IF_MODIFIER (c) != ERROR_MARK && OMP_CLAUSE_IF_MODIFIER (c) != code) { const char *p[2]; for (int i = 0; i < 2; i++) switch (i ? OMP_CLAUSE_IF_MODIFIER (c) : code) { case VOID_CST: p[i] = "cancel"; break; case OMP_PARALLEL: p[i] = "parallel"; break; case OMP_SIMD: p[i] = "simd"; break; case OMP_TASK: p[i] = "task"; break; case OMP_TASKLOOP: p[i] = "taskloop"; break; case OMP_TARGET_DATA: p[i] = "target data"; break; case OMP_TARGET: p[i] = "target"; break; case OMP_TARGET_UPDATE: p[i] = "target update"; break; case OMP_TARGET_ENTER_DATA: p[i] = "target enter data"; break; case OMP_TARGET_EXIT_DATA: p[i] = "target exit data"; break; default: gcc_unreachable (); } error_at (OMP_CLAUSE_LOCATION (c), "expected %qs %<if%> clause modifier rather than %qs", p[0], p[1]); remove = true; } /* Fall through. */ case OMP_CLAUSE_FINAL: OMP_CLAUSE_OPERAND (c, 0) = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0)); /* Fall through. 
*/ case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: case OMP_CLAUSE_DIST_SCHEDULE: case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_PRIORITY: case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_HINT: case OMP_CLAUSE_ASYNC: case OMP_CLAUSE_WAIT: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) remove = true; break; case OMP_CLAUSE_GANG: if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) remove = true; if (gimplify_expr (&OMP_CLAUSE_OPERAND (c, 1), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) remove = true; break; case OMP_CLAUSE_NOWAIT: nowait = 1; break; case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_TILE: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_BIND: case OMP_CLAUSE_IF_PRESENT: case OMP_CLAUSE_FINALIZE: break; case OMP_CLAUSE_ORDER: ctx->order_concurrent = true; break; case OMP_CLAUSE_DEFAULTMAP: enum gimplify_defaultmap_kind gdmkmin, gdmkmax; switch (OMP_CLAUSE_DEFAULTMAP_CATEGORY (c)) { case OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED: gdmkmin = GDMK_SCALAR; gdmkmax = GDMK_POINTER; break; case OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR: gdmkmin = gdmkmax = GDMK_SCALAR; break; case OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE: gdmkmin = gdmkmax = GDMK_AGGREGATE; break; case OMP_CLAUSE_DEFAULTMAP_CATEGORY_ALLOCATABLE: gdmkmin = gdmkmax = GDMK_ALLOCATABLE; break; case OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER: gdmkmin = gdmkmax = GDMK_POINTER; break; default: gcc_unreachable (); } for (int gdmk = gdmkmin; 
gdmk <= gdmkmax; gdmk++) switch (OMP_CLAUSE_DEFAULTMAP_BEHAVIOR (c)) { case OMP_CLAUSE_DEFAULTMAP_ALLOC: ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_ALLOC_ONLY; break; case OMP_CLAUSE_DEFAULTMAP_TO: ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_TO_ONLY; break; case OMP_CLAUSE_DEFAULTMAP_FROM: ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_FROM_ONLY; break; case OMP_CLAUSE_DEFAULTMAP_TOFROM: ctx->defaultmap[gdmk] = GOVD_MAP; break; case OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE: ctx->defaultmap[gdmk] = GOVD_FIRSTPRIVATE; break; case OMP_CLAUSE_DEFAULTMAP_NONE: ctx->defaultmap[gdmk] = 0; break; case OMP_CLAUSE_DEFAULTMAP_DEFAULT: switch (gdmk) { case GDMK_SCALAR: ctx->defaultmap[gdmk] = GOVD_FIRSTPRIVATE; break; case GDMK_AGGREGATE: case GDMK_ALLOCATABLE: ctx->defaultmap[gdmk] = GOVD_MAP; break; case GDMK_POINTER: ctx->defaultmap[gdmk] = GOVD_MAP | GOVD_MAP_0LEN_ARRAY; break; default: gcc_unreachable (); } break; default: gcc_unreachable (); } break; case OMP_CLAUSE_ALIGNED: decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) { remove = true; break; } if (gimplify_expr (&OMP_CLAUSE_ALIGNED_ALIGNMENT (c), pre_p, NULL, is_gimple_val, fb_rvalue) == GS_ERROR) { remove = true; break; } if (!is_global_var (decl) && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) omp_add_variable (ctx, decl, GOVD_ALIGNED); break; case OMP_CLAUSE_NONTEMPORAL: decl = OMP_CLAUSE_DECL (c); if (error_operand_p (decl)) { remove = true; break; } omp_add_variable (ctx, decl, GOVD_NONTEMPORAL); break; case OMP_CLAUSE_DEFAULT: ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c); break; case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: decl = OMP_CLAUSE_DECL (c); { splay_tree_node n = splay_tree_lookup (outer_ctx->variables, (splay_tree_key) decl); if (n == NULL || (n->value & GOVD_REDUCTION) == 0) { error_at (OMP_CLAUSE_LOCATION (c), "%qD specified in %qs clause but not in %<inscan%> " "%<reduction%> clause on the containing construct", decl, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { 
n->value |= GOVD_REDUCTION_INSCAN;
              /* For a simd construct combined with a worksharing loop,
                 propagate the inscan flag to the enclosing worksharing
                 context's entry as well.  */
              if (outer_ctx->region_type == ORT_SIMD
                  && outer_ctx->outer_context
                  && outer_ctx->outer_context->region_type == ORT_WORKSHARE)
                {
                  n = splay_tree_lookup (outer_ctx->outer_context->variables,
                                         (splay_tree_key) decl);
                  if (n && (n->value & GOVD_REDUCTION) != 0)
                    n->value |= GOVD_REDUCTION_INSCAN;
                }
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  /* OpenACC data constructs drop FIRSTPRIVATE_POINTER and
     FIRSTPRIVATE_REFERENCE map clauses.  */
  if (code == OACC_DATA
      && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
      && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
          || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
    remove = true;

  /* Unlink a removed clause from the list, otherwise advance to the
     next clause.  */
  if (remove)
    *list_p = OMP_CLAUSE_CHAIN (c);
  else
    list_p = &OMP_CLAUSE_CHAIN (c);
  }

  ctx->clauses = *orig_list_p;
  gimplify_omp_ctxp = ctx;
  if (struct_map_to_clause)
    delete struct_map_to_clause;
  if (struct_deref_set)
    delete struct_deref_set;
}

/* Return true if DECL is a candidate for shared to firstprivate
   optimization.  We only consider non-addressable scalars, not
   too big, and not references.  */

static bool
omp_shared_to_firstprivate_optimizable_decl_p (tree decl)
{
  /* An addressable decl can be written through a pointer behind our
     back, so it must stay shared.  */
  if (TREE_ADDRESSABLE (decl))
    return false;
  tree type = TREE_TYPE (decl);
  /* Only scalar register types; no references, no addressable types.  */
  if (!is_gimple_reg_type (type)
      || TREE_CODE (type) == REFERENCE_TYPE
      || TREE_ADDRESSABLE (type))
    return false;
  /* Don't optimize too large decls, as each thread/task will have
     its own.  */
  HOST_WIDE_INT len = int_size_in_bytes (type);
  if (len == -1 || len > 4 * POINTER_SIZE / BITS_PER_UNIT)
    return false;
  /* Frontends that pass this decl by reference can't use the copy.  */
  if (lang_hooks.decls.omp_privatize_by_reference (decl))
    return false;
  return true;
}

/* Helper function of omp_find_stores_op and gimplify_adjust_omp_clauses*.
   For omp_shared_to_firstprivate_optimizable_decl_p decl mark it as
   GOVD_WRITTEN in outer contexts.
*/

static void
omp_mark_stores (struct gimplify_omp_ctx *ctx, tree decl)
{
  /* Walk outwards through the enclosing contexts until DECL's
     data-sharing is resolved.  */
  for (; ctx; ctx = ctx->outer_context)
    {
      splay_tree_node n = splay_tree_lookup (ctx->variables,
                                             (splay_tree_key) decl);
      if (n == NULL)
        continue;
      else if (n->value & GOVD_SHARED)
        {
          /* Shared here: record that it is written, then stop.  */
          n->value |= GOVD_WRITTEN;
          return;
        }
      else if (n->value & GOVD_DATA_SHARE_CLASS)
        /* Privatized in this context; the store doesn't reach any
           outer shared copy.  */
        return;
    }
}

/* Helper callback for walk_gimple_seq to discover possible stores
   to omp_shared_to_firstprivate_optimizable_decl_p decls and set
   GOVD_WRITTEN if they are GOVD_SHARED in some outer context
   for those.  */

static tree
omp_find_stores_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  *walk_subtrees = 0;
  /* Only store destinations are interesting.  */
  if (!wi->is_lhs)
    return NULL_TREE;

  tree op = *tp;
  /* Strip component references and *&base indirections down to the
     underlying base object.  */
  do
    {
      if (handled_component_p (op))
        op = TREE_OPERAND (op, 0);
      else if ((TREE_CODE (op) == MEM_REF || TREE_CODE (op) == TARGET_MEM_REF)
               && TREE_CODE (TREE_OPERAND (op, 0)) == ADDR_EXPR)
        op = TREE_OPERAND (TREE_OPERAND (op, 0), 0);
      else
        break;
    }
  while (1);
  if (!DECL_P (op) || !omp_shared_to_firstprivate_optimizable_decl_p (op))
    return NULL_TREE;

  omp_mark_stores (gimplify_omp_ctxp, op);
  return NULL_TREE;
}

/* Helper callback for walk_gimple_seq to discover possible stores
   to omp_shared_to_firstprivate_optimizable_decl_p decls and set
   GOVD_WRITTEN if they are GOVD_SHARED in some outer context
   for those.  */

static tree
omp_find_stores_stmt (gimple_stmt_iterator *gsi_p,
                      bool *handled_ops_p,
                      struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  switch (gimple_code (stmt))
    {
    /* Don't recurse on OpenMP constructs for which
       gimplify_adjust_omp_clauses already handled the bodies,
       except handle gimple_omp_for_pre_body.
*/
    case GIMPLE_OMP_FOR:
      *handled_ops_p = true;
      /* The pre-body runs in the enclosing context, so it still needs
         to be scanned for stores.  */
      if (gimple_omp_for_pre_body (stmt))
        walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                         omp_find_stores_stmt, omp_find_stores_op, wi);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_CRITICAL:
      *handled_ops_p = true;
      break;
    default:
      break;
    }
  return NULL_TREE;
}

/* State threaded through splay_tree_foreach into
   gimplify_adjust_omp_clauses_1.  */

struct gimplify_adjust_omp_clauses_data
{
  tree *list_p;       /* Clause list to prepend implicit clauses to.  */
  gimple_seq *pre_p;  /* Sequence for statements gimplified here.  */
};

/* For all variables that were not actually used within the context,
   remove PRIVATE, SHARED, and FIRSTPRIVATE clauses.  */

static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  tree *list_p = ((struct gimplify_adjust_omp_clauses_data *) data)->list_p;
  gimple_seq *pre_p
    = ((struct gimplify_adjust_omp_clauses_data *) data)->pre_p;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;

  /* Conditional lastprivate on a combined parallel degrades to a
     written shared variable here.  */
  if (gimplify_omp_ctxp->region_type == ORT_COMBINED_PARALLEL
      && (flags & GOVD_LASTPRIVATE_CONDITIONAL) != 0)
    flags = GOVD_SHARED | GOVD_SEEN | GOVD_WRITTEN;
  /* Explicit clauses, local decls, unseen decls and attachment-only
     maps need no implicit clause.  */
  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if ((flags & GOVD_MAP_HAS_ATTACHMENTS) != 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_SHARED);
      private_debug = true;
    }
  else if (flags & GOVD_MAP)
    private_debug = false;
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
                                                   !!(flags & GOVD_SHARED));

  /* Translate the GOVD_* classification into a clause code.  */
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_MAP)
    {
      code = OMP_CLAUSE_MAP;
      if ((gimplify_omp_ctxp->region_type & ORT_ACC) == 0
          && TYPE_ATOMIC (strip_array_types (TREE_TYPE (decl))))
        {
          error ("%<_Atomic%> %qD in implicit %<map%> clause", decl);
          return 0;
        }
      /* Constant-pool entries mapped to a device must be marked
         "omp declare target" and made offloadable.  */
      if (VAR_P (decl)
          && DECL_IN_CONSTANT_POOL (decl)
          && !lookup_attribute ("omp declare target",
                                DECL_ATTRIBUTES (decl)))
        {
          tree id = get_identifier ("omp declare target");
          DECL_ATTRIBUTES (decl)
            = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (decl));
          varpool_node *node = varpool_node::get (decl);
          if (node)
            {
              node->offloadable = 1;
              if (ENABLE_OFFLOADING)
                g->have_offload = true;
            }
        }
    }
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
        {
          /* A global only needs a shared clause if some enclosing
             context privatizes, reduces or maps it.  */
          struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
          while (ctx != NULL)
            {
              splay_tree_node on
                = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
              if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
                                      | GOVD_PRIVATE | GOVD_REDUCTION
                                      | GOVD_LINEAR | GOVD_MAP)) != 0)
                break;
              ctx = ctx->outer_context;
            }
          if (ctx == NULL)
            return 0;
        }
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    {
      code = OMP_CLAUSE_FIRSTPRIVATE;
      if ((gimplify_omp_ctxp->region_type & ORT_TARGET)
          && (gimplify_omp_ctxp->region_type & ORT_ACC) == 0
          && TYPE_ATOMIC (strip_array_types (TREE_TYPE (decl))))
        {
          error ("%<_Atomic%> %qD in implicit %<firstprivate%> clause on "
                 "%<target%> construct", decl);
          return 0;
        }
    }
  else if (flags & GOVD_LASTPRIVATE)
    code = OMP_CLAUSE_LASTPRIVATE;
  else if (flags & (GOVD_ALIGNED | GOVD_NONTEMPORAL))
    return 0;
  else if (flags & GOVD_CONDTEMP)
    {
      code = OMP_CLAUSE__CONDTEMP_;
      gimple_add_tmp_var (decl);
    }
  else
    gcc_unreachable ();

  /* Record the store in outer contexts for the shared-to-firstprivate
     optimization bookkeeping.  */
  if (((flags & GOVD_LASTPRIVATE)
       || (code == OMP_CLAUSE_SHARED && (flags & GOVD_WRITTEN)))
      && omp_shared_to_firstprivate_optimizable_decl_p (decl))
    omp_mark_stores (gimplify_omp_ctxp->outer_context, decl);

  /* Build the implicit clause and prepend it to the list.  */
  tree chain = *list_p;
  clause = build_omp_clause (input_location, code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = chain;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  else if (code == OMP_CLAUSE_PRIVATE && (flags & GOVD_PRIVATE_OUTER_REF))
    OMP_CLAUSE_PRIVATE_OUTER_REF (clause) = 1;
  else if (code == OMP_CLAUSE_SHARED
           && (flags & GOVD_WRITTEN) == 0
           && omp_shared_to_firstprivate_optimizable_decl_p (decl))
    OMP_CLAUSE_SHARED_READONLY (clause) = 1;
  else if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_EXPLICIT) == 0)
    OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (clause) = 1;
  else if (code == OMP_CLAUSE_MAP && (flags & GOVD_MAP_0LEN_ARRAY) != 0)
    {
      /* Zero-length array section: map a zero-sized chunk and pair it
         with a FIRSTPRIVATE_POINTER clause for the base pointer.  */
      tree nc = build_omp_clause (input_location, OMP_CLAUSE_MAP);
      OMP_CLAUSE_DECL (nc) = decl;
      if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
          && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)
        OMP_CLAUSE_DECL (clause)
          = build_simple_mem_ref_loc (input_location, decl);
      OMP_CLAUSE_DECL (clause)
        = build2 (MEM_REF, char_type_node, OMP_CLAUSE_DECL (clause),
                  build_int_cst (build_pointer_type (char_type_node), 0));
      OMP_CLAUSE_SIZE (clause) = size_zero_node;
      OMP_CLAUSE_SIZE (nc) = size_zero_node;
      OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_ALLOC);
      OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (clause) = 1;
      OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER);
      OMP_CLAUSE_CHAIN (nc) = chain;
      OMP_CLAUSE_CHAIN (clause) = nc;
      /* Gimplify the address in the outer context.  */
      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
      gimplify_omp_ctxp = ctx->outer_context;
      gimplify_expr (&TREE_OPERAND (OMP_CLAUSE_DECL (clause), 0),
                     pre_p, NULL, is_gimple_val, fb_rvalue);
      gimplify_omp_ctxp = ctx;
    }
  else if (code == OMP_CLAUSE_MAP)
    {
      int kind;
      /* Not all combinations of these GOVD_MAP flags are actually
         valid.  */
      switch (flags & (GOVD_MAP_TO_ONLY
                       | GOVD_MAP_FORCE
                       | GOVD_MAP_FORCE_PRESENT
                       | GOVD_MAP_ALLOC_ONLY
                       | GOVD_MAP_FROM_ONLY))
        {
        case 0:
          kind = GOMP_MAP_TOFROM;
          break;
        case GOVD_MAP_FORCE:
          kind = GOMP_MAP_TOFROM | GOMP_MAP_FLAG_FORCE;
          break;
        case GOVD_MAP_TO_ONLY:
          kind = GOMP_MAP_TO;
          break;
        case GOVD_MAP_FROM_ONLY:
          kind = GOMP_MAP_FROM;
          break;
        case GOVD_MAP_ALLOC_ONLY:
          kind = GOMP_MAP_ALLOC;
          break;
        case GOVD_MAP_TO_ONLY | GOVD_MAP_FORCE:
          kind = GOMP_MAP_TO | GOMP_MAP_FLAG_FORCE;
          break;
        case GOVD_MAP_FORCE_PRESENT:
          kind = GOMP_MAP_FORCE_PRESENT;
          break;
        default:
          gcc_unreachable ();
        }
      OMP_CLAUSE_SET_MAP_KIND (clause, kind);
      if (DECL_SIZE (decl)
          && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
        {
          /* Variable-sized decl (e.g. VLA): map through its
             DECL_VALUE_EXPR pointer and add a pointer clause.  */
          tree decl2 = DECL_VALUE_EXPR (decl);
          gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
          decl2 = TREE_OPERAND (decl2, 0);
          gcc_assert (DECL_P (decl2));
          tree mem = build_simple_mem_ref (decl2);
          OMP_CLAUSE_DECL (clause) = mem;
          OMP_CLAUSE_SIZE (clause) = TYPE_SIZE_UNIT (TREE_TYPE (decl));
          if (gimplify_omp_ctxp->outer_context)
            {
              struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
              omp_notice_variable (ctx, decl2, true);
              omp_notice_variable (ctx, OMP_CLAUSE_SIZE (clause), true);
            }
          tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
                                      OMP_CLAUSE_MAP);
          OMP_CLAUSE_DECL (nc) = decl;
          OMP_CLAUSE_SIZE (nc) = size_zero_node;
          if (gimplify_omp_ctxp->target_firstprivatize_array_bases)
            OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER);
          else
            OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER);
          OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
          OMP_CLAUSE_CHAIN (clause) = nc;
        }
      else if (gimplify_omp_ctxp->target_firstprivatize_array_bases
               && lang_hooks.decls.omp_privatize_by_reference (decl))
        {
          /* Reference-typed decl: map the referenced object and add a
             FIRSTPRIVATE_REFERENCE clause for the reference itself.  */
          OMP_CLAUSE_DECL (clause) = build_simple_mem_ref (decl);
          OMP_CLAUSE_SIZE (clause)
            = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))));
          struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
          gimplify_omp_ctxp = ctx->outer_context;
          gimplify_expr (&OMP_CLAUSE_SIZE (clause), pre_p, NULL,
                         is_gimple_val, fb_rvalue);
          gimplify_omp_ctxp = ctx;
          tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (clause),
                                      OMP_CLAUSE_MAP);
          OMP_CLAUSE_DECL (nc) = decl;
          OMP_CLAUSE_SIZE (nc) = size_zero_node;
          OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_REFERENCE);
          OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (clause);
          OMP_CLAUSE_CHAIN (clause) = nc;
        }
      else
        OMP_CLAUSE_SIZE (clause) = DECL_SIZE_UNIT (decl);
    }
  if (code == OMP_CLAUSE_FIRSTPRIVATE && (flags & GOVD_LASTPRIVATE) != 0)
    {
      /* Also lastprivate: chain a LASTPRIVATE clause marked as coming
         from a FIRSTPRIVATE, and let the frontend finish it.  */
      tree nc = build_omp_clause (input_location, OMP_CLAUSE_LASTPRIVATE);
      OMP_CLAUSE_DECL (nc) = decl;
      OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (nc) = 1;
      OMP_CLAUSE_CHAIN (nc) = chain;
      OMP_CLAUSE_CHAIN (clause) = nc;
      struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
      gimplify_omp_ctxp = ctx->outer_context;
      lang_hooks.decls.omp_finish_clause (nc, pre_p,
                                          (ctx->region_type & ORT_ACC) != 0);
      gimplify_omp_ctxp = ctx;
    }
  *list_p = clause;
  /* Let the frontend finalize the new clause(s) in the outer context,
     then notice any decls used in their sizes.  */
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  gimplify_omp_ctxp = ctx->outer_context;
  lang_hooks.decls.omp_finish_clause (clause, pre_p,
                                      (ctx->region_type & ORT_ACC) != 0);
  if (gimplify_omp_ctxp)
    for (; clause != chain; clause = OMP_CLAUSE_CHAIN (clause))
      if (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP
          && DECL_P (OMP_CLAUSE_SIZE (clause)))
        omp_notice_variable (gimplify_omp_ctxp, OMP_CLAUSE_SIZE (clause),
                             true);
  gimplify_omp_ctxp = ctx;
  return 0;
}

static void
gimplify_adjust_omp_clauses (gimple_seq *pre_p, gimple_seq body,
                             tree *list_p, enum tree_code code)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree *orig_list_p = list_p;
  tree c, decl;
  bool has_inscan_reductions = false;

  if (body)
    {
      /* Scan the body for stores into shared decls, but only when some
         enclosing context actually shares variables.  */
      struct gimplify_omp_ctx *octx;
      for (octx = ctx; octx; octx = octx->outer_context)
        if ((octx->region_type & (ORT_PARALLEL | ORT_TASK | ORT_TEAMS)) != 0)
          break;
      if (octx)
        {
          struct walk_stmt_info wi;
          memset (&wi, 0, sizeof (wi));
          walk_gimple_seq (body, omp_find_stores_stmt,
                           omp_find_stores_op, &wi);
        }
    }

  if (ctx->add_safelen1)
    {
      /* If there are VLAs in the body of simd loop, prevent
vectorization. */ gcc_assert (ctx->region_type == ORT_SIMD); c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN); OMP_CLAUSE_SAFELEN_EXPR (c) = integer_one_node; OMP_CLAUSE_CHAIN (c) = *list_p; *list_p = c; list_p = &OMP_CLAUSE_CHAIN (c); } if (ctx->region_type == ORT_WORKSHARE && ctx->outer_context && ctx->outer_context->region_type == ORT_COMBINED_PARALLEL) { for (c = ctx->outer_context->clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)) { decl = OMP_CLAUSE_DECL (c); splay_tree_node n = splay_tree_lookup (ctx->outer_context->variables, (splay_tree_key) decl); gcc_checking_assert (!splay_tree_lookup (ctx->variables, (splay_tree_key) decl)); omp_add_variable (ctx, decl, n->value); tree c2 = copy_node (c); OMP_CLAUSE_CHAIN (c2) = *list_p; *list_p = c2; if ((n->value & GOVD_FIRSTPRIVATE) == 0) continue; c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c2) = decl; OMP_CLAUSE_CHAIN (c2) = *list_p; *list_p = c2; } } while ((c = *list_p) != NULL) { splay_tree_node n; bool remove = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_FIRSTPRIVATE: if ((ctx->region_type & ORT_TARGET) && (ctx->region_type & ORT_ACC) == 0 && TYPE_ATOMIC (strip_array_types (TREE_TYPE (OMP_CLAUSE_DECL (c))))) { error_at (OMP_CLAUSE_LOCATION (c), "%<_Atomic%> %qD in %<firstprivate%> clause on " "%<target%> construct", OMP_CLAUSE_DECL (c)); remove = true; break; } /* FALLTHRU */ case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_SHARED: case OMP_CLAUSE_LINEAR: decl = OMP_CLAUSE_DECL (c); n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); remove = !(n->value & GOVD_SEEN); if ((n->value & GOVD_LASTPRIVATE_CONDITIONAL) != 0 && code == OMP_PARALLEL && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE) remove = true; if (! 
remove) { bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED; if ((n->value & GOVD_DEBUG_PRIVATE) || lang_hooks.decls.omp_private_debug_clause (decl, shared)) { gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0 || ((n->value & GOVD_DATA_SHARE_CLASS) == GOVD_SHARED)); OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_PRIVATE_DEBUG (c) = 1; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED && (n->value & GOVD_WRITTEN) == 0 && DECL_P (decl) && omp_shared_to_firstprivate_optimizable_decl_p (decl)) OMP_CLAUSE_SHARED_READONLY (c) = 1; else if (DECL_P (decl) && ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED && (n->value & GOVD_WRITTEN) != 0) || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))) && omp_shared_to_firstprivate_optimizable_decl_p (decl)) omp_mark_stores (gimplify_omp_ctxp->outer_context, decl); } break; case OMP_CLAUSE_LASTPRIVATE: /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to accurately reflect the presence of a FIRSTPRIVATE clause. */ decl = OMP_CLAUSE_DECL (c); n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c) = (n->value & GOVD_FIRSTPRIVATE) != 0; if (code == OMP_DISTRIBUTE && OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) { remove = true; error_at (OMP_CLAUSE_LOCATION (c), "same variable used in %<firstprivate%> and " "%<lastprivate%> clauses on %<distribute%> " "construct"); } if (!remove && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && DECL_P (decl) && omp_shared_to_firstprivate_optimizable_decl_p (decl)) omp_mark_stores (gimplify_omp_ctxp->outer_context, decl); if (OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) && code == OMP_PARALLEL) remove = true; break; case OMP_CLAUSE_ALIGNED: decl = OMP_CLAUSE_DECL (c); if (!is_global_var (decl)) { n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); remove = n == NULL || !(n->value & GOVD_SEEN); if (!remove && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE) { struct gimplify_omp_ctx *octx; if (n != NULL && (n->value & 
(GOVD_DATA_SHARE_CLASS & ~GOVD_FIRSTPRIVATE))) remove = true; else for (octx = ctx->outer_context; octx; octx = octx->outer_context) { n = splay_tree_lookup (octx->variables, (splay_tree_key) decl); if (n == NULL) continue; if (n->value & GOVD_LOCAL) break; /* We have to avoid assigning a shared variable to itself when trying to add __builtin_assume_aligned. */ if (n->value & GOVD_SHARED) { remove = true; break; } } } } else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) { n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0) remove = true; } break; case OMP_CLAUSE_NONTEMPORAL: decl = OMP_CLAUSE_DECL (c); n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); remove = n == NULL || !(n->value & GOVD_SEEN); break; case OMP_CLAUSE_MAP: if (code == OMP_TARGET_EXIT_DATA && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER) { remove = true; break; } decl = OMP_CLAUSE_DECL (c); /* Data clauses associated with reductions must be compatible with present_or_copy. Warn and adjust the clause if that is not the case. */ if (ctx->region_type == ORT_ACC_PARALLEL || ctx->region_type == ORT_ACC_SERIAL) { tree t = DECL_P (decl) ? 
decl : TREE_OPERAND (decl, 0); n = NULL; if (DECL_P (t)) n = splay_tree_lookup (ctx->variables, (splay_tree_key) t); if (n && (n->value & GOVD_REDUCTION)) { enum gomp_map_kind kind = OMP_CLAUSE_MAP_KIND (c); OMP_CLAUSE_MAP_IN_REDUCTION (c) = 1; if ((kind & GOMP_MAP_TOFROM) != GOMP_MAP_TOFROM && kind != GOMP_MAP_FORCE_PRESENT && kind != GOMP_MAP_POINTER) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "incompatible data clause with reduction " "on %qE; promoting to %<present_or_copy%>", DECL_NAME (t)); OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_TOFROM); } } } if (!DECL_P (decl)) { if ((ctx->region_type & ORT_TARGET) != 0 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) { if (TREE_CODE (decl) == INDIRECT_REF && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0))) == REFERENCE_TYPE)) decl = TREE_OPERAND (decl, 0); if (TREE_CODE (decl) == COMPONENT_REF) { while (TREE_CODE (decl) == COMPONENT_REF) decl = TREE_OPERAND (decl, 0); if (DECL_P (decl)) { n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); if (!(n->value & GOVD_SEEN)) remove = true; } } } break; } n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); if ((ctx->region_type & ORT_TARGET) != 0 && !(n->value & GOVD_SEEN) && GOMP_MAP_ALWAYS_P (OMP_CLAUSE_MAP_KIND (c)) == 0 && (!is_global_var (decl) || !lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (decl)))) { remove = true; /* For struct element mapping, if struct is never referenced in target block and none of the mapping has always modifier, remove all the struct element mappings, which immediately follow the GOMP_MAP_STRUCT map clause. 
*/ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT) { HOST_WIDE_INT cnt = tree_to_shwi (OMP_CLAUSE_SIZE (c)); while (cnt--) OMP_CLAUSE_CHAIN (c) = OMP_CLAUSE_CHAIN (OMP_CLAUSE_CHAIN (c)); } } else if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_STRUCT && (code == OMP_TARGET_EXIT_DATA || code == OACC_EXIT_DATA)) remove = true; else if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_POINTER && OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_REFERENCE)) { /* For GOMP_MAP_FORCE_DEVICEPTR, we'll never enter here, because for these, TREE_CODE (DECL_SIZE (decl)) will always be INTEGER_CST. */ gcc_assert (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FORCE_DEVICEPTR); tree decl2 = DECL_VALUE_EXPR (decl); gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); decl2 = TREE_OPERAND (decl2, 0); gcc_assert (DECL_P (decl2)); tree mem = build_simple_mem_ref (decl2); OMP_CLAUSE_DECL (c) = mem; OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl)); if (ctx->outer_context) { omp_notice_variable (ctx->outer_context, decl2, true); omp_notice_variable (ctx->outer_context, OMP_CLAUSE_SIZE (c), true); } if (((ctx->region_type & ORT_TARGET) != 0 || !ctx->target_firstprivatize_array_bases) && ((n->value & GOVD_SEEN) == 0 || (n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE)) == 0)) { tree nc = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); OMP_CLAUSE_DECL (nc) = decl; OMP_CLAUSE_SIZE (nc) = size_zero_node; if (ctx->target_firstprivatize_array_bases) OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_FIRSTPRIVATE_POINTER); else OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_POINTER); OMP_CLAUSE_CHAIN (nc) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = nc; c = nc; } } else { if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl); gcc_assert ((n->value & GOVD_SEEN) == 0 || ((n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE)) == 0)); } break; case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case 
OMP_CLAUSE__CACHE_: decl = OMP_CLAUSE_DECL (c); if (!DECL_P (decl)) break; if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) { tree decl2 = DECL_VALUE_EXPR (decl); gcc_assert (TREE_CODE (decl2) == INDIRECT_REF); decl2 = TREE_OPERAND (decl2, 0); gcc_assert (DECL_P (decl2)); tree mem = build_simple_mem_ref (decl2); OMP_CLAUSE_DECL (c) = mem; OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (decl)); if (ctx->outer_context) { omp_notice_variable (ctx->outer_context, decl2, true); omp_notice_variable (ctx->outer_context, OMP_CLAUSE_SIZE (c), true); } } else if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = DECL_SIZE_UNIT (decl); break; case OMP_CLAUSE_REDUCTION: if (OMP_CLAUSE_REDUCTION_INSCAN (c)) { decl = OMP_CLAUSE_DECL (c); n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); if ((n->value & GOVD_REDUCTION_INSCAN) == 0) { remove = true; error_at (OMP_CLAUSE_LOCATION (c), "%qD specified in %<inscan%> %<reduction%> clause " "but not in %<scan%> directive clause", decl); break; } has_inscan_reductions = true; } /* FALLTHRU */ case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: decl = OMP_CLAUSE_DECL (c); /* OpenACC reductions need a present_or_copy data clause. Add one if necessary. Emit error when the reduction is private. 
*/ if (ctx->region_type == ORT_ACC_PARALLEL || ctx->region_type == ORT_ACC_SERIAL) { n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl); if (n->value & (GOVD_PRIVATE | GOVD_FIRSTPRIVATE)) { remove = true; error_at (OMP_CLAUSE_LOCATION (c), "invalid private " "reduction on %qE", DECL_NAME (decl)); } else if ((n->value & GOVD_MAP) == 0) { tree next = OMP_CLAUSE_CHAIN (c); tree nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (nc, GOMP_MAP_TOFROM); OMP_CLAUSE_DECL (nc) = decl; OMP_CLAUSE_CHAIN (c) = nc; lang_hooks.decls.omp_finish_clause (nc, pre_p, (ctx->region_type & ORT_ACC) != 0); while (1) { OMP_CLAUSE_MAP_IN_REDUCTION (nc) = 1; if (OMP_CLAUSE_CHAIN (nc) == NULL) break; nc = OMP_CLAUSE_CHAIN (nc); } OMP_CLAUSE_CHAIN (nc) = next; n->value |= GOVD_MAP; } } if (DECL_P (decl) && omp_shared_to_firstprivate_optimizable_decl_p (decl)) omp_mark_stores (gimplify_omp_ctxp->outer_context, decl); break; case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_COPYPRIVATE: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_THREAD_LIMIT: case OMP_CLAUSE_DIST_SCHEDULE: case OMP_CLAUSE_DEVICE: case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_SAFELEN: case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_DEPEND: case OMP_CLAUSE_PRIORITY: case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_HINT: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_ORDER: case OMP_CLAUSE_BIND: case OMP_CLAUSE_USE_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_ADDR: case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_ASYNC: case OMP_CLAUSE_WAIT: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: case OMP_CLAUSE_GANG: case 
OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: case OMP_CLAUSE_TILE: case OMP_CLAUSE_IF_PRESENT: case OMP_CLAUSE_FINALIZE: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: break; default: gcc_unreachable (); } if (remove) *list_p = OMP_CLAUSE_CHAIN (c); else list_p = &OMP_CLAUSE_CHAIN (c); } /* Add in any implicit data sharing. */ struct gimplify_adjust_omp_clauses_data data; data.list_p = list_p; data.pre_p = pre_p; splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, &data); if (has_inscan_reductions) for (c = *orig_list_p; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause used together with " "%<linear%> clause for a variable other than loop " "iterator"); break; } gimplify_omp_ctxp = ctx->outer_context; delete_omp_context (ctx); } /* Return 0 if CONSTRUCTS selectors don't match the OpenMP context, -1 if unknown yet (simd is involved, won't be known until vectorization) and 1 if they do. If SCORES is non-NULL, it should point to an array of at least 2*NCONSTRUCTS+2 ints, and will be filled with the positions of the CONSTRUCTS (position -1 if it will never match) followed by number of constructs in the OpenMP context construct trait. If the score depends on whether it will be in a declare simd clone or not, the function returns 2 and there will be two sets of the scores, the first one for the case that it is not in a declare simd clone, the other that it is in a declare simd clone. 
*/ int omp_construct_selector_matches (enum tree_code *constructs, int nconstructs, int *scores) { int matched = 0, cnt = 0; bool simd_seen = false; bool target_seen = false; int declare_simd_cnt = -1; auto_vec<enum tree_code, 16> codes;
/* Walk the gimplify context stack outward, counting enclosing constructs
   that participate in the context trait; either record them in CODES (when
   SCORES is requested) or greedily match them against CONSTRUCTS.  */
for (struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; ctx;) { if (((ctx->region_type & ORT_PARALLEL) && ctx->code == OMP_PARALLEL) || ((ctx->region_type & (ORT_TARGET | ORT_IMPLICIT_TARGET | ORT_ACC)) == ORT_TARGET && ctx->code == OMP_TARGET) || ((ctx->region_type & ORT_TEAMS) && ctx->code == OMP_TEAMS) || (ctx->region_type == ORT_WORKSHARE && ctx->code == OMP_FOR) || (ctx->region_type == ORT_SIMD && ctx->code == OMP_SIMD && !omp_find_clause (ctx->clauses, OMP_CLAUSE_BIND))) { ++cnt; if (scores) codes.safe_push (ctx->code); else if (matched < nconstructs && ctx->code == constructs[matched]) { if (ctx->code == OMP_SIMD) { if (matched) return 0; simd_seen = true; } ++matched; } if (ctx->code == OMP_TARGET) { if (scores == NULL) return matched < nconstructs ? 0 : simd_seen ? -1 : 1; target_seen = true; break; } } else if (ctx->region_type == ORT_WORKSHARE && ctx->code == OMP_LOOP && ctx->outer_context && ctx->outer_context->region_type == ORT_COMBINED_PARALLEL && ctx->outer_context->outer_context && ctx->outer_context->outer_context->code == OMP_LOOP && ctx->outer_context->outer_context->distribute) ctx = ctx->outer_context->outer_context; ctx = ctx->outer_context; } if (!target_seen && lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (current_function_decl))) { /* Declare simd is a maybe case, it is supposed to be added only to the omp-simd-clone.c added clones and not to the base function. */ declare_simd_cnt = cnt++; if (scores) codes.safe_push (OMP_SIMD); else if (cnt == 0 && constructs[0] == OMP_SIMD) { gcc_assert (matched == 0); simd_seen = true; if (++matched == nconstructs) return -1; } }
/* Account for constructs implied by an "omp declare variant variant"
   attribute on the current function, translated via
   omp_constructor_traits_to_codes.  */
if (tree attr = lookup_attribute ("omp declare variant variant", DECL_ATTRIBUTES (current_function_decl))) { enum tree_code variant_constructs[5]; int variant_nconstructs = 0; if (!target_seen) variant_nconstructs = omp_constructor_traits_to_codes (TREE_VALUE (attr), variant_constructs); for (int i = 0; i < variant_nconstructs; i++) { ++cnt; if (scores) codes.safe_push (variant_constructs[i]); else if (matched < nconstructs && variant_constructs[i] == constructs[matched]) { if (variant_constructs[i] == OMP_SIMD) { if (matched) return 0; simd_seen = true; } ++matched; } } } if (!target_seen && lookup_attribute ("omp declare target block", DECL_ATTRIBUTES (current_function_decl))) { if (scores) codes.safe_push (OMP_TARGET); else if (matched < nconstructs && constructs[matched] == OMP_TARGET) ++matched; }
/* Score mode: fill SCORES with the matched positions (one pass, or two
   passes when a declare simd clone makes the answer ambiguous — see the
   function comment above for the exact layout).  */
if (scores) { for (int pass = 0; pass < (declare_simd_cnt == -1 ? 1 : 2); pass++) { int j = codes.length () - 1; for (int i = nconstructs - 1; i >= 0; i--) { while (j >= 0 && (pass != 0 || declare_simd_cnt != j) && constructs[i] != codes[j]) --j; if (pass == 0 && declare_simd_cnt != -1 && j > declare_simd_cnt) *scores++ = j - 1; else *scores++ = j; } *scores++ = ((pass == 0 && declare_simd_cnt != -1) ? codes.length () - 1 : codes.length ()); } return declare_simd_cnt == -1 ? 1 : 2; } if (matched == nconstructs) return simd_seen ? -1 : 1; return 0; } /* Gimplify OACC_CACHE. */ static void gimplify_oacc_cache (tree *expr_p, gimple_seq *pre_p) { tree expr = *expr_p; gimplify_scan_omp_clauses (&OACC_CACHE_CLAUSES (expr), pre_p, ORT_ACC, OACC_CACHE); gimplify_adjust_omp_clauses (pre_p, NULL, &OACC_CACHE_CLAUSES (expr), OACC_CACHE); /* TODO: Do something sensible with this information. */ *expr_p = NULL_TREE; } /* Helper function of gimplify_oacc_declare.
The helper's purpose is to, if required, translate 'kind' in CLAUSE into an 'entry' kind and 'exit' kind. The entry kind will replace the one in CLAUSE, while the exit kind will be used in a new omp_clause and returned to the caller. */ static tree gimplify_oacc_declare_1 (tree clause) { HOST_WIDE_INT kind, new_op; bool ret = false; tree c = NULL; kind = OMP_CLAUSE_MAP_KIND (clause); switch (kind) { case GOMP_MAP_ALLOC: new_op = GOMP_MAP_RELEASE; ret = true; break; case GOMP_MAP_FROM: OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_FORCE_ALLOC); new_op = GOMP_MAP_FROM; ret = true; break; case GOMP_MAP_TOFROM: OMP_CLAUSE_SET_MAP_KIND (clause, GOMP_MAP_TO); new_op = GOMP_MAP_FROM; ret = true; break; case GOMP_MAP_DEVICE_RESIDENT: case GOMP_MAP_FORCE_DEVICEPTR: case GOMP_MAP_FORCE_PRESENT: case GOMP_MAP_LINK: case GOMP_MAP_POINTER: case GOMP_MAP_TO: break; default: gcc_unreachable (); break; } if (ret) { c = build_omp_clause (OMP_CLAUSE_LOCATION (clause), OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (c, new_op); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clause); } return c; } /* Gimplify OACC_DECLARE. 
*/ static void gimplify_oacc_declare (tree *expr_p, gimple_seq *pre_p) { tree expr = *expr_p; gomp_target *stmt; tree clauses, t, decl; clauses = OACC_DECLARE_CLAUSES (expr); gimplify_scan_omp_clauses (&clauses, pre_p, ORT_TARGET_DATA, OACC_DECLARE); gimplify_adjust_omp_clauses (pre_p, NULL, &clauses, OACC_DECLARE);
/* For each declared decl: tag it with the "oacc declare target" attribute
   once, and for function-local variables record the function-exit
   counterpart clause (from gimplify_oacc_declare_1) in
   oacc_declare_returns.  */
for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t)) { decl = OMP_CLAUSE_DECL (t); if (TREE_CODE (decl) == MEM_REF) decl = TREE_OPERAND (decl, 0); if (VAR_P (decl) && !is_oacc_declared (decl)) { tree attr = get_identifier ("oacc declare target"); DECL_ATTRIBUTES (decl) = tree_cons (attr, NULL_TREE, DECL_ATTRIBUTES (decl)); } if (VAR_P (decl) && !is_global_var (decl) && DECL_CONTEXT (decl) == current_function_decl) { tree c = gimplify_oacc_declare_1 (t); if (c) { if (oacc_declare_returns == NULL) oacc_declare_returns = new hash_map<tree, tree>; oacc_declare_returns->put (decl, c); } } if (gimplify_omp_ctxp) omp_add_variable (gimplify_omp_ctxp, decl, GOVD_SEEN); }
/* Emit the declare directive as a bodyless OACC_DECLARE target stmt.  */
stmt = gimple_build_omp_target (NULL, GF_OMP_TARGET_KIND_OACC_DECLARE, clauses); gimplify_seq_add_stmt (pre_p, stmt); *expr_p = NULL_TREE; } /* Gimplify the contents of an OMP_PARALLEL statement. This involves gimplification of the body, as well as scanning the body for used variables. We need to do this scan now, because variable-sized decls will be decomposed during gimplification. */ static void gimplify_omp_parallel (tree *expr_p, gimple_seq *pre_p) { tree expr = *expr_p; gimple *g; gimple_seq body = NULL; gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p, OMP_PARALLEL_COMBINED (expr) ? ORT_COMBINED_PARALLEL : ORT_PARALLEL, OMP_PARALLEL); push_gimplify_context (); g = gimplify_and_return_first (OMP_PARALLEL_BODY (expr), &body); if (gimple_code (g) == GIMPLE_BIND) pop_gimplify_context (g); else pop_gimplify_context (NULL); gimplify_adjust_omp_clauses (pre_p, body, &OMP_PARALLEL_CLAUSES (expr), OMP_PARALLEL); g = gimple_build_omp_parallel (body, OMP_PARALLEL_CLAUSES (expr), NULL_TREE, NULL_TREE); if (OMP_PARALLEL_COMBINED (expr)) gimple_omp_set_subcode (g, GF_OMP_PARALLEL_COMBINED); gimplify_seq_add_stmt (pre_p, g); *expr_p = NULL_TREE; } /* Gimplify the contents of an OMP_TASK statement. This involves gimplification of the body, as well as scanning the body for used variables. We need to do this scan now, because variable-sized decls will be decomposed during gimplification. */ static void gimplify_omp_task (tree *expr_p, gimple_seq *pre_p) { tree expr = *expr_p; gimple *g; gimple_seq body = NULL;
/* A bodyless OMP_TASK represents a taskwait-with-depend; mutexinoutset
   dependences are invalid there.  */
if (OMP_TASK_BODY (expr) == NULL_TREE) for (tree c = OMP_TASK_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_MUTEXINOUTSET) { error_at (OMP_CLAUSE_LOCATION (c), "%<mutexinoutset%> kind in %<depend%> clause on a " "%<taskwait%> construct"); break; } gimplify_scan_omp_clauses (&OMP_TASK_CLAUSES (expr), pre_p, omp_find_clause (OMP_TASK_CLAUSES (expr), OMP_CLAUSE_UNTIED) ? ORT_UNTIED_TASK : ORT_TASK, OMP_TASK); if (OMP_TASK_BODY (expr)) { push_gimplify_context (); g = gimplify_and_return_first (OMP_TASK_BODY (expr), &body); if (gimple_code (g) == GIMPLE_BIND) pop_gimplify_context (g); else pop_gimplify_context (NULL); } gimplify_adjust_omp_clauses (pre_p, body, &OMP_TASK_CLAUSES (expr), OMP_TASK); g = gimple_build_omp_task (body, OMP_TASK_CLAUSES (expr), NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (OMP_TASK_BODY (expr) == NULL_TREE) gimple_omp_task_set_taskwait_p (g, true); gimplify_seq_add_stmt (pre_p, g); *expr_p = NULL_TREE; } /* Helper function for gimplify_omp_for. If *TP is not a gimple constant, force it into a temporary initialized in PRE_P and add firstprivate clause to ORIG_FOR_STMT. */ static void gimplify_omp_taskloop_expr (tree type, tree *tp, gimple_seq *pre_p, tree orig_for_stmt) { if (*tp == NULL || is_gimple_constant (*tp)) return; *tp = get_initialized_tmp_var (*tp, pre_p, NULL, false); /* Reference to pointer conversion is considered useless, but is significant for firstprivate clause. Force it here. */ if (type && TREE_CODE (type) == POINTER_TYPE && TREE_CODE (TREE_TYPE (*tp)) == REFERENCE_TYPE) { tree v = create_tmp_var (TYPE_MAIN_VARIANT (type)); tree m = build2 (INIT_EXPR, TREE_TYPE (v), v, *tp); gimplify_and_add (m, pre_p); *tp = v; } tree c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = *tp; OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (orig_for_stmt); OMP_FOR_CLAUSES (orig_for_stmt) = c; } /* Gimplify the gross structure of an OMP_FOR statement.
*/ static enum gimplify_status gimplify_omp_for (tree *expr_p, gimple_seq *pre_p) { tree for_stmt, orig_for_stmt, inner_for_stmt = NULL_TREE, decl, var, t; enum gimplify_status ret = GS_ALL_DONE; enum gimplify_status tret; gomp_for *gfor; gimple_seq for_body, for_pre_body; int i; bitmap has_decl_expr = NULL; enum omp_region_type ort = ORT_WORKSHARE; bool openacc = TREE_CODE (*expr_p) == OACC_LOOP; orig_for_stmt = for_stmt = *expr_p; bool loop_p = (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_BIND) != NULL_TREE); if (OMP_FOR_INIT (for_stmt) == NULL_TREE) { tree *data[4] = { NULL, NULL, NULL, NULL }; gcc_assert (TREE_CODE (for_stmt) != OACC_LOOP); inner_for_stmt = walk_tree (&OMP_FOR_BODY (for_stmt), find_combined_omp_for, data, NULL); if (inner_for_stmt == NULL_TREE) { gcc_assert (seen_error ()); *expr_p = NULL_TREE; return GS_ERROR; } if (data[2] && OMP_FOR_PRE_BODY (*data[2])) { append_to_statement_list_force (OMP_FOR_PRE_BODY (*data[2]), &OMP_FOR_PRE_BODY (for_stmt)); OMP_FOR_PRE_BODY (*data[2]) = NULL_TREE; } if (OMP_FOR_PRE_BODY (inner_for_stmt)) { append_to_statement_list_force (OMP_FOR_PRE_BODY (inner_for_stmt), &OMP_FOR_PRE_BODY (for_stmt)); OMP_FOR_PRE_BODY (inner_for_stmt) = NULL_TREE; } if (data[0]) { /* We have some statements or variable declarations in between the composite construct directives. Move them around the inner_for_stmt. 
*/ data[0] = expr_p; for (i = 0; i < 3; i++) if (data[i]) { tree t = *data[i]; if (i < 2 && data[i + 1] == &OMP_BODY (t)) data[i + 1] = data[i]; *data[i] = OMP_BODY (t); tree body = build3 (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, make_node (BLOCK)); OMP_BODY (t) = body; append_to_statement_list_force (inner_for_stmt, &BIND_EXPR_BODY (body)); *data[3] = t; data[3] = tsi_stmt_ptr (tsi_start (BIND_EXPR_BODY (body))); gcc_assert (*data[3] == inner_for_stmt); } return GS_OK; } for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (inner_for_stmt)); i++) if (!loop_p && OMP_FOR_ORIG_DECLS (inner_for_stmt) && TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i)) == TREE_LIST && TREE_PURPOSE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i))) { tree orig = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i); /* Class iterators aren't allowed on OMP_SIMD, so the only case we need to solve is distribute parallel for. They are allowed on the loop construct, but that is already handled in gimplify_omp_loop. */ gcc_assert (TREE_CODE (inner_for_stmt) == OMP_FOR && TREE_CODE (for_stmt) == OMP_DISTRIBUTE && data[1]); tree orig_decl = TREE_PURPOSE (orig); tree last = TREE_VALUE (orig); tree *pc; for (pc = &OMP_FOR_CLAUSES (inner_for_stmt); *pc; pc = &OMP_CLAUSE_CHAIN (*pc)) if ((OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE || OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LASTPRIVATE) && OMP_CLAUSE_DECL (*pc) == orig_decl) break; if (*pc == NULL_TREE) { tree *spc; for (spc = &OMP_PARALLEL_CLAUSES (*data[1]); *spc; spc = &OMP_CLAUSE_CHAIN (*spc)) if (OMP_CLAUSE_CODE (*spc) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_DECL (*spc) == orig_decl) break; if (*spc) { tree c = *spc; *spc = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = NULL_TREE; *pc = c; } } if (*pc == NULL_TREE) ; else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_PRIVATE) { /* private clause will appear only on inner_for_stmt. Change it into firstprivate, and add private clause on for_stmt. 
*/ tree c = copy_node (*pc); OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt); OMP_FOR_CLAUSES (for_stmt) = c; OMP_CLAUSE_CODE (*pc) = OMP_CLAUSE_FIRSTPRIVATE; lang_hooks.decls.omp_finish_clause (*pc, pre_p, openacc); } else { /* lastprivate clause will appear on both inner_for_stmt and for_stmt. Add firstprivate clause to inner_for_stmt. */ tree c = build_omp_clause (OMP_CLAUSE_LOCATION (*pc), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (*pc); OMP_CLAUSE_CHAIN (c) = *pc; *pc = c; lang_hooks.decls.omp_finish_clause (*pc, pre_p, openacc); } tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]); OMP_PARALLEL_CLAUSES (*data[1]) = c; c = build_omp_clause (UNKNOWN_LOCATION, *pc ? OMP_CLAUSE_SHARED : OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl; OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]); OMP_PARALLEL_CLAUSES (*data[1]) = c; } /* Similarly, take care of C++ range for temporaries, those should be firstprivate on OMP_PARALLEL if any. */ if (data[1]) for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (inner_for_stmt)); i++) if (OMP_FOR_ORIG_DECLS (inner_for_stmt) && TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i)) == TREE_LIST && TREE_CHAIN (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i))) { tree orig = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (inner_for_stmt), i); tree v = TREE_CHAIN (orig); tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_FIRSTPRIVATE); /* First add firstprivate clause for the __for_end artificial decl. */ OMP_CLAUSE_DECL (c) = TREE_VEC_ELT (v, 1); if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == REFERENCE_TYPE) OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c) = 1; OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]); OMP_PARALLEL_CLAUSES (*data[1]) = c; if (TREE_VEC_ELT (v, 0)) { /* And now the same for __for_range artificial decl if it exists. 
*/ c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_VEC_ELT (v, 0); if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == REFERENCE_TYPE) OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c) = 1; OMP_CLAUSE_CHAIN (c) = OMP_PARALLEL_CLAUSES (*data[1]); OMP_PARALLEL_CLAUSES (*data[1]) = c; } } } switch (TREE_CODE (for_stmt)) { case OMP_FOR: if (OMP_FOR_NON_RECTANGULAR (inner_for_stmt ? inner_for_stmt : for_stmt)) { if (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_SCHEDULE)) error_at (EXPR_LOCATION (for_stmt), "%qs clause may not appear on non-rectangular %qs", "schedule", "for"); if (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_ORDERED)) error_at (EXPR_LOCATION (for_stmt), "%qs clause may not appear on non-rectangular %qs", "ordered", "for"); } break; case OMP_DISTRIBUTE: if (OMP_FOR_NON_RECTANGULAR (inner_for_stmt ? inner_for_stmt : for_stmt) && omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_DIST_SCHEDULE)) error_at (EXPR_LOCATION (for_stmt), "%qs clause may not appear on non-rectangular %qs", "dist_schedule", "distribute"); break; case OACC_LOOP: ort = ORT_ACC; break; case OMP_TASKLOOP: if (omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_UNTIED)) ort = ORT_UNTIED_TASKLOOP; else ort = ORT_TASKLOOP; break; case OMP_SIMD: ort = ORT_SIMD; break; default: gcc_unreachable (); } /* Set OMP_CLAUSE_LINEAR_NO_COPYIN flag on explicit linear clause for the IV. */ if (ort == ORT_SIMD && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), 0); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); decl = TREE_OPERAND (t, 0); for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && OMP_CLAUSE_DECL (c) == decl) { OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1; break; } } if (TREE_CODE (for_stmt) != OMP_TASKLOOP) gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, ort, loop_p && TREE_CODE (for_stmt) != OMP_SIMD ? 
OMP_LOOP : TREE_CODE (for_stmt)); if (TREE_CODE (for_stmt) == OMP_DISTRIBUTE) gimplify_omp_ctxp->distribute = true; /* Handle OMP_FOR_INIT. */ for_pre_body = NULL; if ((ort == ORT_SIMD || (inner_for_stmt && TREE_CODE (inner_for_stmt) == OMP_SIMD)) && OMP_FOR_PRE_BODY (for_stmt)) { has_decl_expr = BITMAP_ALLOC (NULL); if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == DECL_EXPR && TREE_CODE (DECL_EXPR_DECL (OMP_FOR_PRE_BODY (for_stmt))) == VAR_DECL) { t = OMP_FOR_PRE_BODY (for_stmt); bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t))); } else if (TREE_CODE (OMP_FOR_PRE_BODY (for_stmt)) == STATEMENT_LIST) { tree_stmt_iterator si; for (si = tsi_start (OMP_FOR_PRE_BODY (for_stmt)); !tsi_end_p (si); tsi_next (&si)) { t = tsi_stmt (si); if (TREE_CODE (t) == DECL_EXPR && TREE_CODE (DECL_EXPR_DECL (t)) == VAR_DECL) bitmap_set_bit (has_decl_expr, DECL_UID (DECL_EXPR_DECL (t))); } } } if (OMP_FOR_PRE_BODY (for_stmt)) { if (TREE_CODE (for_stmt) != OMP_TASKLOOP || gimplify_omp_ctxp) gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body); else { struct gimplify_omp_ctx ctx; memset (&ctx, 0, sizeof (ctx)); ctx.region_type = ORT_NONE; gimplify_omp_ctxp = &ctx; gimplify_and_add (OMP_FOR_PRE_BODY (for_stmt), &for_pre_body); gimplify_omp_ctxp = NULL; } } OMP_FOR_PRE_BODY (for_stmt) = NULL_TREE; if (OMP_FOR_INIT (for_stmt) == NULL_TREE) for_stmt = inner_for_stmt; /* For taskloop, need to gimplify the start, end and step before the taskloop, outside of the taskloop omp context. */ if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP) { for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i); gimple_seq *for_pre_p = (gimple_seq_empty_p (for_pre_body) ? 
pre_p : &for_pre_body); tree type = TREE_TYPE (TREE_OPERAND (t, 0)); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC) { tree v = TREE_OPERAND (t, 1); gimplify_omp_taskloop_expr (type, &TREE_VEC_ELT (v, 1), for_pre_p, orig_for_stmt); gimplify_omp_taskloop_expr (type, &TREE_VEC_ELT (v, 2), for_pre_p, orig_for_stmt); } else gimplify_omp_taskloop_expr (type, &TREE_OPERAND (t, 1), for_pre_p, orig_for_stmt); /* Handle OMP_FOR_COND. */ t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC) { tree v = TREE_OPERAND (t, 1); gimplify_omp_taskloop_expr (type, &TREE_VEC_ELT (v, 1), for_pre_p, orig_for_stmt); gimplify_omp_taskloop_expr (type, &TREE_VEC_ELT (v, 2), for_pre_p, orig_for_stmt); } else gimplify_omp_taskloop_expr (type, &TREE_OPERAND (t, 1), for_pre_p, orig_for_stmt); /* Handle OMP_FOR_INCR. */ t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i); if (TREE_CODE (t) == MODIFY_EXPR) { decl = TREE_OPERAND (t, 0); t = TREE_OPERAND (t, 1); tree *tp = &TREE_OPERAND (t, 1); if (TREE_CODE (t) == PLUS_EXPR && *tp == decl) tp = &TREE_OPERAND (t, 0); gimplify_omp_taskloop_expr (NULL_TREE, tp, for_pre_p, orig_for_stmt); } } gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (orig_for_stmt), pre_p, ort, OMP_TASKLOOP); } if (orig_for_stmt != for_stmt) gimplify_omp_ctxp->combined_loop = true; for_body = NULL; gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == TREE_VEC_LENGTH (OMP_FOR_COND (for_stmt))); gcc_assert (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == TREE_VEC_LENGTH (OMP_FOR_INCR (for_stmt))); tree c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_ORDERED); bool is_doacross = false; if (c && OMP_CLAUSE_ORDERED_EXPR (c)) { is_doacross = true; gimplify_omp_ctxp->loop_iter_var.create (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) * 2); } int collapse = 1, tile = 0; c = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_COLLAPSE); if (c) collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c)); c = omp_find_clause (OMP_FOR_CLAUSES 
(for_stmt), OMP_CLAUSE_TILE); if (c) tile = list_length (OMP_CLAUSE_TILE_LIST (c)); for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); decl = TREE_OPERAND (t, 0); gcc_assert (DECL_P (decl)); gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl)) || POINTER_TYPE_P (TREE_TYPE (decl))); if (is_doacross) { if (TREE_CODE (for_stmt) == OMP_FOR && OMP_FOR_ORIG_DECLS (for_stmt)) { tree orig_decl = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i); if (TREE_CODE (orig_decl) == TREE_LIST) { orig_decl = TREE_PURPOSE (orig_decl); if (!orig_decl) orig_decl = decl; } gimplify_omp_ctxp->loop_iter_var.quick_push (orig_decl); } else gimplify_omp_ctxp->loop_iter_var.quick_push (decl); gimplify_omp_ctxp->loop_iter_var.quick_push (decl); } /* Make sure the iteration variable is private. */ tree c = NULL_TREE; tree c2 = NULL_TREE; if (orig_for_stmt != for_stmt) { /* Preserve this information until we gimplify the inner simd. 
*/ if (has_decl_expr && bitmap_bit_p (has_decl_expr, DECL_UID (decl))) TREE_PRIVATE (t) = 1; } else if (ort == ORT_SIMD) { splay_tree_node n = splay_tree_lookup (gimplify_omp_ctxp->variables, (splay_tree_key) decl); omp_is_private (gimplify_omp_ctxp, decl, 1 + (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) != 1)); if (n != NULL && (n->value & GOVD_DATA_SHARE_CLASS) != 0) { omp_notice_variable (gimplify_omp_ctxp, decl, true); if (n->value & GOVD_LASTPRIVATE_CONDITIONAL) for (tree c3 = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_LASTPRIVATE); c3; c3 = omp_find_clause (OMP_CLAUSE_CHAIN (c3), OMP_CLAUSE_LASTPRIVATE)) if (OMP_CLAUSE_DECL (c3) == decl) { warning_at (OMP_CLAUSE_LOCATION (c3), 0, "conditional %<lastprivate%> on loop " "iterator %qD ignored", decl); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c3) = 0; n->value &= ~GOVD_LASTPRIVATE_CONDITIONAL; } } else if (TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1 && !loop_p) { c = build_omp_clause (input_location, OMP_CLAUSE_LINEAR); OMP_CLAUSE_LINEAR_NO_COPYIN (c) = 1; unsigned int flags = GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN; if ((has_decl_expr && bitmap_bit_p (has_decl_expr, DECL_UID (decl))) || TREE_PRIVATE (t)) { OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1; flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER; } struct gimplify_omp_ctx *outer = gimplify_omp_ctxp->outer_context; if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)) { if (outer->region_type == ORT_WORKSHARE && outer->combined_loop) { n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (n != NULL && (n->value & GOVD_LOCAL) != 0) { OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1; flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER; } else { struct gimplify_omp_ctx *octx = outer->outer_context; if (octx && octx->region_type == ORT_COMBINED_PARALLEL && octx->outer_context && (octx->outer_context->region_type == ORT_WORKSHARE) && octx->outer_context->combined_loop) { octx = octx->outer_context; n = splay_tree_lookup (octx->variables, (splay_tree_key)decl); if (n != NULL && 
(n->value & GOVD_LOCAL) != 0) { OMP_CLAUSE_LINEAR_NO_COPYOUT (c) = 1; flags |= GOVD_LINEAR_LASTPRIVATE_NO_OUTER; } } } } } OMP_CLAUSE_DECL (c) = decl; OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt); OMP_FOR_CLAUSES (for_stmt) = c; omp_add_variable (gimplify_omp_ctxp, decl, flags); if (outer && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)) { if (outer->region_type == ORT_WORKSHARE && outer->combined_loop) { if (outer->outer_context && (outer->outer_context->region_type == ORT_COMBINED_PARALLEL)) outer = outer->outer_context; else if (omp_check_private (outer, decl, false)) outer = NULL; } else if (((outer->region_type & ORT_TASKLOOP) == ORT_TASKLOOP) && outer->combined_loop && !omp_check_private (gimplify_omp_ctxp, decl, false)) ; else if (outer->region_type != ORT_COMBINED_PARALLEL) { omp_notice_variable (outer, decl, true); outer = NULL; } if (outer) { n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0) { omp_add_variable (outer, decl, GOVD_LASTPRIVATE | GOVD_SEEN); if (outer->region_type == ORT_COMBINED_PARALLEL && outer->outer_context && (outer->outer_context->region_type == ORT_WORKSHARE) && outer->outer_context->combined_loop) { outer = outer->outer_context; n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (omp_check_private (outer, decl, false)) outer = NULL; else if (n == NULL || ((n->value & GOVD_DATA_SHARE_CLASS) == 0)) omp_add_variable (outer, decl, GOVD_LASTPRIVATE | GOVD_SEEN); else outer = NULL; } if (outer && outer->outer_context && ((outer->outer_context->region_type & ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS || (((outer->region_type & ORT_TASKLOOP) == ORT_TASKLOOP) && (outer->outer_context->region_type == ORT_COMBINED_PARALLEL)))) { outer = outer->outer_context; n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0) omp_add_variable (outer, decl, GOVD_SHARED | GOVD_SEEN); else outer = NULL; } if (outer 
&& outer->outer_context) omp_notice_variable (outer->outer_context, decl, true); } } } } else { bool lastprivate = (!has_decl_expr || !bitmap_bit_p (has_decl_expr, DECL_UID (decl))); if (TREE_PRIVATE (t)) lastprivate = false; if (loop_p && OMP_FOR_ORIG_DECLS (for_stmt)) { tree elt = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i); if (TREE_CODE (elt) == TREE_LIST && TREE_PURPOSE (elt)) lastprivate = false; } struct gimplify_omp_ctx *outer = gimplify_omp_ctxp->outer_context; if (outer && lastprivate) { if (outer->region_type == ORT_WORKSHARE && outer->combined_loop) { n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (n != NULL && (n->value & GOVD_LOCAL) != 0) { lastprivate = false; outer = NULL; } else if (outer->outer_context && (outer->outer_context->region_type == ORT_COMBINED_PARALLEL)) outer = outer->outer_context; else if (omp_check_private (outer, decl, false)) outer = NULL; } else if (((outer->region_type & ORT_TASKLOOP) == ORT_TASKLOOP) && outer->combined_loop && !omp_check_private (gimplify_omp_ctxp, decl, false)) ; else if (outer->region_type != ORT_COMBINED_PARALLEL) { omp_notice_variable (outer, decl, true); outer = NULL; } if (outer) { n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0) { omp_add_variable (outer, decl, GOVD_LASTPRIVATE | GOVD_SEEN); if (outer->region_type == ORT_COMBINED_PARALLEL && outer->outer_context && (outer->outer_context->region_type == ORT_WORKSHARE) && outer->outer_context->combined_loop) { outer = outer->outer_context; n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (omp_check_private (outer, decl, false)) outer = NULL; else if (n == NULL || ((n->value & GOVD_DATA_SHARE_CLASS) == 0)) omp_add_variable (outer, decl, GOVD_LASTPRIVATE | GOVD_SEEN); else outer = NULL; } if (outer && outer->outer_context && ((outer->outer_context->region_type & ORT_COMBINED_TEAMS) == ORT_COMBINED_TEAMS || (((outer->region_type & ORT_TASKLOOP) 
== ORT_TASKLOOP) && (outer->outer_context->region_type == ORT_COMBINED_PARALLEL)))) { outer = outer->outer_context; n = splay_tree_lookup (outer->variables, (splay_tree_key)decl); if (n == NULL || (n->value & GOVD_DATA_SHARE_CLASS) == 0) omp_add_variable (outer, decl, GOVD_SHARED | GOVD_SEEN); else outer = NULL; } if (outer && outer->outer_context) omp_notice_variable (outer->outer_context, decl, true); } } } c = build_omp_clause (input_location, lastprivate ? OMP_CLAUSE_LASTPRIVATE : OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; OMP_CLAUSE_CHAIN (c) = OMP_FOR_CLAUSES (for_stmt); OMP_FOR_CLAUSES (for_stmt) = c; omp_add_variable (gimplify_omp_ctxp, decl, (lastprivate ? GOVD_LASTPRIVATE : GOVD_PRIVATE) | GOVD_EXPLICIT | GOVD_SEEN); c = NULL_TREE; } } else if (omp_is_private (gimplify_omp_ctxp, decl, 0)) { omp_notice_variable (gimplify_omp_ctxp, decl, true); splay_tree_node n = splay_tree_lookup (gimplify_omp_ctxp->variables, (splay_tree_key) decl); if (n && (n->value & GOVD_LASTPRIVATE_CONDITIONAL)) for (tree c3 = omp_find_clause (OMP_FOR_CLAUSES (for_stmt), OMP_CLAUSE_LASTPRIVATE); c3; c3 = omp_find_clause (OMP_CLAUSE_CHAIN (c3), OMP_CLAUSE_LASTPRIVATE)) if (OMP_CLAUSE_DECL (c3) == decl) { warning_at (OMP_CLAUSE_LOCATION (c3), 0, "conditional %<lastprivate%> on loop " "iterator %qD ignored", decl); OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c3) = 0; n->value &= ~GOVD_LASTPRIVATE_CONDITIONAL; } } else omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN); /* If DECL is not a gimple register, create a temporary variable to act as an iteration counter. This is valid, since DECL cannot be modified in the body of the loop. Similarly for any iteration vars in simd with collapse > 1 where the iterator vars must be lastprivate. 
*/ if (orig_for_stmt != for_stmt) var = decl; else if (!is_gimple_reg (decl) || (ort == ORT_SIMD && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) > 1)) { struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; /* Make sure omp_add_variable is not called on it prematurely. We call it ourselves a few lines later. */ gimplify_omp_ctxp = NULL; var = create_tmp_var (TREE_TYPE (decl), get_name (decl)); gimplify_omp_ctxp = ctx; TREE_OPERAND (t, 0) = var; gimplify_seq_add_stmt (&for_body, gimple_build_assign (decl, var)); if (ort == ORT_SIMD && TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)) == 1) { c2 = build_omp_clause (input_location, OMP_CLAUSE_LINEAR); OMP_CLAUSE_LINEAR_NO_COPYIN (c2) = 1; OMP_CLAUSE_LINEAR_NO_COPYOUT (c2) = 1; OMP_CLAUSE_DECL (c2) = var; OMP_CLAUSE_CHAIN (c2) = OMP_FOR_CLAUSES (for_stmt); OMP_FOR_CLAUSES (for_stmt) = c2; omp_add_variable (gimplify_omp_ctxp, var, GOVD_LINEAR | GOVD_EXPLICIT | GOVD_SEEN); if (c == NULL_TREE) { c = c2; c2 = NULL_TREE; } } else omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN); } else var = decl; if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC) { tree lb = TREE_OPERAND (t, 1); tret = gimplify_expr (&TREE_VEC_ELT (lb, 1), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); ret = MIN (ret, tret); tret = gimplify_expr (&TREE_VEC_ELT (lb, 2), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); } else tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); ret = MIN (ret, tret); if (ret == GS_ERROR) return ret; /* Handle OMP_FOR_COND. 
*/ t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i); gcc_assert (COMPARISON_CLASS_P (t)); gcc_assert (TREE_OPERAND (t, 0) == decl); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC) { tree ub = TREE_OPERAND (t, 1); tret = gimplify_expr (&TREE_VEC_ELT (ub, 1), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); ret = MIN (ret, tret); tret = gimplify_expr (&TREE_VEC_ELT (ub, 2), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); } else tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); ret = MIN (ret, tret); /* Handle OMP_FOR_INCR. */ t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i); switch (TREE_CODE (t)) { case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: { tree decl = TREE_OPERAND (t, 0); /* c_omp_for_incr_canonicalize_ptr() should have been called to massage things appropriately. */ gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl))); if (orig_for_stmt != for_stmt) break; t = build_int_cst (TREE_TYPE (decl), 1); if (c) OMP_CLAUSE_LINEAR_STEP (c) = t; t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t); t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t); TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t; break; } case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: /* c_omp_for_incr_canonicalize_ptr() should have been called to massage things appropriately. */ gcc_assert (!POINTER_TYPE_P (TREE_TYPE (decl))); if (orig_for_stmt != for_stmt) break; t = build_int_cst (TREE_TYPE (decl), -1); if (c) OMP_CLAUSE_LINEAR_STEP (c) = t; t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t); t = build2 (MODIFY_EXPR, TREE_TYPE (var), var, t); TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i) = t; break; case MODIFY_EXPR: gcc_assert (TREE_OPERAND (t, 0) == decl); TREE_OPERAND (t, 0) = var; t = TREE_OPERAND (t, 1); switch (TREE_CODE (t)) { case PLUS_EXPR: if (TREE_OPERAND (t, 1) == decl) { TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0); TREE_OPERAND (t, 0) = var; break; } /* Fallthru. 
*/ case MINUS_EXPR: case POINTER_PLUS_EXPR: gcc_assert (TREE_OPERAND (t, 0) == decl); TREE_OPERAND (t, 0) = var; break; default: gcc_unreachable (); } tret = gimplify_expr (&TREE_OPERAND (t, 1), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); ret = MIN (ret, tret); if (c) { tree step = TREE_OPERAND (t, 1); tree stept = TREE_TYPE (decl); if (POINTER_TYPE_P (stept)) stept = sizetype; step = fold_convert (stept, step); if (TREE_CODE (t) == MINUS_EXPR) step = fold_build1 (NEGATE_EXPR, stept, step); OMP_CLAUSE_LINEAR_STEP (c) = step; if (step != TREE_OPERAND (t, 1)) { tret = gimplify_expr (&OMP_CLAUSE_LINEAR_STEP (c), &for_pre_body, NULL, is_gimple_val, fb_rvalue, false); ret = MIN (ret, tret); } } break; default: gcc_unreachable (); } if (c2) { gcc_assert (c); OMP_CLAUSE_LINEAR_STEP (c2) = OMP_CLAUSE_LINEAR_STEP (c); } if ((var != decl || collapse > 1 || tile) && orig_for_stmt == for_stmt) { for (c = OMP_FOR_CLAUSES (for_stmt); c ; c = OMP_CLAUSE_CHAIN (c)) if (((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) == NULL) || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c) && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) == NULL)) && OMP_CLAUSE_DECL (c) == decl) { if (is_doacross && (collapse == 1 || i >= collapse)) t = var; else { t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (t, 0) == var); t = TREE_OPERAND (t, 1); gcc_assert (TREE_CODE (t) == PLUS_EXPR || TREE_CODE (t) == MINUS_EXPR || TREE_CODE (t) == POINTER_PLUS_EXPR); gcc_assert (TREE_OPERAND (t, 0) == var); t = build2 (TREE_CODE (t), TREE_TYPE (decl), is_doacross ? 
var : decl, TREE_OPERAND (t, 1)); } gimple_seq *seq; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE) seq = &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c); else seq = &OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c); push_gimplify_context (); gimplify_assign (decl, t, seq); gimple *bind = NULL; if (gimplify_ctxp->temps) { bind = gimple_build_bind (NULL_TREE, *seq, NULL_TREE); *seq = NULL; gimplify_seq_add_stmt (seq, bind); } pop_gimplify_context (bind); } } if (OMP_FOR_NON_RECTANGULAR (for_stmt) && var != decl) for (int j = i + 1; j < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); j++) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), j); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC && TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) == decl) TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) = var; t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), j); gcc_assert (COMPARISON_CLASS_P (t)); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC && TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) == decl) TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) = var; } } BITMAP_FREE (has_decl_expr); if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP || (loop_p && orig_for_stmt == for_stmt)) { push_gimplify_context (); if (TREE_CODE (OMP_FOR_BODY (orig_for_stmt)) != BIND_EXPR) { OMP_FOR_BODY (orig_for_stmt) = build3 (BIND_EXPR, void_type_node, NULL, OMP_FOR_BODY (orig_for_stmt), NULL); TREE_SIDE_EFFECTS (OMP_FOR_BODY (orig_for_stmt)) = 1; } } gimple *g = gimplify_and_return_first (OMP_FOR_BODY (orig_for_stmt), &for_body); if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP || (loop_p && orig_for_stmt == for_stmt)) { if (gimple_code (g) == GIMPLE_BIND) pop_gimplify_context (g); else pop_gimplify_context (NULL); } if (orig_for_stmt != for_stmt) for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i); decl = TREE_OPERAND (t, 0); struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP) gimplify_omp_ctxp = ctx->outer_context; var = 
create_tmp_var (TREE_TYPE (decl), get_name (decl)); gimplify_omp_ctxp = ctx; omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN); TREE_OPERAND (t, 0) = var; t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i); TREE_OPERAND (t, 1) = copy_node (TREE_OPERAND (t, 1)); TREE_OPERAND (TREE_OPERAND (t, 1), 0) = var; if (OMP_FOR_NON_RECTANGULAR (for_stmt)) for (int j = i + 1; j < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); j++) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), j); gcc_assert (TREE_CODE (t) == MODIFY_EXPR); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC && TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) == decl) { TREE_OPERAND (t, 1) = copy_node (TREE_OPERAND (t, 1)); TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) = var; } t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), j); gcc_assert (COMPARISON_CLASS_P (t)); if (TREE_CODE (TREE_OPERAND (t, 1)) == TREE_VEC && TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) == decl) { TREE_OPERAND (t, 1) = copy_node (TREE_OPERAND (t, 1)); TREE_VEC_ELT (TREE_OPERAND (t, 1), 0) = var; } } } gimplify_adjust_omp_clauses (pre_p, for_body, &OMP_FOR_CLAUSES (orig_for_stmt), TREE_CODE (orig_for_stmt)); int kind; switch (TREE_CODE (orig_for_stmt)) { case OMP_FOR: kind = GF_OMP_FOR_KIND_FOR; break; case OMP_SIMD: kind = GF_OMP_FOR_KIND_SIMD; break; case OMP_DISTRIBUTE: kind = GF_OMP_FOR_KIND_DISTRIBUTE; break; case OMP_TASKLOOP: kind = GF_OMP_FOR_KIND_TASKLOOP; break; case OACC_LOOP: kind = GF_OMP_FOR_KIND_OACC_LOOP; break; default: gcc_unreachable (); } if (loop_p && kind == GF_OMP_FOR_KIND_SIMD) { gimplify_seq_add_seq (pre_p, for_pre_body); for_pre_body = NULL; } gfor = gimple_build_omp_for (for_body, kind, OMP_FOR_CLAUSES (orig_for_stmt), TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)), for_pre_body); if (orig_for_stmt != for_stmt) gimple_omp_for_set_combined_p (gfor, true); if (gimplify_omp_ctxp && (gimplify_omp_ctxp->combined_loop || (gimplify_omp_ctxp->region_type == ORT_COMBINED_PARALLEL && gimplify_omp_ctxp->outer_context && 
gimplify_omp_ctxp->outer_context->combined_loop))) { gimple_omp_for_set_combined_into_p (gfor, true); if (gimplify_omp_ctxp->combined_loop) gcc_assert (TREE_CODE (orig_for_stmt) == OMP_SIMD); else gcc_assert (TREE_CODE (orig_for_stmt) == OMP_FOR); } for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++) { t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i); gimple_omp_for_set_index (gfor, i, TREE_OPERAND (t, 0)); gimple_omp_for_set_initial (gfor, i, TREE_OPERAND (t, 1)); t = TREE_VEC_ELT (OMP_FOR_COND (for_stmt), i); gimple_omp_for_set_cond (gfor, i, TREE_CODE (t)); gimple_omp_for_set_final (gfor, i, TREE_OPERAND (t, 1)); t = TREE_VEC_ELT (OMP_FOR_INCR (for_stmt), i); gimple_omp_for_set_incr (gfor, i, TREE_OPERAND (t, 1)); } /* OMP_TASKLOOP is gimplified as two GIMPLE_OMP_FOR taskloop constructs with GIMPLE_OMP_TASK sandwiched in between them. The outer taskloop stands for computing the number of iterations, counts for collapsed loops and holding taskloop specific clauses. The task construct stands for the effect of data sharing on the explicit task it creates and the inner taskloop stands for expansion of the static loop inside of the explicit task construct. */ if (TREE_CODE (orig_for_stmt) == OMP_TASKLOOP) { tree *gfor_clauses_ptr = gimple_omp_for_clauses_ptr (gfor); tree task_clauses = NULL_TREE; tree c = *gfor_clauses_ptr; tree *gtask_clauses_ptr = &task_clauses; tree outer_for_clauses = NULL_TREE; tree *gforo_clauses_ptr = &outer_for_clauses; for (; c; c = OMP_CLAUSE_CHAIN (c)) switch (OMP_CLAUSE_CODE (c)) { /* These clauses are allowed on task, move them there. 
*/ case OMP_CLAUSE_SHARED: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_IF: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_FINAL: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PRIORITY: case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: *gtask_clauses_ptr = c; gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (c); break; case OMP_CLAUSE_PRIVATE: if (OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c)) { /* We want private on outer for and firstprivate on task. */ *gtask_clauses_ptr = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c); lang_hooks.decls.omp_finish_clause (*gtask_clauses_ptr, NULL, openacc); gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr); *gforo_clauses_ptr = c; gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c); } else { *gtask_clauses_ptr = c; gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (c); } break; /* These clauses go into outer taskloop clauses. */ case OMP_CLAUSE_GRAINSIZE: case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_NOGROUP: *gforo_clauses_ptr = c; gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (c); break; /* Taskloop clause we duplicate on both taskloops. */ case OMP_CLAUSE_COLLAPSE: *gfor_clauses_ptr = c; gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c); *gforo_clauses_ptr = copy_node (c); gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (*gforo_clauses_ptr); break; /* For lastprivate, keep the clause on inner taskloop, and add a shared clause on task. If the same decl is also firstprivate, add also firstprivate clause on the inner taskloop. 
*/ case OMP_CLAUSE_LASTPRIVATE: if (OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c)) { /* For taskloop C++ lastprivate IVs, we want: 1) private on outer taskloop 2) firstprivate and shared on task 3) lastprivate on inner taskloop */ *gtask_clauses_ptr = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c); lang_hooks.decls.omp_finish_clause (*gtask_clauses_ptr, NULL, openacc); gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr); OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c) = 1; *gforo_clauses_ptr = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (*gforo_clauses_ptr) = OMP_CLAUSE_DECL (c); OMP_CLAUSE_PRIVATE_TASKLOOP_IV (*gforo_clauses_ptr) = 1; TREE_TYPE (*gforo_clauses_ptr) = TREE_TYPE (c); gforo_clauses_ptr = &OMP_CLAUSE_CHAIN (*gforo_clauses_ptr); } *gfor_clauses_ptr = c; gfor_clauses_ptr = &OMP_CLAUSE_CHAIN (c); *gtask_clauses_ptr = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_SHARED); OMP_CLAUSE_DECL (*gtask_clauses_ptr) = OMP_CLAUSE_DECL (c); if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)) OMP_CLAUSE_SHARED_FIRSTPRIVATE (*gtask_clauses_ptr) = 1; gtask_clauses_ptr = &OMP_CLAUSE_CHAIN (*gtask_clauses_ptr); break; default: gcc_unreachable (); } *gfor_clauses_ptr = NULL_TREE; *gtask_clauses_ptr = NULL_TREE; *gforo_clauses_ptr = NULL_TREE; g = gimple_build_bind (NULL_TREE, gfor, NULL_TREE); g = gimple_build_omp_task (g, task_clauses, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); gimple_omp_task_set_taskloop_p (g, true); g = gimple_build_bind (NULL_TREE, g, NULL_TREE); gomp_for *gforo = gimple_build_omp_for (g, GF_OMP_FOR_KIND_TASKLOOP, outer_for_clauses, gimple_omp_for_collapse (gfor), gimple_omp_for_pre_body (gfor)); gimple_omp_for_set_pre_body (gfor, NULL); gimple_omp_for_set_combined_p (gforo, true); gimple_omp_for_set_combined_into_p (gfor, true); for (i = 0; i < (int) gimple_omp_for_collapse (gfor); i++) { tree type = TREE_TYPE (gimple_omp_for_index 
(gfor, i)); tree v = create_tmp_var (type); gimple_omp_for_set_index (gforo, i, v); t = unshare_expr (gimple_omp_for_initial (gfor, i)); gimple_omp_for_set_initial (gforo, i, t); gimple_omp_for_set_cond (gforo, i, gimple_omp_for_cond (gfor, i)); t = unshare_expr (gimple_omp_for_final (gfor, i)); gimple_omp_for_set_final (gforo, i, t); t = unshare_expr (gimple_omp_for_incr (gfor, i)); gcc_assert (TREE_OPERAND (t, 0) == gimple_omp_for_index (gfor, i)); TREE_OPERAND (t, 0) = v; gimple_omp_for_set_incr (gforo, i, t); t = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (t) = v; OMP_CLAUSE_CHAIN (t) = gimple_omp_for_clauses (gforo); gimple_omp_for_set_clauses (gforo, t); if (OMP_FOR_NON_RECTANGULAR (for_stmt)) { tree *p1 = NULL, *p2 = NULL; t = gimple_omp_for_initial (gforo, i); if (TREE_CODE (t) == TREE_VEC) p1 = &TREE_VEC_ELT (t, 0); t = gimple_omp_for_final (gforo, i); if (TREE_CODE (t) == TREE_VEC) { if (p1) p2 = &TREE_VEC_ELT (t, 0); else p1 = &TREE_VEC_ELT (t, 0); } if (p1) { int j; for (j = 0; j < i; j++) if (*p1 == gimple_omp_for_index (gfor, j)) { *p1 = gimple_omp_for_index (gforo, j); if (p2) *p2 = *p1; break; } gcc_assert (j < i); } } } gimplify_seq_add_stmt (pre_p, gforo); } else gimplify_seq_add_stmt (pre_p, gfor); if (TREE_CODE (orig_for_stmt) == OMP_FOR) { struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp; unsigned lastprivate_conditional = 0; while (ctx && (ctx->region_type == ORT_TARGET_DATA || ctx->region_type == ORT_TASKGROUP)) ctx = ctx->outer_context; if (ctx && (ctx->region_type & ORT_PARALLEL) != 0) for (tree c = gimple_omp_for_clauses (gfor); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)) ++lastprivate_conditional; if (lastprivate_conditional) { struct omp_for_data fd; omp_extract_for_data (gfor, &fd, NULL); tree type = build_array_type_nelts (unsigned_type_for (fd.iter_type), lastprivate_conditional); tree var = create_tmp_var_raw (type); tree c = 
	    build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__CONDTEMP_);
	  OMP_CLAUSE_DECL (c) = var;
	  OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (gfor);
	  gimple_omp_for_set_clauses (gfor, c);
	  omp_add_variable (ctx, var, GOVD_CONDTEMP | GOVD_SEEN);
	}
    }
  else if (TREE_CODE (orig_for_stmt) == OMP_SIMD)
    {
      /* Count lastprivate(conditional:) clauses on the simd construct.  */
      unsigned lastprivate_conditional = 0;
      for (tree c = gimple_omp_for_clauses (gfor); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	    && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
	  ++lastprivate_conditional;
      if (lastprivate_conditional)
	{
	  struct omp_for_data fd;
	  omp_extract_for_data (gfor, &fd, NULL);
	  tree type = unsigned_type_for (fd.iter_type);
	  /* Add one _condtemp_ clause with a fresh temporary per
	     conditional lastprivate found above.  */
	  while (lastprivate_conditional--)
	    {
	      tree c = build_omp_clause (UNKNOWN_LOCATION,
					 OMP_CLAUSE__CONDTEMP_);
	      OMP_CLAUSE_DECL (c) = create_tmp_var (type);
	      OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (gfor);
	      gimple_omp_for_set_clauses (gfor, c);
	    }
	}
    }

  if (ret != GS_ALL_DONE)
    return GS_ERROR;
  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}

/* Helper for gimplify_omp_loop, called through walk_tree.  DATA points
   to a two-element tree array: replace uses of the reduction
   placeholders of clause data[0] with the corresponding placeholders
   of clause data[1].  */

static tree
replace_reduction_placeholders (tree *tp, int *walk_subtrees, void *data)
{
  if (DECL_P (*tp))
    {
      tree *d = (tree *) data;
      if (*tp == OMP_CLAUSE_REDUCTION_PLACEHOLDER (d[0]))
	{
	  *tp = OMP_CLAUSE_REDUCTION_PLACEHOLDER (d[1]);
	  *walk_subtrees = 0;
	}
      else if (*tp == OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (d[0]))
	{
	  *tp = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (d[1]);
	  *walk_subtrees = 0;
	}
    }
  return NULL_TREE;
}

/* Gimplify the gross structure of an OMP_LOOP statement.  The construct
   is lowered by rewriting FOR_STMT into OMP_SIMD and, depending on the
   bind clause kind, wrapping it in additional OMP_FOR / OMP_DISTRIBUTE /
   OMP_PARALLEL levels before handing off to gimplify_omp_for.  */

static enum gimplify_status
gimplify_omp_loop (tree *expr_p, gimple_seq *pre_p)
{
  tree for_stmt = *expr_p;
  tree clauses = OMP_FOR_CLAUSES (for_stmt);
  struct gimplify_omp_ctx *octx = gimplify_omp_ctxp;
  enum omp_clause_bind_kind kind = OMP_CLAUSE_BIND_THREAD;
  int i;

  /* If order is not present, the behavior is as if order(concurrent)
     appeared.  */
  tree order = omp_find_clause (clauses, OMP_CLAUSE_ORDER);
  if (order == NULL_TREE)
    {
      order = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_ORDER);
      OMP_CLAUSE_CHAIN (order) = clauses;
      OMP_FOR_CLAUSES (for_stmt) = clauses = order;
    }

  /* If bind is not present, determine the binding from the enclosing
     context; otherwise validate the user-specified bind kind.  */
  tree bind = omp_find_clause (clauses, OMP_CLAUSE_BIND);
  if (bind == NULL_TREE)
    {
      if (!flag_openmp) /* flag_openmp_simd */
	;
      else if (octx && (octx->region_type & ORT_TEAMS) != 0)
	kind = OMP_CLAUSE_BIND_TEAMS;
      else if (octx && (octx->region_type & ORT_PARALLEL) != 0)
	kind = OMP_CLAUSE_BIND_PARALLEL;
      else
	{
	  /* Skip enclosing OpenACC and no-op contexts when looking for
	     an enclosing OpenMP construct.  */
	  for (; octx; octx = octx->outer_context)
	    {
	      if ((octx->region_type & ORT_ACC) != 0
		  || octx->region_type == ORT_NONE
		  || octx->region_type == ORT_IMPLICIT_TARGET)
		continue;
	      break;
	    }
	  if (octx == NULL && !in_omp_construct)
	    error_at (EXPR_LOCATION (for_stmt),
		      "%<bind%> clause not specified on a %<loop%> "
		      "construct not nested inside another OpenMP construct");
	}
      bind = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_BIND);
      OMP_CLAUSE_CHAIN (bind) = clauses;
      OMP_CLAUSE_BIND_KIND (bind) = kind;
      OMP_FOR_CLAUSES (for_stmt) = bind;
    }
  else
    switch (OMP_CLAUSE_BIND_KIND (bind))
      {
      case OMP_CLAUSE_BIND_THREAD:
	break;
      case OMP_CLAUSE_BIND_PARALLEL:
	if (!flag_openmp) /* flag_openmp_simd */
	  {
	    OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	    break;
	  }
	for (; octx; octx = octx->outer_context)
	  if (octx->region_type == ORT_SIMD
	      && omp_find_clause (octx->clauses, OMP_CLAUSE_BIND) == NULL_TREE)
	    {
	      error_at (EXPR_LOCATION (for_stmt),
			"%<bind(parallel)%> on a %<loop%> construct nested "
			"inside %<simd%> construct");
	      OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	      break;
	    }
	kind = OMP_CLAUSE_BIND_PARALLEL;
	break;
      case OMP_CLAUSE_BIND_TEAMS:
	if (!flag_openmp) /* flag_openmp_simd */
	  {
	    OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	    break;
	  }
	if ((octx
	     && octx->region_type != ORT_IMPLICIT_TARGET
	     && octx->region_type != ORT_NONE
	     && (octx->region_type & ORT_TEAMS) == 0)
	    || in_omp_construct)
	  {
	    error_at (EXPR_LOCATION (for_stmt),
		      "%<bind(teams)%> on a %<loop%> region not strictly "
		      "nested inside of a %<teams%> region");
	    OMP_CLAUSE_BIND_KIND (bind) = OMP_CLAUSE_BIND_THREAD;
	    break;
	  }
	kind = OMP_CLAUSE_BIND_TEAMS;
	break;
      default:
	gcc_unreachable ();
      }

  /* Diagnose and clean up clauses not valid on the loop construct:
     inscan/task reduction modifiers, and lastprivate of a variable
     that is not one of the loop iterators.  */
  for (tree *pc = &OMP_FOR_CLAUSES (for_stmt); *pc; )
    switch (OMP_CLAUSE_CODE (*pc))
      {
      case OMP_CLAUSE_REDUCTION:
	if (OMP_CLAUSE_REDUCTION_INSCAN (*pc))
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<inscan%> %<reduction%> clause on "
		      "%qs construct", "loop");
	    OMP_CLAUSE_REDUCTION_INSCAN (*pc) = 0;
	  }
	if (OMP_CLAUSE_REDUCTION_TASK (*pc))
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "invalid %<task%> reduction modifier on construct "
		      "other than %<parallel%>, %<for%> or %<sections%>");
	    OMP_CLAUSE_REDUCTION_TASK (*pc) = 0;
	  }
	pc = &OMP_CLAUSE_CHAIN (*pc);
	break;
      case OMP_CLAUSE_LASTPRIVATE:
	for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
	  {
	    tree t = TREE_VEC_ELT (OMP_FOR_INIT (for_stmt), i);
	    gcc_assert (TREE_CODE (t) == MODIFY_EXPR);
	    if (OMP_CLAUSE_DECL (*pc) == TREE_OPERAND (t, 0))
	      break;
	    if (OMP_FOR_ORIG_DECLS (for_stmt)
		&& TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
					    i)) == TREE_LIST
		&& TREE_PURPOSE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
					       i)))
	      {
		tree orig = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
		if (OMP_CLAUSE_DECL (*pc) == TREE_PURPOSE (orig))
		  break;
	      }
	  }
	if (i == TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)))
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<lastprivate%> clause on a %<loop%> construct refers "
		      "to a variable %qD which is not the loop iterator",
		      OMP_CLAUSE_DECL (*pc));
	    /* Remove the invalid clause from the chain.  */
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    break;
	  }
	pc = &OMP_CLAUSE_CHAIN (*pc);
	break;
      default:
	pc = &OMP_CLAUSE_CHAIN (*pc);
	break;
      }

  /* The innermost level is always simd.  */
  TREE_SET_CODE (for_stmt, OMP_SIMD);

  /* Number of extra wrapping passes: bind(thread) -> none,
     bind(parallel) -> one (for), bind(teams) -> two (distribute,
     then parallel for).  KIND can only be one of these three here.  */
  int last;
  switch (kind)
    {
    case OMP_CLAUSE_BIND_THREAD: last = 0; break;
    case OMP_CLAUSE_BIND_PARALLEL: last = 1; break;
    case OMP_CLAUSE_BIND_TEAMS: last = 2; break;
    }
  for (int pass = 1; pass <= last; pass++)
    {
      if (pass == 2)
	{
	  /* Second pass also wraps everything in a combined
	     OMP_PARALLEL, with firstprivate clauses for the original
	     decls recorded in OMP_FOR_ORIG_DECLS.  */
	  tree bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
	  append_to_statement_list (*expr_p, &BIND_EXPR_BODY (bind));
	  *expr_p = make_node (OMP_PARALLEL);
	  TREE_TYPE (*expr_p) = void_type_node;
	  OMP_PARALLEL_BODY (*expr_p) = bind;
	  OMP_PARALLEL_COMBINED (*expr_p) = 1;
	  SET_EXPR_LOCATION (*expr_p, EXPR_LOCATION (for_stmt));
	  tree *pc = &OMP_PARALLEL_CLAUSES (*expr_p);
	  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (for_stmt)); i++)
	    if (OMP_FOR_ORIG_DECLS (for_stmt)
		&& (TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt),
					     i))
		    == TREE_LIST))
	      {
		tree elt = TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (for_stmt), i);
		if (TREE_PURPOSE (elt) && TREE_VALUE (elt))
		  {
		    *pc = build_omp_clause (UNKNOWN_LOCATION,
					    OMP_CLAUSE_FIRSTPRIVATE);
		    OMP_CLAUSE_DECL (*pc) = TREE_VALUE (elt);
		    pc = &OMP_CLAUSE_CHAIN (*pc);
		  }
	      }
	}
      /* Build the wrapping looping construct and copy/move the
	 applicable clauses onto it.  */
      tree t = make_node (pass == 2 ? OMP_DISTRIBUTE : OMP_FOR);
      tree *pc = &OMP_FOR_CLAUSES (t);
      TREE_TYPE (t) = void_type_node;
      OMP_FOR_BODY (t) = *expr_p;
      SET_EXPR_LOCATION (t, EXPR_LOCATION (for_stmt));
      for (tree c = OMP_FOR_CLAUSES (for_stmt); c; c = OMP_CLAUSE_CHAIN (c))
	switch (OMP_CLAUSE_CODE (c))
	  {
	  case OMP_CLAUSE_BIND:
	  case OMP_CLAUSE_ORDER:
	  case OMP_CLAUSE_COLLAPSE:
	    *pc = copy_node (c);
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	    break;
	  case OMP_CLAUSE_PRIVATE:
	  case OMP_CLAUSE_FIRSTPRIVATE:
	    /* Only needed on innermost.  */
	    break;
	  case OMP_CLAUSE_LASTPRIVATE:
	    if (OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c) && pass != last)
	      {
		*pc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					OMP_CLAUSE_FIRSTPRIVATE);
		OMP_CLAUSE_DECL (*pc) = OMP_CLAUSE_DECL (c);
		lang_hooks.decls.omp_finish_clause (*pc, NULL, false);
		pc = &OMP_CLAUSE_CHAIN (*pc);
	      }
	    *pc = copy_node (c);
	    OMP_CLAUSE_LASTPRIVATE_STMT (*pc) = NULL_TREE;
	    TREE_TYPE (*pc) = unshare_expr (TREE_TYPE (c));
	    if (OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c))
	      {
		if (pass != last)
		  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (*pc) = 1;
		else
		  lang_hooks.decls.omp_finish_clause (*pc, NULL, false);
		OMP_CLAUSE_LASTPRIVATE_LOOP_IV (*pc) = 0;
	      }
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	    break;
	  case OMP_CLAUSE_REDUCTION:
	    /* Copy the reduction clause, unsharing the init/merge
	       sequences and redirecting them to fresh placeholders.  */
	    *pc = copy_node (c);
	    OMP_CLAUSE_DECL (*pc) = unshare_expr (OMP_CLAUSE_DECL (c));
	    TREE_TYPE (*pc) = unshare_expr (TREE_TYPE (c));
	    OMP_CLAUSE_REDUCTION_INIT (*pc)
	      = unshare_expr (OMP_CLAUSE_REDUCTION_INIT (c));
	    OMP_CLAUSE_REDUCTION_MERGE (*pc)
	      = unshare_expr (OMP_CLAUSE_REDUCTION_MERGE (c));
	    if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (*pc))
	      {
		OMP_CLAUSE_REDUCTION_PLACEHOLDER (*pc)
		  = copy_node (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c));
		if (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (*pc))
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (*pc)
		    = copy_node (OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c));
		tree nc = *pc;
		tree data[2] = { c, nc };
		walk_tree_without_duplicates (&OMP_CLAUSE_REDUCTION_INIT (nc),
					      replace_reduction_placeholders,
					      data);
		walk_tree_without_duplicates (&OMP_CLAUSE_REDUCTION_MERGE (nc),
					      replace_reduction_placeholders,
					      data);
	      }
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	    break;
	  default:
	    gcc_unreachable ();
	  }
      *pc = NULL_TREE;
      *expr_p = t;
    }
  return gimplify_omp_for (expr_p, pre_p);
}

/* Helper function of optimize_target_teams, find OMP_TEAMS inside
   of OMP_TARGET's body.
*/

static tree
find_omp_teams (tree *tp, int *walk_subtrees, void *)
{
  /* Walk only through wrappers that cannot themselves be a teams
     region: bind expressions and statement lists.  Anything else
     terminates the search in that subtree.  */
  *walk_subtrees = 0;
  if (TREE_CODE (*tp) == OMP_TEAMS)
    return *tp;
  if (TREE_CODE (*tp) == BIND_EXPR || TREE_CODE (*tp) == STATEMENT_LIST)
    *walk_subtrees = 1;
  return NULL_TREE;
}

/* Helper function of optimize_target_teams, determine if the expression
   can be computed safely before the target construct on the host.
   Returns NULL_TREE when the subexpression is computable, or the
   offending tree (which aborts the walk) when it is not.  */

static tree
computable_teams_clause (tree *tp, int *walk_subtrees, void *)
{
  splay_tree_node node;

  /* Types carry no runtime value; nothing to check below them.  */
  if (TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }
  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      *walk_subtrees = 0;
      /* Reject decls whose host-side value cannot be read safely
	 before entering the target region.  */
      if (error_operand_p (*tp)
	  || !INTEGRAL_TYPE_P (TREE_TYPE (*tp))
	  || DECL_HAS_VALUE_EXPR_P (*tp)
	  || DECL_THREAD_LOCAL_P (*tp)
	  || TREE_SIDE_EFFECTS (*tp)
	  || TREE_THIS_VOLATILE (*tp))
	return *tp;
      /* Globals marked for the device are not host-computable.  */
      if (is_global_var (*tp)
	  && (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (*tp))
	      || lookup_attribute ("omp declare target link",
				   DECL_ATTRIBUTES (*tp))))
	return *tp;
      /* A local not yet seen in any BIND_EXPR of the current function
	 is not guaranteed to be live/initialized here.  */
      if (VAR_P (*tp)
	  && !DECL_SEEN_IN_BIND_EXPR_P (*tp)
	  && !is_global_var (*tp)
	  && decl_function_context (*tp) == current_function_decl)
	return *tp;
      node = splay_tree_lookup (gimplify_omp_ctxp->variables,
				(splay_tree_key) *tp);
      if (node == NULL)
	{
	  /* Unseen decl: acceptable only if scalars default to
	     firstprivate in this context.  */
	  if (gimplify_omp_ctxp->defaultmap[GDMK_SCALAR] & GOVD_FIRSTPRIVATE)
	    return NULL_TREE;
	  return *tp;
	}
      if (node->value & GOVD_LOCAL)
	return *tp;
      if (node->value & GOVD_FIRSTPRIVATE)
	return NULL_TREE;
      if ((node->value & (GOVD_MAP | GOVD_MAP_ALWAYS_TO))
	  == (GOVD_MAP | GOVD_MAP_ALWAYS_TO))
	return NULL_TREE;
      return *tp;
    case INTEGER_CST:
      if (!INTEGRAL_TYPE_P (TREE_TYPE (*tp)))
	return *tp;
      return NULL_TREE;
    case TARGET_EXPR:
      if (TARGET_EXPR_INITIAL (*tp)
	  || TREE_CODE (TARGET_EXPR_SLOT (*tp)) != VAR_DECL)
	return *tp;
      /* Look through the temporary slot of an uninitialized
	 TARGET_EXPR.  */
      return computable_teams_clause (&TARGET_EXPR_SLOT (*tp),
				      walk_subtrees, NULL);
    /* Allow some reasonable subset of integral arithmetics.  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case MIN_EXPR:
    case MAX_EXPR:
    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case BIT_AND_EXPR:
    case NEGATE_EXPR:
    case ABS_EXPR:
    case BIT_NOT_EXPR:
    case NON_LVALUE_EXPR:
    CASE_CONVERT:
      if (!INTEGRAL_TYPE_P (TREE_TYPE (*tp)))
	return *tp;
      return NULL_TREE;
    /* And disallow anything else, except for comparisons.  */
    default:
      if (COMPARISON_CLASS_P (*tp))
	return NULL_TREE;
      return *tp;
    }
}

/* Try to determine if the num_teams and/or thread_limit expressions
   can have their values determined already before entering the
   target construct.
   INTEGER_CSTs trivially are, integral decls that are firstprivate
   (explicitly or implicitly) or explicitly map(always, to:) or
   map(always, tofrom:) on the target region too, and expressions involving
   simple arithmetics on those too, function calls are not ok, dereferencing
   something neither etc.
   Add NUM_TEAMS and THREAD_LIMIT clauses to the OMP_CLAUSES of
   EXPR based on what we find:
   0 stands for clause not specified at all, use implementation default
   -1 stands for value that can't be determined easily before entering
      the target construct.
   If teams construct is not present at all, use 1 for num_teams
   and 0 for thread_limit (only one team is involved, and the thread
   limit is implementation defined).
*/

static void
optimize_target_teams (tree target, gimple_seq *pre_p)
{
  tree body = OMP_BODY (target);
  /* Locate the (possibly absent) teams region inside the target body.  */
  tree teams = walk_tree (&body, find_omp_teams, NULL, NULL);
  /* 0 == clause not specified, use implementation default.  */
  tree num_teams = integer_zero_node;
  tree thread_limit = integer_zero_node;
  location_t num_teams_loc = EXPR_LOCATION (target);
  location_t thread_limit_loc = EXPR_LOCATION (target);
  tree c, *p, expr;
  struct gimplify_omp_ctx *target_ctx = gimplify_omp_ctxp;

  if (teams == NULL_TREE)
    /* No teams construct: exactly one team.  */
    num_teams = integer_one_node;
  else
    for (c = OMP_TEAMS_CLAUSES (teams); c; c = OMP_CLAUSE_CHAIN (c))
      {
	/* P points at whichever of num_teams/thread_limit this clause
	   determines; skip unrelated clauses.  */
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS)
	  {
	    p = &num_teams;
	    num_teams_loc = OMP_CLAUSE_LOCATION (c);
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
	  {
	    p = &thread_limit;
	    thread_limit_loc = OMP_CLAUSE_LOCATION (c);
	  }
	else
	  continue;
	expr = OMP_CLAUSE_OPERAND (c, 0);
	if (TREE_CODE (expr) == INTEGER_CST)
	  {
	    *p = expr;
	    continue;
	  }
	/* A non-NULL result from the walk means some subexpression is
	   not host-computable; record -1 (can't be determined).  */
	if (walk_tree (&expr, computable_teams_clause, NULL, NULL))
	  {
	    *p = integer_minus_one_node;
	    continue;
	  }
	*p = expr;
	/* Gimplify the expression in the context enclosing the target
	   region, since it is evaluated on the host before entry.  */
	gimplify_omp_ctxp = gimplify_omp_ctxp->outer_context;
	if (gimplify_expr (p, pre_p, NULL, is_gimple_val, fb_rvalue, false)
	    == GS_ERROR)
	  {
	    gimplify_omp_ctxp = target_ctx;
	    *p = integer_minus_one_node;
	    continue;
	  }
	gimplify_omp_ctxp = target_ctx;
	/* Write the gimplified value back to the teams clause unless
	   the original was a decl/TARGET_EXPR that stays valid.  */
	if (!DECL_P (expr) && TREE_CODE (expr) != TARGET_EXPR)
	  OMP_CLAUSE_OPERAND (c, 0) = *p;
      }
  /* Prepend THREAD_LIMIT first, then NUM_TEAMS, so NUM_TEAMS ends up
     at the head of the target's clause chain.  */
  c = build_omp_clause (thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
  OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = thread_limit;
  OMP_CLAUSE_CHAIN (c) = OMP_TARGET_CLAUSES (target);
  OMP_TARGET_CLAUSES (target) = c;
  c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
  OMP_CLAUSE_NUM_TEAMS_EXPR (c) = num_teams;
  OMP_CLAUSE_CHAIN (c) = OMP_TARGET_CLAUSES (target);
  OMP_TARGET_CLAUSES (target) = c;
}

/* Gimplify the gross structure of several OMP constructs.
*/

static void
gimplify_omp_workshare (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  gimple *stmt;
  gimple_seq body = NULL;
  enum omp_region_type ort;

  /* Map the tree code to the region type used for clause scanning.  */
  switch (TREE_CODE (expr))
    {
    case OMP_SECTIONS:
    case OMP_SINGLE:
      ort = ORT_WORKSHARE;
      break;
    case OMP_TARGET:
      ort = OMP_TARGET_COMBINED (expr) ? ORT_COMBINED_TARGET : ORT_TARGET;
      break;
    case OACC_KERNELS:
      ort = ORT_ACC_KERNELS;
      break;
    case OACC_PARALLEL:
      ort = ORT_ACC_PARALLEL;
      break;
    case OACC_SERIAL:
      ort = ORT_ACC_SERIAL;
      break;
    case OACC_DATA:
      ort = ORT_ACC_DATA;
      break;
    case OMP_TARGET_DATA:
      ort = ORT_TARGET_DATA;
      break;
    case OMP_TEAMS:
      ort = OMP_TEAMS_COMBINED (expr) ? ORT_COMBINED_TEAMS : ORT_TEAMS;
      /* Teams not (even implicitly) nested in target execute on the
	 host; mark them as host teams.  */
      if (gimplify_omp_ctxp == NULL
	  || gimplify_omp_ctxp->region_type == ORT_IMPLICIT_TARGET)
	ort = (enum omp_region_type) (ort | ORT_HOST_TEAMS);
      break;
    case OACC_HOST_DATA:
      ort = ORT_ACC_HOST_DATA;
      break;
    default:
      gcc_unreachable ();
    }

  /* Save/clear the in-OpenMP-construct flag for non-OpenACC regions;
     restored after the body is gimplified.  */
  bool save_in_omp_construct = in_omp_construct;
  if ((ort & ORT_ACC) == 0)
    in_omp_construct = false;
  gimplify_scan_omp_clauses (&OMP_CLAUSES (expr), pre_p, ort,
			     TREE_CODE (expr));
  if (TREE_CODE (expr) == OMP_TARGET)
    optimize_target_teams (expr, pre_p);
  if ((ort & (ORT_TARGET | ORT_TARGET_DATA)) != 0
      || (ort & ORT_HOST_TEAMS) == ORT_HOST_TEAMS)
    {
      /* Target-ish regions get their own gimplification context so the
	 body's temporaries land inside the region.  */
      push_gimplify_context ();
      gimple *g = gimplify_and_return_first (OMP_BODY (expr), &body);
      if (gimple_code (g) == GIMPLE_BIND)
	pop_gimplify_context (g);
      else
	pop_gimplify_context (NULL);
      if ((ort & ORT_TARGET_DATA) != 0)
	{
	  /* Data regions need a runtime "end data" call; wrap the body
	     in a TRY_FINALLY so it runs on all exits.  */
	  enum built_in_function end_ix;
	  switch (TREE_CODE (expr))
	    {
	    case OACC_DATA:
	    case OACC_HOST_DATA:
	      end_ix = BUILT_IN_GOACC_DATA_END;
	      break;
	    case OMP_TARGET_DATA:
	      end_ix = BUILT_IN_GOMP_TARGET_END_DATA;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  tree fn = builtin_decl_explicit (end_ix);
	  g = gimple_build_call (fn, 0);
	  gimple_seq cleanup = NULL;
	  gimple_seq_add_stmt (&cleanup, g);
	  g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY);
	  body = NULL;
	  gimple_seq_add_stmt (&body, g);
	}
    }
  else
    gimplify_and_add (OMP_BODY (expr), &body);
  gimplify_adjust_omp_clauses (pre_p, body, &OMP_CLAUSES (expr),
			       TREE_CODE (expr));
  in_omp_construct = save_in_omp_construct;

  /* Build the GIMPLE statement matching the construct kind.  */
  switch (TREE_CODE (expr))
    {
    case OACC_DATA:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OACC_HOST_DATA:
      if (omp_find_clause (OMP_CLAUSES (expr), OMP_CLAUSE_IF_PRESENT))
	{
	  /* Propagate if_present onto every use_device_ptr clause.  */
	  for (tree c = OMP_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR)
	      OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT (c) = 1;
	}
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_HOST_DATA,
				      OMP_CLAUSES (expr));
      break;
    case OACC_KERNELS:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_KERNELS,
				      OMP_CLAUSES (expr));
      break;
    case OACC_PARALLEL:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_PARALLEL,
				      OMP_CLAUSES (expr));
      break;
    case OACC_SERIAL:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_OACC_SERIAL,
				      OMP_CLAUSES (expr));
      break;
    case OMP_SECTIONS:
      stmt = gimple_build_omp_sections (body, OMP_CLAUSES (expr));
      break;
    case OMP_SINGLE:
      stmt = gimple_build_omp_single (body, OMP_CLAUSES (expr));
      break;
    case OMP_TARGET:
      stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_REGION,
				      OMP_CLAUSES (expr));
      break;
    case OMP_TARGET_DATA:
      /* Put use_device_{ptr,addr} clauses last, as map clauses are
	 supposed to be evaluated before the use_device_{ptr,addr}
	 clauses if they refer to the same variables.  */
      {
	tree use_device_clauses;
	tree *pc, *uc = &use_device_clauses;
	/* Unlink use_device_* clauses into a separate list, then
	   append that list at the tail of the remaining clauses.  */
	for (pc = &OMP_CLAUSES (expr); *pc; )
	  if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_USE_DEVICE_PTR
	      || OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_USE_DEVICE_ADDR)
	    {
	      *uc = *pc;
	      *pc = OMP_CLAUSE_CHAIN (*pc);
	      uc = &OMP_CLAUSE_CHAIN (*uc);
	    }
	  else
	    pc = &OMP_CLAUSE_CHAIN (*pc);
	*uc = NULL_TREE;
	*pc = use_device_clauses;
	stmt = gimple_build_omp_target (body, GF_OMP_TARGET_KIND_DATA,
					OMP_CLAUSES (expr));
      }
      break;
    case OMP_TEAMS:
      stmt = gimple_build_omp_teams (body, OMP_CLAUSES (expr));
      if ((ort & ORT_HOST_TEAMS) == ORT_HOST_TEAMS)
	gimple_omp_teams_set_host (as_a <gomp_teams *> (stmt), true);
      break;
    default:
      gcc_unreachable ();
    }

  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}

/* Gimplify the gross structure of OpenACC enter/exit data, update, and
   OpenMP target update constructs.  */

static void
gimplify_omp_target_update (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p;
  int kind;
  gomp_target *stmt;
  enum omp_region_type ort = ORT_WORKSHARE;

  /* Pick the GF_OMP_TARGET_KIND_* subcode and region type.  */
  switch (TREE_CODE (expr))
    {
    case OACC_ENTER_DATA:
    case OACC_EXIT_DATA:
      kind = GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA;
      ort = ORT_ACC;
      break;
    case OACC_UPDATE:
      kind = GF_OMP_TARGET_KIND_OACC_UPDATE;
      ort = ORT_ACC;
      break;
    case OMP_TARGET_UPDATE:
      kind = GF_OMP_TARGET_KIND_UPDATE;
      break;
    case OMP_TARGET_ENTER_DATA:
      kind = GF_OMP_TARGET_KIND_ENTER_DATA;
      break;
    case OMP_TARGET_EXIT_DATA:
      kind = GF_OMP_TARGET_KIND_EXIT_DATA;
      break;
    default:
      gcc_unreachable ();
    }
  gimplify_scan_omp_clauses (&OMP_STANDALONE_CLAUSES (expr), pre_p,
			     ort, TREE_CODE (expr));
  gimplify_adjust_omp_clauses (pre_p, NULL, &OMP_STANDALONE_CLAUSES (expr),
			       TREE_CODE (expr));
  if (TREE_CODE (expr) == OACC_UPDATE
      && omp_find_clause (OMP_STANDALONE_CLAUSES (expr),
			  OMP_CLAUSE_IF_PRESENT))
    {
      /* The runtime uses GOMP_MAP_{TO,FROM} to denote the if_present
	 clause.  */
      for (tree c = OMP_STANDALONE_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP)
	  switch (OMP_CLAUSE_MAP_KIND (c))
	    {
	    case GOMP_MAP_FORCE_TO:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_TO);
	      break;
	    case GOMP_MAP_FORCE_FROM:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_FROM);
	      break;
	    default:
	      break;
	    }
    }
  else if (TREE_CODE (expr) == OACC_EXIT_DATA
	   && omp_find_clause (OMP_STANDALONE_CLAUSES (expr),
			       OMP_CLAUSE_FINALIZE))
    {
      /* Use GOMP_MAP_DELETE/GOMP_MAP_FORCE_FROM to denote "finalize"
	 semantics.  */
      bool have_clause = false;
      for (tree c = OMP_STANDALONE_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP)
	  switch (OMP_CLAUSE_MAP_KIND (c))
	    {
	    case GOMP_MAP_FROM:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_FORCE_FROM);
	      have_clause = true;
	      break;
	    case GOMP_MAP_RELEASE:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_DELETE);
	      have_clause = true;
	      break;
	    case GOMP_MAP_TO_PSET:
	      /* Fortran arrays with descriptors must map that descriptor when
		 doing standalone "attach" operations (in OpenACC).  In that
		 case GOMP_MAP_TO_PSET appears by itself with no preceding
		 clause (see trans-openmp.c:gfc_trans_omp_clauses).  */
	      break;
	    case GOMP_MAP_POINTER:
	      /* TODO PR92929: we may see these here, but they'll always follow
		 one of the clauses above, and will be handled by libgomp as
		 one group, so no handling required here.  */
	      gcc_assert (have_clause);
	      break;
	    case GOMP_MAP_DETACH:
	      OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_FORCE_DETACH);
	      have_clause = false;
	      break;
	    case GOMP_MAP_STRUCT:
	      have_clause = false;
	      break;
	    default:
	      gcc_unreachable ();
	    }
    }
  /* These constructs have no body; only clauses reach the runtime.  */
  stmt = gimple_build_omp_target (NULL, kind, OMP_STANDALONE_CLAUSES (expr));

  gimplify_seq_add_stmt (pre_p, stmt);
  *expr_p = NULL_TREE;
}

/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */

static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.
     The C front end is fond of adding these for e.g. volatile variables.
     This is like STRIP_TYPE_NOPS but includes the main variant lookup.  */
  STRIP_USELESS_TYPE_CONVERSION (expr);

  if (TREE_CODE (expr) == INDIRECT_REF)
    {
      expr = TREE_OPERAND (expr, 0);
      /* Peel matching conversions off both EXPR and ADDR in lockstep
	 so structurally equal addresses compare equal.  */
      while (expr != addr
	     && (CONVERT_EXPR_P (expr)
		 || TREE_CODE (expr) == NON_LVALUE_EXPR)
	     && TREE_CODE (expr) == TREE_CODE (addr)
	     && types_compatible_p (TREE_TYPE (expr), TREE_TYPE (addr)))
	{
	  expr = TREE_OPERAND (expr, 0);
	  addr = TREE_OPERAND (addr, 0);
	}
      if (expr == addr)
	return true;
      return (TREE_CODE (addr) == ADDR_EXPR
	      && TREE_CODE (expr) == ADDR_EXPR
	      && TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
    }
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}

/* Walk *EXPR_P and replace
   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
   the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
   a subexpression, 0 if it did not, or -1 if an error was encountered.  */

static int
goa_stabilize_expr (tree *expr_p, gimple_seq *pre_p, tree lhs_addr,
		    tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  /* The whole expression is the stabilized lhs itself.  */
  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  /* Recurse into operands; the lhs may appear on either side.  */
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
    case tcc_comparison:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p, lhs_addr,
				     lhs_var);
      /* FALLTHRU */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p, lhs_addr,
				     lhs_var);
      break;
    case tcc_expression:
      switch (TREE_CODE (expr))
	{
	case TRUTH_ANDIF_EXPR:
	case TRUTH_ORIF_EXPR:
	case TRUTH_AND_EXPR:
	case TRUTH_OR_EXPR:
	case TRUTH_XOR_EXPR:
	case BIT_INSERT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
					 lhs_addr, lhs_var);
	  /* FALLTHRU */
	case TRUTH_NOT_EXPR:
	  saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
					 lhs_addr, lhs_var);
	  break;
	case COMPOUND_EXPR:
	  /* Break out any preevaluations from cp_build_modify_expr.  */
	  for (; TREE_CODE (expr) == COMPOUND_EXPR;
	       expr = TREE_OPERAND (expr, 1))
	    gimplify_stmt (&TREE_OPERAND (expr, 0), pre_p);
	  *expr_p = expr;
	  return goa_stabilize_expr (expr_p, pre_p, lhs_addr, lhs_var);
	default:
	  break;
	}
      break;
    case tcc_reference:
      if (TREE_CODE (expr) == BIT_FIELD_REF)
	saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				       lhs_addr, lhs_var);
      break;
    default:
      break;
    }

  if (saw_lhs == 0)
    {
      /* The lhs does not occur here: evaluate the subexpression into a
	 temporary so the atomic region stays minimal.  */
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}

/* Gimplify an OMP_ATOMIC statement.  */

static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, gimple_seq *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  /* OMP_ATOMIC_READ has no rhs operand.  */
  tree rhs = TREE_CODE (*expr_p) == OMP_ATOMIC_READ
	     ? NULL : TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  tree tmp_load;
  gomp_atomic_load *loadstmt;
  gomp_atomic_store *storestmt;

  tmp_load = create_tmp_reg (type);
  if (rhs && goa_stabilize_expr (&rhs, pre_p, addr, tmp_load) < 0)
    return GS_ERROR;

  if (gimplify_expr (&addr, pre_p, NULL, is_gimple_val, fb_rvalue)
      != GS_ALL_DONE)
    return GS_ERROR;

  loadstmt = gimple_build_omp_atomic_load (tmp_load, addr,
					   OMP_ATOMIC_MEMORY_ORDER (*expr_p));
  gimplify_seq_add_stmt (pre_p, loadstmt);
  if (rhs)
    {
      /* BIT_INSERT_EXPR is not valid for non-integral bitfield
	 representatives.  Use BIT_FIELD_REF on the lhs instead.
	 */
      if (TREE_CODE (rhs) == BIT_INSERT_EXPR
	  && !INTEGRAL_TYPE_P (TREE_TYPE (tmp_load)))
	{
	  tree bitpos = TREE_OPERAND (rhs, 2);
	  tree op1 = TREE_OPERAND (rhs, 1);
	  tree bitsize;
	  tree tmp_store = tmp_load;
	  /* For capture-old, store into a copy so the original loaded
	     value survives.  */
	  if (TREE_CODE (*expr_p) == OMP_ATOMIC_CAPTURE_OLD)
	    tmp_store = get_initialized_tmp_var (tmp_load, pre_p);
	  if (INTEGRAL_TYPE_P (TREE_TYPE (op1)))
	    bitsize = bitsize_int (TYPE_PRECISION (TREE_TYPE (op1)));
	  else
	    bitsize = TYPE_SIZE (TREE_TYPE (op1));
	  gcc_assert (TREE_OPERAND (rhs, 0) == tmp_load);
	  tree t = build2_loc (EXPR_LOCATION (rhs),
			       MODIFY_EXPR, void_type_node,
			       build3_loc (EXPR_LOCATION (rhs), BIT_FIELD_REF,
					   TREE_TYPE (op1), tmp_store, bitsize,
					   bitpos), op1);
	  gimplify_and_add (t, pre_p);
	  rhs = tmp_store;
	}
      if (gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue)
	  != GS_ALL_DONE)
	return GS_ERROR;
    }

  if (TREE_CODE (*expr_p) == OMP_ATOMIC_READ)
    rhs = tmp_load;
  storestmt
    = gimple_build_omp_atomic_store (rhs, OMP_ATOMIC_MEMORY_ORDER (*expr_p));
  gimplify_seq_add_stmt (pre_p, storestmt);
  /* For the capture forms, the construct itself yields a value.  */
  switch (TREE_CODE (*expr_p))
    {
    case OMP_ATOMIC_READ:
    case OMP_ATOMIC_CAPTURE_OLD:
      *expr_p = tmp_load;
      gimple_omp_atomic_set_need_value (loadstmt);
      break;
    case OMP_ATOMIC_CAPTURE_NEW:
      *expr_p = rhs;
      gimple_omp_atomic_set_need_value (storestmt);
      break;
    default:
      *expr_p = NULL;
      break;
    }

  return GS_ALL_DONE;
}

/* Gimplify a TRANSACTION_EXPR.  This involves gimplification of the
   body, and adding some EH bits.  */

static enum gimplify_status
gimplify_transaction (tree *expr_p, gimple_seq *pre_p)
{
  tree expr = *expr_p, temp, tbody = TRANSACTION_EXPR_BODY (expr);
  gimple *body_stmt;
  gtransaction *trans_stmt;
  gimple_seq body = NULL;
  int subcode = 0;

  /* Wrap the transaction body in a BIND_EXPR so we have a context
     where to put decls for OMP.  */
  if (TREE_CODE (tbody) != BIND_EXPR)
    {
      tree bind = build3 (BIND_EXPR, void_type_node, NULL, tbody, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      SET_EXPR_LOCATION (bind, EXPR_LOCATION (tbody));
      TRANSACTION_EXPR_BODY (expr) = bind;
    }

  push_gimplify_context ();
  /* If the transaction produces a value, voidify it and remember the
     temporary that receives the result.  */
  temp = voidify_wrapper_expr (*expr_p, NULL);

  body_stmt = gimplify_and_return_first (TRANSACTION_EXPR_BODY (expr), &body);
  pop_gimplify_context (body_stmt);

  trans_stmt = gimple_build_transaction (body);
  if (TRANSACTION_EXPR_OUTER (expr))
    subcode = GTMA_IS_OUTER;
  else if (TRANSACTION_EXPR_RELAXED (expr))
    subcode = GTMA_IS_RELAXED;
  gimple_transaction_set_subcode (trans_stmt, subcode);

  gimplify_seq_add_stmt (pre_p, trans_stmt);

  if (temp)
    {
      *expr_p = temp;
      return GS_OK;
    }

  *expr_p = NULL_TREE;
  return GS_ALL_DONE;
}

/* Gimplify an OMP_ORDERED construct.  EXPR is the tree version.  BODY
   is the OMP_BODY of the original EXPR (which has already been
   gimplified so it's not present in the EXPR).

   Return the gimplified GIMPLE_OMP_ORDERED tuple.  */

static gimple *
gimplify_omp_ordered (tree expr, gimple_seq body)
{
  tree c, decls;
  int failures = 0;
  unsigned int i;
  tree source_c = NULL_TREE;
  tree sink_c = NULL_TREE;

  if (gimplify_omp_ctxp)
    {
      /* Diagnose depend(sink:)/depend(source) misuse; loop_iter_var
	 holds (original, gimplified) iterator pairs, hence the /2 and
	 2*i indexing below.  */
      for (c = OMP_ORDERED_CLAUSES (expr); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && gimplify_omp_ctxp->loop_iter_var.is_empty ()
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE))
	  {
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<ordered%> construct with %<depend%> clause must be "
		      "closely nested inside a loop with %<ordered%> clause "
		      "with a parameter");
	    failures++;
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
	  {
	    bool fail = false;
	    for (decls = OMP_CLAUSE_DECL (c), i = 0;
		 decls && TREE_CODE (decls) == TREE_LIST;
		 decls = TREE_CHAIN (decls), ++i)
	      if (i >= gimplify_omp_ctxp->loop_iter_var.length () / 2)
		continue;
	      else if (TREE_VALUE (decls)
		       != gimplify_omp_ctxp->loop_iter_var[2 * i])
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "variable %qE is not an iteration "
			    "of outermost loop %d, expected %qE",
			    TREE_VALUE (decls), i + 1,
			    gimplify_omp_ctxp->loop_iter_var[2 * i]);
		  fail = true;
		  failures++;
		}
	      else
		/* Replace the original iterator with its gimplified
		   counterpart.  */
		TREE_VALUE (decls)
		  = gimplify_omp_ctxp->loop_iter_var[2 * i + 1];
	    if (!fail && i != gimplify_omp_ctxp->loop_iter_var.length () / 2)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "number of variables in %<depend%> clause with "
			  "%<sink%> modifier does not match number of "
			  "iteration variables");
		failures++;
	      }
	    sink_c = c;
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
		 && OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
	  {
	    if (source_c)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "more than one %<depend%> clause with %<source%> "
			  "modifier on an %<ordered%> construct");
		failures++;
	      }
	    else
	      source_c = c;
	  }
    }
  if (source_c && sink_c)
    {
      error_at (OMP_CLAUSE_LOCATION (source_c),
		"%<depend%> clause with %<source%> modifier specified "
		"together with %<depend%> clauses with %<sink%> modifier "
		"on the same construct");
      failures++;
    }

  /* On any diagnosed failure emit a no-op instead of the construct.  */
  if (failures)
    return gimple_build_nop ();
  return gimple_build_omp_ordered (body, OMP_ORDERED_CLAUSES (expr));
}

/* Convert the GENERIC expression tree *EXPR_P to GIMPLE.  If the
   expression produces a value to be used as an operand inside a GIMPLE
   statement, the value will be stored back in *EXPR_P.  This value will
   be a tree of class tcc_declaration, tcc_constant, tcc_reference or
   an SSA_NAME.  The corresponding sequence of GIMPLE statements is
   emitted in PRE_P and POST_P.

   Additionally, this process may overwrite parts of the input
   expression during gimplification.  Ideally, it should be
   possible to do non-destructive gimplification.

   EXPR_P points to the GENERIC expression to convert to GIMPLE.  If
      the expression needs to evaluate to a value to be used as
      an operand in a GIMPLE statement, this value will be stored in
      *EXPR_P on exit.
This happens when the caller specifies one of fb_lvalue or fb_rvalue fallback flags. PRE_P will contain the sequence of GIMPLE statements corresponding to the evaluation of EXPR and all the side-effects that must be executed before the main expression. On exit, the last statement of PRE_P is the core statement being gimplified. For instance, when gimplifying 'if (++a)' the last statement in PRE_P will be 'if (t.1)' where t.1 is the result of pre-incrementing 'a'. POST_P will contain the sequence of GIMPLE statements corresponding to the evaluation of all the side-effects that must be executed after the main expression. If this is NULL, the post side-effects are stored at the end of PRE_P. The reason why the output is split in two is to handle post side-effects explicitly. In some cases, an expression may have inner and outer post side-effects which need to be emitted in an order different from the one given by the recursive traversal. For instance, for the expression (*p--)++ the post side-effects of '--' must actually occur *after* the post side-effects of '++'. However, gimplification will first visit the inner expression, so if a separate POST sequence was not used, the resulting sequence would be: 1 t.1 = *p 2 p = p - 1 3 t.2 = t.1 + 1 4 *p = t.2 However, the post-decrement operation in line #2 must not be evaluated until after the store to *p at line #4, so the correct sequence should be: 1 t.1 = *p 2 t.2 = t.1 + 1 3 *p = t.2 4 p = p - 1 So, by specifying a separate post queue, it is possible to emit the post side-effects in the correct order. If POST_P is NULL, an internal queue will be used. Before returning to the caller, the sequence POST_P is appended to the main output sequence PRE_P. GIMPLE_TEST_F points to a function that takes a tree T and returns nonzero if T is in the GIMPLE form requested by the caller. The GIMPLE predicates are in gimple.c. 
FALLBACK tells the function what sort of a temporary we want if gimplification cannot produce an expression that complies with GIMPLE_TEST_F. fb_none means that no temporary should be generated fb_rvalue means that an rvalue is OK to generate fb_lvalue means that an lvalue is OK to generate fb_either means that either is OK, but an lvalue is preferable. fb_mayfail means that gimplification may fail (in which case GS_ERROR will be returned) The return value is either GS_ERROR or GS_ALL_DONE, since this function iterates until EXPR is completely gimplified or an error occurs. */ enum gimplify_status gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p, bool (*gimple_test_f) (tree), fallback_t fallback) { tree tmp; gimple_seq internal_pre = NULL; gimple_seq internal_post = NULL; tree save_expr; bool is_statement; location_t saved_location; enum gimplify_status ret; gimple_stmt_iterator pre_last_gsi, post_last_gsi; tree label; save_expr = *expr_p; if (save_expr == NULL_TREE) return GS_ALL_DONE; /* If we are gimplifying a top-level statement, PRE_P must be valid. */ is_statement = gimple_test_f == is_gimple_stmt; if (is_statement) gcc_assert (pre_p); /* Consistency checks. 
*/ if (gimple_test_f == is_gimple_reg) gcc_assert (fallback & (fb_rvalue | fb_lvalue)); else if (gimple_test_f == is_gimple_val || gimple_test_f == is_gimple_call_addr || gimple_test_f == is_gimple_condexpr || gimple_test_f == is_gimple_condexpr_for_cond || gimple_test_f == is_gimple_mem_rhs || gimple_test_f == is_gimple_mem_rhs_or_call || gimple_test_f == is_gimple_reg_rhs || gimple_test_f == is_gimple_reg_rhs_or_call || gimple_test_f == is_gimple_asm_val || gimple_test_f == is_gimple_mem_ref_addr) gcc_assert (fallback & fb_rvalue); else if (gimple_test_f == is_gimple_min_lval || gimple_test_f == is_gimple_lvalue) gcc_assert (fallback & fb_lvalue); else if (gimple_test_f == is_gimple_addressable) gcc_assert (fallback & fb_either); else if (gimple_test_f == is_gimple_stmt) gcc_assert (fallback == fb_none); else { /* We should have recognized the GIMPLE_TEST_F predicate to know what kind of fallback to use in case a temporary is needed to hold the value or address of *EXPR_P. */ gcc_unreachable (); } /* We used to check the predicate here and return immediately if it succeeds. This is wrong; the design is for gimplification to be idempotent, and for the predicates to only test for valid forms, not whether they are fully simplified. */ if (pre_p == NULL) pre_p = &internal_pre; if (post_p == NULL) post_p = &internal_post; /* Remember the last statements added to PRE_P and POST_P. Every new statement added by the gimplification helpers needs to be annotated with location information. To centralize the responsibility, we remember the last statement that had been added to both queues before gimplifying *EXPR_P. If gimplification produces new statements in PRE_P and POST_P, those statements will be annotated with the same location information as *EXPR_P. 
*/ pre_last_gsi = gsi_last (*pre_p); post_last_gsi = gsi_last (*post_p); saved_location = input_location; if (save_expr != error_mark_node && EXPR_HAS_LOCATION (*expr_p)) input_location = EXPR_LOCATION (*expr_p); /* Loop over the specific gimplifiers until the toplevel node remains the same. */ do { /* Strip away as many useless type conversions as possible at the toplevel. */ STRIP_USELESS_TYPE_CONVERSION (*expr_p); /* Remember the expr. */ save_expr = *expr_p; /* Die, die, die, my darling. */ if (error_operand_p (save_expr)) { ret = GS_ERROR; break; } /* Do any language-specific gimplification. */ ret = ((enum gimplify_status) lang_hooks.gimplify_expr (expr_p, pre_p, post_p)); if (ret == GS_OK) { if (*expr_p == NULL_TREE) break; if (*expr_p != save_expr) continue; } else if (ret != GS_UNHANDLED) break; /* Make sure that all the cases set 'ret' appropriately. */ ret = GS_UNHANDLED; switch (TREE_CODE (*expr_p)) { /* First deal with the special cases. */ case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: ret = gimplify_self_mod_expr (expr_p, pre_p, post_p, fallback != fb_none, TREE_TYPE (*expr_p)); break; case VIEW_CONVERT_EXPR: if ((fallback & fb_rvalue) && is_gimple_reg_type (TREE_TYPE (*expr_p)) && is_gimple_reg_type (TREE_TYPE (TREE_OPERAND (*expr_p, 0)))) { ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; } /* Fallthru. */ case ARRAY_REF: case ARRAY_RANGE_REF: case REALPART_EXPR: case IMAGPART_EXPR: case COMPONENT_REF: ret = gimplify_compound_lval (expr_p, pre_p, post_p, fallback ? fallback : fb_rvalue); break; case COND_EXPR: ret = gimplify_cond_expr (expr_p, pre_p, fallback); /* C99 code may assign to an array in a structure value of a conditional expression, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. 
*/ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false); mark_addressable (*expr_p); ret = GS_OK; } break; case CALL_EXPR: ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none); /* C99 code may assign to an array in a structure returned from a function, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false); mark_addressable (*expr_p); ret = GS_OK; } break; case TREE_LIST: gcc_unreachable (); case COMPOUND_EXPR: ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none); break; case COMPOUND_LITERAL_EXPR: ret = gimplify_compound_literal_expr (expr_p, pre_p, gimple_test_f, fallback); break; case MODIFY_EXPR: case INIT_EXPR: ret = gimplify_modify_expr (expr_p, pre_p, post_p, fallback != fb_none); break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: { /* Preserve the original type of the expression and the source location of the outer expression. */ tree org_type = TREE_TYPE (*expr_p); *expr_p = gimple_boolify (*expr_p); *expr_p = build3_loc (input_location, COND_EXPR, org_type, *expr_p, fold_convert_loc (input_location, org_type, boolean_true_node), fold_convert_loc (input_location, org_type, boolean_false_node)); ret = GS_OK; break; } case TRUTH_NOT_EXPR: { tree type = TREE_TYPE (*expr_p); /* The parsers are careful to generate TRUTH_NOT_EXPR only with operands that are always zero or one. We do not fold here but handle the only interesting case manually, as fold may re-introduce the TRUTH_NOT_EXPR. 
*/ *expr_p = gimple_boolify (*expr_p); if (TYPE_PRECISION (TREE_TYPE (*expr_p)) == 1) *expr_p = build1_loc (input_location, BIT_NOT_EXPR, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0)); else *expr_p = build2_loc (input_location, BIT_XOR_EXPR, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), build_int_cst (TREE_TYPE (*expr_p), 1)); if (!useless_type_conversion_p (type, TREE_TYPE (*expr_p))) *expr_p = fold_convert_loc (input_location, type, *expr_p); ret = GS_OK; break; } case ADDR_EXPR: ret = gimplify_addr_expr (expr_p, pre_p, post_p); break; case ANNOTATE_EXPR: { tree cond = TREE_OPERAND (*expr_p, 0); tree kind = TREE_OPERAND (*expr_p, 1); tree data = TREE_OPERAND (*expr_p, 2); tree type = TREE_TYPE (cond); if (!INTEGRAL_TYPE_P (type)) { *expr_p = cond; ret = GS_OK; break; } tree tmp = create_tmp_var (type); gimplify_arg (&cond, pre_p, EXPR_LOCATION (*expr_p)); gcall *call = gimple_build_call_internal (IFN_ANNOTATE, 3, cond, kind, data); gimple_call_set_lhs (call, tmp); gimplify_seq_add_stmt (pre_p, call); *expr_p = tmp; ret = GS_ALL_DONE; break; } case VA_ARG_EXPR: ret = gimplify_va_arg_expr (expr_p, pre_p, post_p); break; CASE_CONVERT: if (IS_EMPTY_STMT (*expr_p)) { ret = GS_ALL_DONE; break; } if (VOID_TYPE_P (TREE_TYPE (*expr_p)) || fallback == fb_none) { /* Just strip a conversion to void (or in void context) and try again. */ *expr_p = TREE_OPERAND (*expr_p, 0); ret = GS_OK; break; } ret = gimplify_conversion (expr_p); if (ret == GS_ERROR) break; if (*expr_p != save_expr) break; /* FALLTHRU */ case FIX_TRUNC_EXPR: /* unary_expr: ... | '(' cast ')' val | ... 
*/ ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); break; case INDIRECT_REF: { bool volatilep = TREE_THIS_VOLATILE (*expr_p); bool notrap = TREE_THIS_NOTRAP (*expr_p); tree saved_ptr_type = TREE_TYPE (TREE_OPERAND (*expr_p, 0)); *expr_p = fold_indirect_ref_loc (input_location, *expr_p); if (*expr_p != save_expr) { ret = GS_OK; break; } ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_reg, fb_rvalue); if (ret == GS_ERROR) break; recalculate_side_effects (*expr_p); *expr_p = fold_build2_loc (input_location, MEM_REF, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), build_int_cst (saved_ptr_type, 0)); TREE_THIS_VOLATILE (*expr_p) = volatilep; TREE_THIS_NOTRAP (*expr_p) = notrap; ret = GS_OK; break; } /* We arrive here through the various re-gimplifcation paths. */ case MEM_REF: /* First try re-folding the whole thing. */ tmp = fold_binary (MEM_REF, TREE_TYPE (*expr_p), TREE_OPERAND (*expr_p, 0), TREE_OPERAND (*expr_p, 1)); if (tmp) { REF_REVERSE_STORAGE_ORDER (tmp) = REF_REVERSE_STORAGE_ORDER (*expr_p); *expr_p = tmp; recalculate_side_effects (*expr_p); ret = GS_OK; break; } /* Avoid re-gimplifying the address operand if it is already in suitable form. Re-gimplifying would mark the address operand addressable. Always gimplify when not in SSA form as we still may have to gimplify decls with value-exprs. */ if (!gimplify_ctxp || !gimple_in_ssa_p (cfun) || !is_gimple_mem_ref_addr (TREE_OPERAND (*expr_p, 0))) { ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_mem_ref_addr, fb_rvalue); if (ret == GS_ERROR) break; } recalculate_side_effects (*expr_p); ret = GS_ALL_DONE; break; /* Constants need not be gimplified. */ case INTEGER_CST: case REAL_CST: case FIXED_CST: case STRING_CST: case COMPLEX_CST: case VECTOR_CST: /* Drop the overflow flag on constants, we do not want that in the GIMPLE IL. 
*/ if (TREE_OVERFLOW_P (*expr_p)) *expr_p = drop_tree_overflow (*expr_p); ret = GS_ALL_DONE; break; case CONST_DECL: /* If we require an lvalue, such as for ADDR_EXPR, retain the CONST_DECL node. Otherwise the decl is replaceable by its value. */ /* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */ if (fallback & fb_lvalue) ret = GS_ALL_DONE; else { *expr_p = DECL_INITIAL (*expr_p); ret = GS_OK; } break; case DECL_EXPR: ret = gimplify_decl_expr (expr_p, pre_p); break; case BIND_EXPR: ret = gimplify_bind_expr (expr_p, pre_p); break; case LOOP_EXPR: ret = gimplify_loop_expr (expr_p, pre_p); break; case SWITCH_EXPR: ret = gimplify_switch_expr (expr_p, pre_p); break; case EXIT_EXPR: ret = gimplify_exit_expr (expr_p); break; case GOTO_EXPR: /* If the target is not LABEL, then it is a computed jump and the target needs to be gimplified. */ if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL) { ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p, NULL, is_gimple_val, fb_rvalue); if (ret == GS_ERROR) break; } gimplify_seq_add_stmt (pre_p, gimple_build_goto (GOTO_DESTINATION (*expr_p))); ret = GS_ALL_DONE; break; case PREDICT_EXPR: gimplify_seq_add_stmt (pre_p, gimple_build_predict (PREDICT_EXPR_PREDICTOR (*expr_p), PREDICT_EXPR_OUTCOME (*expr_p))); ret = GS_ALL_DONE; break; case LABEL_EXPR: ret = gimplify_label_expr (expr_p, pre_p); label = LABEL_EXPR_LABEL (*expr_p); gcc_assert (decl_function_context (label) == current_function_decl); /* If the label is used in a goto statement, or address of the label is taken, we need to unpoison all variables that were seen so far. Doing so would prevent us from reporting a false positives. 
*/ if (asan_poisoned_variables && asan_used_labels != NULL && asan_used_labels->contains (label)) asan_poison_variables (asan_poisoned_variables, false, pre_p); break; case CASE_LABEL_EXPR: ret = gimplify_case_label_expr (expr_p, pre_p); if (gimplify_ctxp->live_switch_vars) asan_poison_variables (gimplify_ctxp->live_switch_vars, false, pre_p); break; case RETURN_EXPR: ret = gimplify_return_expr (*expr_p, pre_p); break; case CONSTRUCTOR: /* Don't reduce this in place; let gimplify_init_constructor work its magic. Buf if we're just elaborating this for side effects, just gimplify any element that has side-effects. */ if (fallback == fb_none) { unsigned HOST_WIDE_INT ix; tree val; tree temp = NULL_TREE; FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (*expr_p), ix, val) if (TREE_SIDE_EFFECTS (val)) append_to_statement_list (val, &temp); *expr_p = temp; ret = temp ? GS_OK : GS_ALL_DONE; } /* C99 code may assign to an array in a constructed structure or union, and this has undefined behavior only on execution, so create a temporary if an lvalue is required. */ else if (fallback == fb_lvalue) { *expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p, false); mark_addressable (*expr_p); ret = GS_OK; } else ret = GS_ALL_DONE; break; /* The following are special cases that are not handled by the original GIMPLE grammar. */ /* SAVE_EXPR nodes are converted into a GIMPLE identifier and eliminated. 
*/ case SAVE_EXPR: ret = gimplify_save_expr (expr_p, pre_p, post_p); break; case BIT_FIELD_REF: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_lvalue, fb_either); recalculate_side_effects (*expr_p); break; case TARGET_MEM_REF: { enum gimplify_status r0 = GS_ALL_DONE, r1 = GS_ALL_DONE; if (TMR_BASE (*expr_p)) r0 = gimplify_expr (&TMR_BASE (*expr_p), pre_p, post_p, is_gimple_mem_ref_addr, fb_either); if (TMR_INDEX (*expr_p)) r1 = gimplify_expr (&TMR_INDEX (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); if (TMR_INDEX2 (*expr_p)) r1 = gimplify_expr (&TMR_INDEX2 (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); /* TMR_STEP and TMR_OFFSET are always integer constants. */ ret = MIN (r0, r1); } break; case NON_LVALUE_EXPR: /* This should have been stripped above. */ gcc_unreachable (); case ASM_EXPR: ret = gimplify_asm_expr (expr_p, pre_p, post_p); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: { gimple_seq eval, cleanup; gtry *try_; /* Calls to destructors are generated automatically in FINALLY/CATCH block. They should have location as UNKNOWN_LOCATION. However, gimplify_call_expr will reset these call stmts to input_location if it finds stmt's location is unknown. To prevent resetting for destructors, we set the input_location to unknown. Note that this only affects the destructor calls in FINALLY/CATCH block, and will automatically reset to its original value by the end of gimplify_expr. 
*/ input_location = UNKNOWN_LOCATION; eval = cleanup = NULL; gimplify_and_add (TREE_OPERAND (*expr_p, 0), &eval); if (TREE_CODE (*expr_p) == TRY_FINALLY_EXPR && TREE_CODE (TREE_OPERAND (*expr_p, 1)) == EH_ELSE_EXPR) { gimple_seq n = NULL, e = NULL; gimplify_and_add (TREE_OPERAND (TREE_OPERAND (*expr_p, 1), 0), &n); gimplify_and_add (TREE_OPERAND (TREE_OPERAND (*expr_p, 1), 1), &e); if (!gimple_seq_empty_p (n) && !gimple_seq_empty_p (e)) { geh_else *stmt = gimple_build_eh_else (n, e); gimple_seq_add_stmt (&cleanup, stmt); } } else gimplify_and_add (TREE_OPERAND (*expr_p, 1), &cleanup); /* Don't create bogus GIMPLE_TRY with empty cleanup. */ if (gimple_seq_empty_p (cleanup)) { gimple_seq_add_seq (pre_p, eval); ret = GS_ALL_DONE; break; } try_ = gimple_build_try (eval, cleanup, TREE_CODE (*expr_p) == TRY_FINALLY_EXPR ? GIMPLE_TRY_FINALLY : GIMPLE_TRY_CATCH); if (EXPR_HAS_LOCATION (save_expr)) gimple_set_location (try_, EXPR_LOCATION (save_expr)); else if (LOCATION_LOCUS (saved_location) != UNKNOWN_LOCATION) gimple_set_location (try_, saved_location); if (TREE_CODE (*expr_p) == TRY_CATCH_EXPR) gimple_try_set_catch_is_cleanup (try_, TRY_CATCH_IS_CLEANUP (*expr_p)); gimplify_seq_add_stmt (pre_p, try_); ret = GS_ALL_DONE; break; } case CLEANUP_POINT_EXPR: ret = gimplify_cleanup_point_expr (expr_p, pre_p); break; case TARGET_EXPR: ret = gimplify_target_expr (expr_p, pre_p, post_p); break; case CATCH_EXPR: { gimple *c; gimple_seq handler = NULL; gimplify_and_add (CATCH_BODY (*expr_p), &handler); c = gimple_build_catch (CATCH_TYPES (*expr_p), handler); gimplify_seq_add_stmt (pre_p, c); ret = GS_ALL_DONE; break; } case EH_FILTER_EXPR: { gimple *ehf; gimple_seq failure = NULL; gimplify_and_add (EH_FILTER_FAILURE (*expr_p), &failure); ehf = gimple_build_eh_filter (EH_FILTER_TYPES (*expr_p), failure); gimple_set_no_warning (ehf, TREE_NO_WARNING (*expr_p)); gimplify_seq_add_stmt (pre_p, ehf); ret = GS_ALL_DONE; break; } case OBJ_TYPE_REF: { enum gimplify_status r0, r1; r0 = 
gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p, is_gimple_val, fb_rvalue); TREE_SIDE_EFFECTS (*expr_p) = 0; ret = MIN (r0, r1); } break; case LABEL_DECL: /* We get here when taking the address of a label. We mark the label as "forced"; meaning it can never be removed and it is a potential target for any computed goto. */ FORCED_LABEL (*expr_p) = 1; ret = GS_ALL_DONE; break; case STATEMENT_LIST: ret = gimplify_statement_list (expr_p, pre_p); break; case WITH_SIZE_EXPR: { gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p == &internal_post ? NULL : post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = GS_ALL_DONE; } break; case VAR_DECL: case PARM_DECL: ret = gimplify_var_or_parm_decl (expr_p); break; case RESULT_DECL: /* When within an OMP context, notice uses of variables. */ if (gimplify_omp_ctxp) omp_notice_variable (gimplify_omp_ctxp, *expr_p, true); ret = GS_ALL_DONE; break; case DEBUG_EXPR_DECL: gcc_unreachable (); case DEBUG_BEGIN_STMT: gimplify_seq_add_stmt (pre_p, gimple_build_debug_begin_stmt (TREE_BLOCK (*expr_p), EXPR_LOCATION (*expr_p))); ret = GS_ALL_DONE; *expr_p = NULL; break; case SSA_NAME: /* Allow callbacks into the gimplifier during optimization. 
*/ ret = GS_ALL_DONE; break; case OMP_PARALLEL: gimplify_omp_parallel (expr_p, pre_p); ret = GS_ALL_DONE; break; case OMP_TASK: gimplify_omp_task (expr_p, pre_p); ret = GS_ALL_DONE; break; case OMP_FOR: case OMP_SIMD: case OMP_DISTRIBUTE: case OMP_TASKLOOP: case OACC_LOOP: ret = gimplify_omp_for (expr_p, pre_p); break; case OMP_LOOP: ret = gimplify_omp_loop (expr_p, pre_p); break; case OACC_CACHE: gimplify_oacc_cache (expr_p, pre_p); ret = GS_ALL_DONE; break; case OACC_DECLARE: gimplify_oacc_declare (expr_p, pre_p); ret = GS_ALL_DONE; break; case OACC_HOST_DATA: case OACC_DATA: case OACC_KERNELS: case OACC_PARALLEL: case OACC_SERIAL: case OMP_SECTIONS: case OMP_SINGLE: case OMP_TARGET: case OMP_TARGET_DATA: case OMP_TEAMS: gimplify_omp_workshare (expr_p, pre_p); ret = GS_ALL_DONE; break; case OACC_ENTER_DATA: case OACC_EXIT_DATA: case OACC_UPDATE: case OMP_TARGET_UPDATE: case OMP_TARGET_ENTER_DATA: case OMP_TARGET_EXIT_DATA: gimplify_omp_target_update (expr_p, pre_p); ret = GS_ALL_DONE; break; case OMP_SECTION: case OMP_MASTER: case OMP_ORDERED: case OMP_CRITICAL: case OMP_SCAN: { gimple_seq body = NULL; gimple *g; bool saved_in_omp_construct = in_omp_construct; in_omp_construct = true; gimplify_and_add (OMP_BODY (*expr_p), &body); in_omp_construct = saved_in_omp_construct; switch (TREE_CODE (*expr_p)) { case OMP_SECTION: g = gimple_build_omp_section (body); break; case OMP_MASTER: g = gimple_build_omp_master (body); break; case OMP_ORDERED: g = gimplify_omp_ordered (*expr_p, body); break; case OMP_CRITICAL: gimplify_scan_omp_clauses (&OMP_CRITICAL_CLAUSES (*expr_p), pre_p, ORT_WORKSHARE, OMP_CRITICAL); gimplify_adjust_omp_clauses (pre_p, body, &OMP_CRITICAL_CLAUSES (*expr_p), OMP_CRITICAL); g = gimple_build_omp_critical (body, OMP_CRITICAL_NAME (*expr_p), OMP_CRITICAL_CLAUSES (*expr_p)); break; case OMP_SCAN: gimplify_scan_omp_clauses (&OMP_SCAN_CLAUSES (*expr_p), pre_p, ORT_WORKSHARE, OMP_SCAN); gimplify_adjust_omp_clauses (pre_p, body, &OMP_SCAN_CLAUSES 
(*expr_p), OMP_SCAN); g = gimple_build_omp_scan (body, OMP_SCAN_CLAUSES (*expr_p)); break; default: gcc_unreachable (); } gimplify_seq_add_stmt (pre_p, g); ret = GS_ALL_DONE; break; } case OMP_TASKGROUP: { gimple_seq body = NULL; tree *pclauses = &OMP_TASKGROUP_CLAUSES (*expr_p); bool saved_in_omp_construct = in_omp_construct; gimplify_scan_omp_clauses (pclauses, pre_p, ORT_TASKGROUP, OMP_TASKGROUP); gimplify_adjust_omp_clauses (pre_p, NULL, pclauses, OMP_TASKGROUP); in_omp_construct = true; gimplify_and_add (OMP_BODY (*expr_p), &body); in_omp_construct = saved_in_omp_construct; gimple_seq cleanup = NULL; tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_END); gimple *g = gimple_build_call (fn, 0); gimple_seq_add_stmt (&cleanup, g); g = gimple_build_try (body, cleanup, GIMPLE_TRY_FINALLY); body = NULL; gimple_seq_add_stmt (&body, g); g = gimple_build_omp_taskgroup (body, *pclauses); gimplify_seq_add_stmt (pre_p, g); ret = GS_ALL_DONE; break; } case OMP_ATOMIC: case OMP_ATOMIC_READ: case OMP_ATOMIC_CAPTURE_OLD: case OMP_ATOMIC_CAPTURE_NEW: ret = gimplify_omp_atomic (expr_p, pre_p); break; case TRANSACTION_EXPR: ret = gimplify_transaction (expr_p, pre_p); break; case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: { tree orig_type = TREE_TYPE (*expr_p); tree new_type, xop0, xop1; *expr_p = gimple_boolify (*expr_p); new_type = TREE_TYPE (*expr_p); if (!useless_type_conversion_p (orig_type, new_type)) { *expr_p = fold_convert_loc (input_location, orig_type, *expr_p); ret = GS_OK; break; } /* Boolified binary truth expressions are semantically equivalent to bitwise binary expressions. Canonicalize them to the bitwise variant. */ switch (TREE_CODE (*expr_p)) { case TRUTH_AND_EXPR: TREE_SET_CODE (*expr_p, BIT_AND_EXPR); break; case TRUTH_OR_EXPR: TREE_SET_CODE (*expr_p, BIT_IOR_EXPR); break; case TRUTH_XOR_EXPR: TREE_SET_CODE (*expr_p, BIT_XOR_EXPR); break; default: break; } /* Now make sure that operands have compatible type to expression's new_type. 
*/ xop0 = TREE_OPERAND (*expr_p, 0); xop1 = TREE_OPERAND (*expr_p, 1); if (!useless_type_conversion_p (new_type, TREE_TYPE (xop0))) TREE_OPERAND (*expr_p, 0) = fold_convert_loc (input_location, new_type, xop0); if (!useless_type_conversion_p (new_type, TREE_TYPE (xop1))) TREE_OPERAND (*expr_p, 1) = fold_convert_loc (input_location, new_type, xop1); /* Continue classified as tcc_binary. */ goto expr_2; } case VEC_COND_EXPR: goto expr_3; case VEC_PERM_EXPR: /* Classified as tcc_expression. */ goto expr_3; case BIT_INSERT_EXPR: /* Argument 3 is a constant. */ goto expr_2; case POINTER_PLUS_EXPR: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); recalculate_side_effects (*expr_p); ret = MIN (r0, r1); break; } default: switch (TREE_CODE_CLASS (TREE_CODE (*expr_p))) { case tcc_comparison: /* Handle comparison of objects of non scalar mode aggregates with a call to memcmp. It would be nice to only have to do this for variable-sized objects, but then we'd have to allow the same nest of reference nodes we allow for MODIFY_EXPR and that's too complex. Compare scalar mode aggregates as scalar mode values. Using memcmp for them would be very inefficient at best, and is plain wrong if bitfields are involved. */ { tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1)); /* Vector comparisons need no boolification. 
*/ if (TREE_CODE (type) == VECTOR_TYPE) goto expr_2; else if (!AGGREGATE_TYPE_P (type)) { tree org_type = TREE_TYPE (*expr_p); *expr_p = gimple_boolify (*expr_p); if (!useless_type_conversion_p (org_type, TREE_TYPE (*expr_p))) { *expr_p = fold_convert_loc (input_location, org_type, *expr_p); ret = GS_OK; } else goto expr_2; } else if (TYPE_MODE (type) != BLKmode) ret = gimplify_scalar_mode_aggregate_compare (expr_p); else ret = gimplify_variable_sized_compare (expr_p); break; } /* If *EXPR_P does not need to be special-cased, handle it according to its class. */ case tcc_unary: ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); break; case tcc_binary: expr_2: { enum gimplify_status r0, r1; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (r0, r1); break; } expr_3: { enum gimplify_status r0, r1, r2; r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, is_gimple_val, fb_rvalue); r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, is_gimple_val, fb_rvalue); r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p, is_gimple_val, fb_rvalue); ret = MIN (MIN (r0, r1), r2); break; } case tcc_declaration: case tcc_constant: ret = GS_ALL_DONE; goto dont_recalculate; default: gcc_unreachable (); } recalculate_side_effects (*expr_p); dont_recalculate: break; } gcc_assert (*expr_p || ret != GS_OK); } while (ret == GS_OK); /* If we encountered an error_mark somewhere nested inside, either stub out the statement or propagate the error back out. */ if (ret == GS_ERROR) { if (is_statement) *expr_p = NULL; goto out; } /* This was only valid as a return value from the langhook, which we handled. Make sure it doesn't escape from any other context. 
*/ gcc_assert (ret != GS_UNHANDLED); if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p)) { /* We aren't looking for a value, and we don't have a valid statement. If it doesn't have side-effects, throw it away. We can also get here with code such as "*&&L;", where L is a LABEL_DECL that is marked as FORCED_LABEL. */ if (TREE_CODE (*expr_p) == LABEL_DECL || !TREE_SIDE_EFFECTS (*expr_p)) *expr_p = NULL; else if (!TREE_THIS_VOLATILE (*expr_p)) { /* This is probably a _REF that contains something nested that has side effects. Recurse through the operands to find it. */ enum tree_code code = TREE_CODE (*expr_p); switch (code) { case COMPONENT_REF: case REALPART_EXPR: case IMAGPART_EXPR: case VIEW_CONVERT_EXPR: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); break; case ARRAY_REF: case ARRAY_RANGE_REF: gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p, gimple_test_f, fallback); gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p, gimple_test_f, fallback); break; default: /* Anything else with side-effects must be converted to a valid statement before we get here. */ gcc_unreachable (); } *expr_p = NULL; } else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p)) && TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode) { /* Historically, the compiler has treated a bare reference to a non-BLKmode volatile lvalue as forcing a load. */ tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p)); /* Normally, we do not want to create a temporary for a TREE_ADDRESSABLE type because such a type should not be copied by bitwise-assignment. However, we make an exception here, as all we are doing here is ensuring that we read the bytes that make up the type. We use create_tmp_var_raw because create_tmp_var will abort when given a TREE_ADDRESSABLE type. 
*/ tree tmp = create_tmp_var_raw (type, "vol"); gimple_add_tmp_var (tmp); gimplify_assign (tmp, *expr_p, pre_p); *expr_p = NULL; } else /* We can't do anything useful with a volatile reference to an incomplete type, so just throw it away. Likewise for a BLKmode type, since any implicit inner load should already have been turned into an explicit one by the gimplification process. */ *expr_p = NULL; } /* If we are gimplifying at the statement level, we're done. Tack everything together and return. */ if (fallback == fb_none || is_statement) { /* Since *EXPR_P has been converted into a GIMPLE tuple, clear it out for GC to reclaim it. */ *expr_p = NULL_TREE; if (!gimple_seq_empty_p (internal_pre) || !gimple_seq_empty_p (internal_post)) { gimplify_seq_add_seq (&internal_pre, internal_post); gimplify_seq_add_seq (pre_p, internal_pre); } /* The result of gimplifying *EXPR_P is going to be the last few statements in *PRE_P and *POST_P. Add location information to all the statements that were added by the gimplification helpers. */ if (!gimple_seq_empty_p (*pre_p)) annotate_all_with_location_after (*pre_p, pre_last_gsi, input_location); if (!gimple_seq_empty_p (*post_p)) annotate_all_with_location_after (*post_p, post_last_gsi, input_location); goto out; } #ifdef ENABLE_GIMPLE_CHECKING if (*expr_p) { enum tree_code code = TREE_CODE (*expr_p); /* These expressions should already be in gimple IR form. 
*/ gcc_assert (code != MODIFY_EXPR && code != ASM_EXPR && code != BIND_EXPR && code != CATCH_EXPR && (code != COND_EXPR || gimplify_ctxp->allow_rhs_cond_expr) && code != EH_FILTER_EXPR && code != GOTO_EXPR && code != LABEL_EXPR && code != LOOP_EXPR && code != SWITCH_EXPR && code != TRY_FINALLY_EXPR && code != EH_ELSE_EXPR && code != OACC_PARALLEL && code != OACC_KERNELS && code != OACC_SERIAL && code != OACC_DATA && code != OACC_HOST_DATA && code != OACC_DECLARE && code != OACC_UPDATE && code != OACC_ENTER_DATA && code != OACC_EXIT_DATA && code != OACC_CACHE && code != OMP_CRITICAL && code != OMP_FOR && code != OACC_LOOP && code != OMP_MASTER && code != OMP_TASKGROUP && code != OMP_ORDERED && code != OMP_PARALLEL && code != OMP_SCAN && code != OMP_SECTIONS && code != OMP_SECTION && code != OMP_SINGLE); } #endif /* Otherwise we're gimplifying a subexpression, so the resulting value is interesting. If it's a valid operand that matches GIMPLE_TEST_F, we're done. Unless we are handling some post-effects internally; if that's the case, we need to copy into a temporary before adding the post-effects to POST_P. */ if (gimple_seq_empty_p (internal_post) && (*gimple_test_f) (*expr_p)) goto out; /* Otherwise, we need to create a new temporary for the gimplified expression. */ /* We can't return an lvalue if we have an internal postqueue. The object the lvalue refers to would (probably) be modified by the postqueue; we need to copy the value out first, which means an rvalue. */ if ((fallback & fb_lvalue) && gimple_seq_empty_p (internal_post) && is_gimple_addressable (*expr_p)) { /* An lvalue will do. Take the address of the expression, store it in a temporary, and replace the expression with an INDIRECT_REF of that temporary. 
*/ tree ref_alias_type = reference_alias_ptr_type (*expr_p); unsigned int ref_align = get_object_alignment (*expr_p); tree ref_type = TREE_TYPE (*expr_p); tmp = build_fold_addr_expr_loc (input_location, *expr_p); gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue); if (TYPE_ALIGN (ref_type) != ref_align) ref_type = build_aligned_type (ref_type, ref_align); *expr_p = build2 (MEM_REF, ref_type, tmp, build_zero_cst (ref_alias_type)); } else if ((fallback & fb_rvalue) && is_gimple_reg_rhs_or_call (*expr_p)) { /* An rvalue will do. Assign the gimplified expression into a new temporary TMP and replace the original expression with TMP. First, make sure that the expression has a type so that it can be assigned into a temporary. */ gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p))); *expr_p = get_formal_tmp_var (*expr_p, pre_p); } else { #ifdef ENABLE_GIMPLE_CHECKING if (!(fallback & fb_mayfail)) { fprintf (stderr, "gimplification failed:\n"); print_generic_expr (stderr, *expr_p); debug_tree (*expr_p); internal_error ("gimplification failed"); } #endif gcc_assert (fallback & fb_mayfail); /* If this is an asm statement, and the user asked for the impossible, don't die. Fail and let gimplify_asm_expr issue an error. */ ret = GS_ERROR; goto out; } /* Make sure the temporary matches our predicate. */ gcc_assert ((*gimple_test_f) (*expr_p)); if (!gimple_seq_empty_p (internal_post)) { annotate_all_with_location (internal_post, input_location); gimplify_seq_add_seq (pre_p, internal_post); } out: input_location = saved_location; return ret; } /* Like gimplify_expr but make sure the gimplified result is not itself a SSA name (but a decl if it were). Temporaries required by evaluating *EXPR_P may be still SSA names. 
*/

static enum gimplify_status
gimplify_expr (tree *expr_p, gimple_seq *pre_p, gimple_seq *post_p,
               bool (*gimple_test_f) (tree), fallback_t fallback,
               bool allow_ssa)
{
  /* Record up front whether the incoming expression already was an SSA
     name; after the call below we can no longer tell whether an SSA name
     in *EXPR_P came from the caller or was created by gimplification.  */
  bool was_ssa_name_p = TREE_CODE (*expr_p) == SSA_NAME;
  enum gimplify_status ret = gimplify_expr (expr_p, pre_p, post_p,
                                            gimple_test_f, fallback);
  if (! allow_ssa
      && TREE_CODE (*expr_p) == SSA_NAME)
    {
      tree name = *expr_p;
      if (was_ssa_name_p)
        /* The SSA name existed before gimplification; we must not touch
           its defining statement, so copy its value into a temporary.  */
        *expr_p = get_initialized_tmp_var (*expr_p, pre_p, NULL, false);
      else
        {
          /* Avoid the extra copy if possible.  The name was created by
             the gimplification above, so we may redirect its defining
             statement to write into a fresh register instead.  */
          *expr_p = create_tmp_reg (TREE_TYPE (name));
          if (!gimple_nop_p (SSA_NAME_DEF_STMT (name)))
            gimple_set_lhs (SSA_NAME_DEF_STMT (name), *expr_p);
          release_ssa_name (name);
        }
    }
  return ret;
}

/* Look through TYPE for variable-sized objects and gimplify each such
   size that we find.  Add to LIST_P any statements generated.  */

void
gimplify_type_sizes (tree type, gimple_seq *list_p)
{
  tree field, t;

  if (type == NULL || type == error_mark_node)
    return;

  /* We first do the main variant, then copy into any other variants.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid infinite recursion.  The flag is set before any recursive
     calls below, so self-referential types terminate.  */
  if (TYPE_SIZES_GIMPLIFIED (type))
    return;

  TYPE_SIZES_GIMPLIFIED (type) = 1;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
      gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);

      /* Propagate the (possibly rewritten) bounds to all variants.  */
      for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
        {
          TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
          TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
        }
      break;

    case ARRAY_TYPE:
      /* These types may not have declarations, so handle them here.  */
      gimplify_type_sizes (TREE_TYPE (type), list_p);
      gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
      /* Ensure VLA bounds aren't removed, for -O0 they should be variables
         with assigned stack slots, for -O1+ -g they should be tracked
         by VTA.  */
      if (!(TYPE_NAME (type)
            && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
            && DECL_IGNORED_P (TYPE_NAME (type)))
          && TYPE_DOMAIN (type)
          && INTEGRAL_TYPE_P (TYPE_DOMAIN (type)))
        {
          t = TYPE_MIN_VALUE (TYPE_DOMAIN (type));
          if (t && VAR_P (t) && DECL_ARTIFICIAL (t))
            DECL_IGNORED_P (t) = 0;
          t = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
          if (t && VAR_P (t) && DECL_ARTIFICIAL (t))
            DECL_IGNORED_P (t) = 0;
        }
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Fields carry their own offset/size trees; gimplify each, then
         recurse into the field type itself.  */
      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL)
          {
            gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
            gimplify_one_sizepos (&DECL_SIZE (field), list_p);
            gimplify_one_sizepos (&DECL_SIZE_UNIT (field), list_p);
            gimplify_type_sizes (TREE_TYPE (field), list_p);
          }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
        /* We used to recurse on the pointed-to type here, which turned out to
           be incorrect because its definition might refer to variables not
           yet initialized at this point if a forward declaration is involved.

           It was actually useful for anonymous pointed-to types to ensure
           that the sizes evaluation dominates every possible later use of the
           values.  Restricting to such types here would be safe since there
           is no possible forward declaration around, but would introduce an
           undesirable middle-end semantic to anonymity.  We then defer to
           front-ends the responsibility of ensuring that the sizes are
           evaluated both early and late enough, e.g. by attaching artificial
           type declarations to the tree.  */
      break;

    default:
      break;
    }

  gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
  gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);

  for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_SIZE (t) = TYPE_SIZE (type);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
      TYPE_SIZES_GIMPLIFIED (t) = 1;
    }
}

/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
   a size or position, has had all of its SAVE_EXPRs evaluated.
   We add any required statements to *STMT_P.
*/

void
gimplify_one_sizepos (tree *expr_p, gimple_seq *stmt_p)
{
  tree expr = *expr_p;

  /* We don't do anything if the value isn't there, is constant, or contains
     A PLACEHOLDER_EXPR.  We also don't want to do anything if it's already
     a VAR_DECL.  If it's a VAR_DECL from another function, the gimplifier
     will want to replace it with a new variable, but that will cause problems
     if this type is from outside the function.  It's OK to have that here.  */
  if (expr == NULL_TREE
      || is_gimple_constant (expr)
      || TREE_CODE (expr) == VAR_DECL
      || CONTAINS_PLACEHOLDER_P (expr))
    return;

  /* Work on an unshared copy so the rewrite does not affect other
     trees that share EXPR.  */
  *expr_p = unshare_expr (expr);

  /* SSA names in decl/type fields are a bad idea - they'll get reclaimed
     if the def vanishes.  Hence allow_ssa == false below.  */
  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue, false);

  /* If expr wasn't already is_gimple_sizepos or is_gimple_constant from the
     FE, ensure that it is a VAR_DECL, otherwise we might handle some decls
     as gimplify_vla_decl even when they would have all sizes INTEGER_CSTs.  */
  if (is_gimple_constant (*expr_p))
    *expr_p = get_initialized_tmp_var (*expr_p, stmt_p, NULL, false);
}

/* Gimplify the body of statements of FNDECL and return a GIMPLE_BIND node
   containing the sequence of corresponding GIMPLE statements.  If DO_PARMS
   is true, also gimplify the parameters.  */

gbind *
gimplify_body (tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  gimple_seq parm_stmts, parm_cleanup = NULL, seq;
  gimple *outer_stmt;
  gbind *outer_bind;

  timevar_push (TV_TREE_GIMPLIFY);

  init_tree_ssa (cfun);

  /* Initialize for optimize_insn_for_s{ize,peed}_p possibly called during
     gimplification.  */
  default_rtl_profile ();

  gcc_assert (gimplify_ctxp == NULL);
  push_gimplify_context (true);

  if (flag_openacc || flag_openmp)
    {
      gcc_assert (gimplify_omp_ctxp == NULL);
      if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (fndecl)))
        gimplify_omp_ctxp = new_omp_context (ORT_IMPLICIT_TARGET);
    }

  /* Unshare most shared trees in the body and in that of any nested functions.
     It would seem we don't have to do this for nested functions because
     they are supposed to be output and then the outer function gimplified
     first, but the g++ front end doesn't always do it that way.  */
  unshare_body (fndecl);
  unvisit_body (fndecl);

  /* Make sure input_location isn't set to something weird.  */
  input_location = DECL_SOURCE_LOCATION (fndecl);

  /* Resolve callee-copies.  This has to be done before processing
     the body so that DECL_VALUE_EXPR gets processed correctly.  */
  parm_stmts = do_parms ? gimplify_parameters (&parm_cleanup) : NULL;

  /* Gimplify the function's body.  */
  seq = NULL;
  gimplify_stmt (&DECL_SAVED_TREE (fndecl), &seq);
  outer_stmt = gimple_seq_first_nondebug_stmt (seq);
  if (!outer_stmt)
    {
      /* An empty body still needs one statement to hang the bind on.  */
      outer_stmt = gimple_build_nop ();
      gimplify_seq_add_stmt (&seq, outer_stmt);
    }

  /* The body must contain exactly one statement, a GIMPLE_BIND.  If this is
     not the case, wrap everything in a GIMPLE_BIND to make it so.  */
  if (gimple_code (outer_stmt) == GIMPLE_BIND
      && (gimple_seq_first_nondebug_stmt (seq)
          == gimple_seq_last_nondebug_stmt (seq)))
    {
      outer_bind = as_a <gbind *> (outer_stmt);
      if (gimple_seq_first_stmt (seq) != outer_stmt
          || gimple_seq_last_stmt (seq) != outer_stmt)
        {
          /* If there are debug stmts before or after outer_stmt, move them
             inside of outer_bind body.  */
          gimple_stmt_iterator gsi = gsi_for_stmt (outer_stmt, &seq);
          gimple_seq second_seq = NULL;
          if (gimple_seq_first_stmt (seq) != outer_stmt
              && gimple_seq_last_stmt (seq) != outer_stmt)
            {
              /* Debug stmts on both sides: split after the bind and
                 detach it, leaving SEQ = before, SECOND_SEQ = after.  */
              second_seq = gsi_split_seq_after (gsi);
              gsi_remove (&gsi, false);
            }
          else if (gimple_seq_first_stmt (seq) != outer_stmt)
            /* Debug stmts only before the bind.  */
            gsi_remove (&gsi, false);
          else
            {
              /* Debug stmts only after the bind.  */
              gsi_remove (&gsi, false);
              second_seq = seq;
              seq = NULL;
            }
          gimple_seq_add_seq_without_update (&seq,
                                             gimple_bind_body (outer_bind));
          gimple_seq_add_seq_without_update (&seq, second_seq);
          gimple_bind_set_body (outer_bind, seq);
        }
    }
  else
    outer_bind = gimple_build_bind (NULL_TREE, seq, NULL);

  /* The GENERIC body has been consumed into SEQ above; drop it.  */
  DECL_SAVED_TREE (fndecl) = NULL_TREE;

  /* If we had callee-copies statements, insert them at the beginning
     of the function and clear DECL_VALUE_EXPR_P on the parameters.  */
  if (!gimple_seq_empty_p (parm_stmts))
    {
      tree parm;

      gimplify_seq_add_seq (&parm_stmts, gimple_bind_body (outer_bind));
      if (parm_cleanup)
        {
          gtry *g = gimple_build_try (parm_stmts, parm_cleanup,
                                      GIMPLE_TRY_FINALLY);
          parm_stmts = NULL;
          gimple_seq_add_stmt (&parm_stmts, g);
        }
      gimple_bind_set_body (outer_bind, parm_stmts);

      for (parm = DECL_ARGUMENTS (current_function_decl);
           parm; parm = DECL_CHAIN (parm))
        if (DECL_HAS_VALUE_EXPR_P (parm))
          {
            DECL_HAS_VALUE_EXPR_P (parm) = 0;
            DECL_IGNORED_P (parm) = 0;
          }
    }

  if ((flag_openacc || flag_openmp || flag_openmp_simd)
      && gimplify_omp_ctxp)
    {
      delete_omp_context (gimplify_omp_ctxp);
      gimplify_omp_ctxp = NULL;
    }

  pop_gimplify_context (outer_bind);
  gcc_assert (gimplify_ctxp == NULL);

  if (flag_checking && !seen_error ())
    verify_gimple_in_seq (gimple_bind_body (outer_bind));

  timevar_pop (TV_TREE_GIMPLIFY);
  input_location = saved_location;

  return outer_bind;
}

typedef char *char_p; /* For DEF_VEC_P.  */

/* Return whether we should exclude FNDECL from instrumentation.
*/

static bool
flag_instrument_functions_exclude_p (tree fndecl)
{
  vec<char_p> *v;

  /* First check the user-supplied list of excluded function names
     (-finstrument-functions-exclude-function-list).  Matching is by
     substring, per strstr below.  */
  v = (vec<char_p> *) flag_instrument_functions_exclude_functions;
  if (v && v->length () > 0)
    {
      const char *name;
      int i;
      char *s;

      name = lang_hooks.decl_printable_name (fndecl, 1);
      FOR_EACH_VEC_ELT (*v, i, s)
	if (strstr (name, s) != NULL)
	  return true;
    }

  /* Then the list of excluded source files
     (-finstrument-functions-exclude-file-list), again by substring.  */
  v = (vec<char_p> *) flag_instrument_functions_exclude_files;
  if (v && v->length () > 0)
    {
      const char *name;
      int i;
      char *s;

      name = DECL_SOURCE_FILE (fndecl);
      FOR_EACH_VEC_ELT (*v, i, s)
	if (strstr (name, s) != NULL)
	  return true;
    }

  return false;
}

/* Entry point to the gimplification pass.  FNDECL is the FUNCTION_DECL
   node for the function we want to gimplify.

   Return the sequence of GIMPLE statements corresponding to the body
   of FNDECL.  */

void
gimplify_function_tree (tree fndecl)
{
  gimple_seq seq;
  gbind *bind;

  gcc_assert (!gimple_body (fndecl));

  if (DECL_STRUCT_FUNCTION (fndecl))
    push_cfun (DECL_STRUCT_FUNCTION (fndecl));
  else
    push_struct_function (fndecl);

  /* Tentatively set PROP_gimple_lva here, and reset it in gimplify_va_arg_expr
     if necessary.  */
  cfun->curr_properties |= PROP_gimple_lva;

  if (asan_sanitize_use_after_scope ()
      && sanitize_flags_p (SANITIZE_ADDRESS))
    asan_poisoned_variables = new hash_set<tree> ();

  bind = gimplify_body (fndecl, true);

  if (asan_poisoned_variables)
    {
      delete asan_poisoned_variables;
      asan_poisoned_variables = NULL;
    }

  /* The tree body of the function is no longer needed, replace it
     with the new GIMPLE body.  */
  seq = NULL;
  gimple_seq_add_stmt (&seq, bind);
  gimple_set_body (fndecl, seq);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
      /* Do not instrument extern inline functions.  */
      && !(DECL_DECLARED_INLINE_P (fndecl)
	   && DECL_EXTERNAL (fndecl)
	   && DECL_DISREGARD_INLINE_LIMITS (fndecl))
      && !flag_instrument_functions_exclude_p (fndecl))
    {
      tree x;
      gbind *new_bind;
      gimple *tf;
      gimple_seq cleanup = NULL, body = NULL;
      tree tmp_var, this_fn_addr;
      gcall *call;

      /* The instrumentation hooks aren't going to call the instrumented
	 function and the address they receive is expected to be matchable
	 against symbol addresses.  Make sure we don't create a trampoline,
	 in case the current function is nested.  */
      this_fn_addr = build_fold_addr_expr (current_function_decl);
      TREE_NO_TRAMPOLINE (this_fn_addr) = 1;

      /* Build the exit-hook cleanup sequence: capture the return address
	 (__builtin_return_address (0)) and pass it, with this function's
	 address, to the profiling exit hook.  */
      x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
      call = gimple_build_call (x, 1, integer_zero_node);
      tmp_var = create_tmp_var (ptr_type_node, "return_addr");
      gimple_call_set_lhs (call, tmp_var);
      gimplify_seq_add_stmt (&cleanup, call);
      x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_EXIT);
      call = gimple_build_call (x, 2, this_fn_addr, tmp_var);
      gimplify_seq_add_stmt (&cleanup, call);
      tf = gimple_build_try (seq, cleanup, GIMPLE_TRY_FINALLY);

      /* And the entry-hook prologue, same shape as the cleanup above.  */
      x = builtin_decl_implicit (BUILT_IN_RETURN_ADDRESS);
      call = gimple_build_call (x, 1, integer_zero_node);
      tmp_var = create_tmp_var (ptr_type_node, "return_addr");
      gimple_call_set_lhs (call, tmp_var);
      gimplify_seq_add_stmt (&body, call);
      x = builtin_decl_implicit (BUILT_IN_PROFILE_FUNC_ENTER);
      call = gimple_build_call (x, 2, this_fn_addr, tmp_var);
      gimplify_seq_add_stmt (&body, call);
      gimplify_seq_add_stmt (&body, tf);
      new_bind = gimple_build_bind (NULL, body, NULL);

      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = NULL;
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);
      bind = new_bind;
    }

  if (sanitize_flags_p (SANITIZE_THREAD)
      && param_tsan_instrument_func_entry_exit)
    {
      gcall *call = gimple_build_call_internal (IFN_TSAN_FUNC_EXIT, 0);
      gimple *tf = gimple_build_try (seq, call, GIMPLE_TRY_FINALLY);
      gbind *new_bind = gimple_build_bind (NULL, tf, NULL);
      /* Replace the current function body with the body
	 wrapped in the try/finally TF.  */
      seq = NULL;
      gimple_seq_add_stmt (&seq, new_bind);
      gimple_set_body (fndecl, seq);
    }

  DECL_SAVED_TREE (fndecl) = NULL_TREE;
  cfun->curr_properties |= PROP_gimple_any;

  pop_cfun ();

  dump_function (TDI_gimple, fndecl);
}

/* Return a dummy expression of type TYPE in order to keep going after an
   error.  */

static tree
dummy_object (tree type)
{
  tree t = build_int_cst (build_pointer_type (type), 0);
  return build2 (MEM_REF, type, t, t);
}

/* Gimplify __builtin_va_arg, aka VA_ARG_EXPR, which is not really a
   builtin function, but a very special sort of operator.  */

enum gimplify_status
gimplify_va_arg_expr (tree *expr_p, gimple_seq *pre_p,
		      gimple_seq *post_p ATTRIBUTE_UNUSED)
{
  tree promoted_type, have_va_type;
  tree valist = TREE_OPERAND (*expr_p, 0);
  tree type = TREE_TYPE (*expr_p);
  tree t, tag, aptag;
  location_t loc = EXPR_LOCATION (*expr_p);

  /* Verify that valist is of the proper type.  */
  have_va_type = TREE_TYPE (valist);
  if (have_va_type == error_mark_node)
    return GS_ERROR;
  have_va_type = targetm.canonical_va_list_type (have_va_type);
  if (have_va_type == NULL_TREE
      && POINTER_TYPE_P (TREE_TYPE (valist)))
    /* Handle 'Case 1: Not an array type' from c-common.c/build_va_arg.  */
    have_va_type
      = targetm.canonical_va_list_type (TREE_TYPE (TREE_TYPE (valist)));
  gcc_assert (have_va_type != NULL_TREE);

  /* Generate a diagnostic for requesting data of a type that cannot
     be passed through `...' due to type promotion at the call site.  */
  if ((promoted_type = lang_hooks.types.type_promotes_to (type)) != type)
    {
      static bool gave_help;
      bool warned;
      /* Use the expansion point to handle cases such as passing bool (defined
	 in a system header) through `...'.  */
      location_t xloc
	= expansion_point_location_if_in_system_header (loc);

      /* Unfortunately, this is merely undefined, rather than a constraint
	 violation, so we cannot make this an error.  If this call is never
	 executed, the program is still strictly conforming.  */
      auto_diagnostic_group d;
      warned = warning_at (xloc, 0,
			   "%qT is promoted to %qT when passed through %<...%>",
			   type, promoted_type);
      if (!gave_help && warned)
	{
	  gave_help = true;
	  inform (xloc, "(so you should pass %qT not %qT to %<va_arg%>)",
		  promoted_type, type);
	}

      /* We can, however, treat "undefined" any way we please.
	 Call abort to encourage the user to fix the program.  */
      if (warned)
	inform (xloc, "if this code is reached, the program will abort");
      /* Before the abort, allow the evaluation of the va_list
	 expression to exit or longjmp.  */
      gimplify_and_add (valist, pre_p);
      t = build_call_expr_loc (loc,
			       builtin_decl_implicit (BUILT_IN_TRAP), 0);
      gimplify_and_add (t, pre_p);

      /* This is dead code, but go ahead and finish so that the
	 mode of the result comes out right.  */
      *expr_p = dummy_object (type);
      return GS_ALL_DONE;
    }

  /* Lower to the internal function IFN_VA_ARG; TAG carries the requested
     type and APTAG the va_list type, both as null pointer constants.  */
  tag = build_int_cst (build_pointer_type (type), 0);
  aptag = build_int_cst (TREE_TYPE (valist), 0);

  *expr_p = build_call_expr_internal_loc (loc, IFN_VA_ARG, type, 3,
					  valist, tag, aptag);

  /* Clear the tentatively set PROP_gimple_lva, to indicate that IFN_VA_ARG
     needs to be expanded.  */
  cfun->curr_properties &= ~PROP_gimple_lva;

  return GS_OK;
}

/* Build a new GIMPLE_ASSIGN tuple and append it to the end of *SEQ_P.

   DST/SRC are the destination and source respectively.  You can pass
   ungimplified trees in DST or SRC, in which case they will be converted
   to a gimple operand if necessary.

   This function returns the newly created GIMPLE_ASSIGN tuple.  */

gimple *
gimplify_assign (tree dst, tree src, gimple_seq *seq_p)
{
  tree t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
  gimplify_and_add (t, seq_p);
  /* The MODIFY_EXPR tree itself is no longer needed once gimplified.  */
  ggc_free (t);
  return gimple_seq_last_stmt (*seq_p);
}

/* Hash an expression for the gimplify temporary table.  */

inline hashval_t
gimplify_hasher::hash (const elt_t *p)
{
  tree t = p->val;
  return iterative_hash_expr (t, 0);
}

/* Equality callback for the gimplify temporary table.  */

inline bool
gimplify_hasher::equal (const elt_t *p1, const elt_t *p2)
{
  tree t1 = p1->val;
  tree t2 = p2->val;
  enum tree_code code = TREE_CODE (t1);

  if (TREE_CODE (t2) != code
      || TREE_TYPE (t1) != TREE_TYPE (t2))
    return false;

  if (!operand_equal_p (t1, t2, 0))
    return false;

  /* Only allow them to compare equal if they also hash equal; otherwise
     results are nondeterminate, and we fail bootstrap comparison.  */
  gcc_checking_assert (hash (p1) == hash (p2));

  return true;
}
pack_kernel_c.c
/*Crown Copyright 2012 AWE. * * This file is part of CloverLeaf. * * CloverLeaf is free software: you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your option) * any later version. * * CloverLeaf is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License along with * CloverLeaf. If not, see http://www.gnu.org/licenses/. */ /** * @brief C mpi buffer packing kernel * @author Wayne Gaudin * @details Packs/unpacks mpi send and receive buffers */ #include <stdio.h> #include <stdlib.h> #include "ftocmacros.h" #include <math.h> void clover_pack_message_left_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *left_snd_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Pack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } #pragma omp parallel for private(j,k,index) for (k=y_min-depth;k<=y_max+y_inc+depth;k++) { #pragma ivdep for (j=1;j<=depth;j++) { index=buffer_offset + j+(k+depth-1)*depth; left_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(x_min+x_inc-1+j,k,x_max+4+x_inc,x_min-2,y_min-2)]; } } } void 
clover_unpack_message_left_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *left_rcv_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Unpack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } #pragma omp parallel for private(j,k,index) for (k=y_min-depth;k<=y_max+y_inc+depth;k++) { #pragma ivdep for (j=1;j<=depth;j++) { index=buffer_offset + j+(k+depth-1)*depth; field[FTNREF2D(x_min-j,k,x_max+4+x_inc,x_min-2,y_min-2)]=left_rcv_buffer[FTNREF1D(index,1)]; } } } void clover_pack_message_right_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *right_snd_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Pack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } #pragma omp parallel for private(j,k,index) for (k=y_min-depth;k<=y_max+y_inc+depth;k++) { #pragma ivdep for (j=1;j<=depth;j++) { 
index=buffer_offset + j+(k+depth-1)*depth; right_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(x_max+1-j,k,x_max+4+x_inc,x_min-2,y_min-2)]; } } } void clover_unpack_message_right_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *right_rcv_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Pack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } #pragma omp parallel for private(j,k,index) for (k=y_min-depth;k<=y_max+y_inc+depth;k++) { #pragma ivdep for (j=1;j<=depth;j++) { index=buffer_offset + j+(k+depth-1)*depth; field[FTNREF2D(x_max+x_inc+j,k,x_max+4+x_inc,x_min-2,y_min-2)]=right_rcv_buffer[FTNREF1D(index,1)]; } } } void clover_pack_message_top_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *top_snd_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Pack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { 
x_inc=0; y_inc=1; } for (k=1;k<=depth;k++) { #pragma omp parallel for private(j,index) for (j=x_min-depth;j<=x_max+x_inc+depth;j++) { index= buffer_offset + k+(j+depth-1)*depth; top_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(j,y_max+1-k,x_max+4+x_inc,x_min-2,y_min-2)]; } } } void clover_pack_message_bottom_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *bottom_snd_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Pack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } for (k=1;k<=depth;k++) { #pragma omp parallel for private(j,index) for (j=x_min-depth;j<=x_max+x_inc+depth;j++) { index= buffer_offset + k+(j+depth-1)*depth; bottom_snd_buffer[FTNREF1D(index,1)]=field[FTNREF2D(j,y_min+y_inc-1+k,x_max+4+x_inc,x_min-2,y_min-2)]; } } } void clover_unpack_message_bottom_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *bottom_rcv_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Unpack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } 
if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } for (k=1;k<=depth;k++) { #pragma omp parallel for private(j,index) for (j=x_min-depth;j<=x_max+x_inc+depth;j++) { index= buffer_offset + k+(j+depth-1)*depth; field[FTNREF2D(j,y_min-k,x_max+4+x_inc,x_min-2,y_min-2)]=bottom_rcv_buffer[FTNREF1D(index,1)]; } } } void clover_unpack_message_top_c_(int *xmin,int *xmax,int *ymin,int *ymax, double *field, double *top_rcv_buffer, int *CLL_DT,int *VRTX_DT,int *X_FC_DT,int *Y_FC_DT, int *dpth, int *fld_typ, int *bffr_ffst) { int x_min=*xmin; int x_max=*xmax; int y_min=*ymin; int y_max=*ymax; int CELL_DATA=*CLL_DT; int VERTEX_DATA=*VRTX_DT; int X_FACE_DATA=*X_FC_DT; int Y_FACE_DATA=*Y_FC_DT; int field_type=*fld_typ; int depth=*dpth; int buffer_offset=*bffr_ffst; int j,k,index,x_inc,y_inc; //Unpack // These array modifications still need to be added on, plus the donor data location changes as in update_halo if(field_type==CELL_DATA) { x_inc=0; y_inc=0; } if(field_type==VERTEX_DATA) { x_inc=1; y_inc=1; } if(field_type==X_FACE_DATA) { x_inc=1; y_inc=0; } if(field_type==Y_FACE_DATA) { x_inc=0; y_inc=1; } for (k=1;k<=depth;k++) { #pragma omp parallel for private(j,index) for (j=x_min-depth;j<=x_max+x_inc+depth;j++) { index= buffer_offset + k+(j+depth-1)*depth; field[FTNREF2D(j,y_max+y_inc+k,x_max+4+x_inc,x_min-2,y_min-2)]=top_rcv_buffer[FTNREF1D(index,1)]; } } }
ospf_fmt_plug.c
/*
 * This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Special thanks goes to the Loki project for providing the sample pcap files,
 * and for implementing the cryptographic functions involved in RFC 5709
 * clearly.
 *
 * https://c0decafe.de/svn/codename_loki/
 *
 * John the Ripper "format" plugin cracking OSPF / IS-IS authentication
 * digests computed per RFC 5709 (HMAC-SHA-1/256/384/512 over the packet,
 * with the key derived from the password and Apad appended to the data).
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_ospf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_ospf);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#endif

#include "formats.h"
#include "sha.h"
#include "sha2.h"
#include "hmac_sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "ospf"
#define FORMAT_NAME             "OSPF / IS-IS"
#define FORMAT_TAG              "$ospf$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "HMAC-SHA-X 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             16      /* only the first 16 digest bytes are compared */
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define MAX_SALT_LEN            1500 + 64 // 64 is reserved for appending ospf_apad

/* Hash format: $ospf$<type 1..4>$<hex packet/salt>$<hex digest>
 * type 1 = HMAC-SHA-1, 2 = HMAC-SHA-256, 3 = HMAC-SHA-384, 4 = HMAC-SHA-512 */
static struct fmt_tests tests[] = {
	/* ospf*.pcap from https://c0decafe.de/svn/codename_loki/ */
	{"$ospf$1$02010030ac10001400000000000000020000011454ee4518ffffff00000a120100000028c0a86f14c0a86f0aac10000a$e59ba2c56a2c0429ebe72a194e4b54c250cac1a3", "1234"},
	{"$ospf$2$0201002cac10000a00000000000000020000012054f4c8adffffff00000a120100000028c0a86f0a00000000$508a1abffb5b4554e1aa46eb053bca7105c3e8f6fece4c945f0a0020edb054ec", "1234"},
	{"$ospf$3$0201002cac10000a00000000000000020000013054f4c8e4ffffff00000a120100000028c0a86f0a00000000$9dcf336773034f4ad8b0e19c52546ba72fd91d79d9416c9c1c4854002d3c0b5fc7c80fc1c4994ab9b6c48d9c6ac03587", "1234"},
	{"$ospf$4$0201002cac10000a00000000000000020000014054f4c912ffffff00000a120100000028c0a86f0a00000000$4faa125881137ab3257ee9c8626d0ffa0c387c2e41a832d435afffc41d35881360fbe74442191a8aef201a4aad2689577a0c26a3cc5c681e72f09c297d16ba6a", "1234"},
	/* isis*.pcap from https://c0decafe.de/svn/codename_loki/ */
	{"$ospf$1$831401001101000301192168201101001b004e000104034900018102cc8e8404c0a8ca00f00f0000000003192168201104000000030a17030001$0a33e7acf138d0bfb2b197f331bbd8ae237e0465", "1234"},
	{"$ospf$2$831401001101000301192168201101001b005a000104034900018102cc8e8404c0a8ca00f00f0000000003192168201104000000030a23030002$3082271800f8fab2976d57bb5d1d6e182189b9a2d542f48371da934f854acab9", "1234"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

static struct custom_salt {
	uint32_t salt_length;   /* length of the raw packet bytes, ospf_apad excluded */
	uint32_t type;          /* 1..4, selects the SHA variant */
	unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK
} *cur_salt;

/* Allocate per-candidate buffers; scales key count for OpenMP. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* NOTE(review): omp_get_num_threads() outside a parallel region
	   returns 1, so the OMP_SCALE multiplier is effectively applied to a
	   thread count of 1; other formats use omp_get_max_threads() here —
	   confirm whether this is intentional. */
	int omp_t = omp_get_num_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}

/* Syntactic validation of one ciphertext line: tag, type in 1..4,
 * hex salt within bounds, hex digest 20..64 bytes. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL) // type
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 1 && value != 2 && value != 3 && value != 4)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // salt
		goto err;
	if (hexlenl(p, &extra) > MAX_SALT_LEN * 2 || extra)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // binary
		goto err;
	value = hexlenl(p, &extra);
	if (value < 20 * 2 || value > 64 * 2 || extra)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

// https://tools.ietf.org/rfc/rfc5709.txt and Loki
/* RFC 5709 Apad constant: the source IP "0x878FE1F3" repeated to the
 * largest digest length (64 bytes). */
static const char ospf_apad[] = {
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1,
	0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F,
	0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87,
	0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3,
	0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1,
	0xF3, 0x87, 0x8F, 0xE1, 0xF3, 0x87, 0x8F, 0xE1, 0xF3
};

/* Decode the hex salt; ospf_apad is appended once here so crypt_all can
 * extend the HMAC'd data by the digest length without extra copies. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$"); // type
	cs.type = atoi(p);
	p = strtokm(NULL, "$"); // salt
	cs.salt_length = strlen(p) / 2;
	for (i = 0; i < cs.salt_length; i++)
		cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
	memcpy(cs.salt + cs.salt_length, ospf_apad, 64);

	MEM_FREE(keeptr);
	return &cs;
}

/* Decode the first BINARY_SIZE bytes of the hex digest field. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		uint32_t dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

#ifndef SHA_DIGEST_LENGTH
#define SHA_DIGEST_LENGTH 20
#endif

#ifndef SHA256_DIGEST_LENGTH
#define SHA256_DIGEST_LENGTH 32
#endif

#ifndef SHA384_DIGEST_LENGTH
#define SHA384_DIGEST_LENGTH 48
#endif

#ifndef SHA512_DIGEST_LENGTH
#define SHA512_DIGEST_LENGTH 64
#endif

/* RFC 5709 key preparation + HMAC per candidate password:
 * Ko = password zero-padded to the digest length, or Hash(password) if
 * longer; the HMAC covers the packet plus Apad (already appended to the
 * salt by get_salt).  Only the first 16 digest bytes are kept. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int plen = strlen(saved_key[index]);
		unsigned char key[64];
		unsigned char out[64];

		if (cur_salt->type == 1) {
			SHA_CTX ctx;
			// process password according to rfc5709
			if (plen < SHA_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], plen);
				memset(key + plen, 0, SHA_DIGEST_LENGTH - plen);
			} else if (plen == SHA_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], SHA_DIGEST_LENGTH);
			} else {
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, saved_key[index], plen);
				SHA1_Final(key, &ctx);
			}
			// salt already has ospf_apad appended
			hmac_sha1(key, 20, cur_salt->salt, cur_salt->salt_length + SHA_DIGEST_LENGTH, out, 16);
			memcpy((unsigned char*)crypt_out[index], out, 16);
		} else if (cur_salt->type == 2) {
			SHA256_CTX ctx;
			if (plen < SHA256_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], plen);
				memset(key + plen, 0, SHA256_DIGEST_LENGTH - plen);
			} else if (plen == SHA256_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], SHA256_DIGEST_LENGTH);
			} else {
				SHA256_Init(&ctx);
				SHA256_Update(&ctx, saved_key[index], plen);
				SHA256_Final(key, &ctx);
			}
			hmac_sha256(key, 32, cur_salt->salt, cur_salt->salt_length + SHA256_DIGEST_LENGTH, out, 16);
			memcpy((unsigned char*)crypt_out[index], out, 16);
		} else if (cur_salt->type == 3) {
			SHA512_CTX ctx;
			if (plen < SHA384_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], plen);
				memset(key + plen, 0, SHA384_DIGEST_LENGTH - plen);
			} else if (plen == SHA384_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], SHA384_DIGEST_LENGTH);
			} else {
				SHA384_Init(&ctx);
				SHA384_Update(&ctx, saved_key[index], plen);
				SHA384_Final(key, &ctx);
			}
			hmac_sha384(key, 48, cur_salt->salt, cur_salt->salt_length + SHA384_DIGEST_LENGTH, out, 16);
			memcpy((unsigned char*)crypt_out[index], out, 16);
		} else if (cur_salt->type == 4) {
			SHA512_CTX ctx;
			if (plen < SHA512_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], plen);
				memset(key + plen, 0, SHA512_DIGEST_LENGTH - plen);
			} else if (plen == SHA512_DIGEST_LENGTH) {
				memcpy(key, saved_key[index], SHA512_DIGEST_LENGTH);
			} else {
				SHA512_Init(&ctx);
				SHA512_Update(&ctx, saved_key[index], plen);
				SHA512_Final(key, &ctx);
			}
			hmac_sha512(key, 64, cur_salt->salt, cur_salt->salt_length + SHA512_DIGEST_LENGTH, out, 16);
			memcpy((unsigned char*)crypt_out[index], out, 16);
		}
	}

	return count;
}

/* Quick reject: compare only the first 32 bits of the digest. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void ospf_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_ospf = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		ospf_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
hci.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Alexander Sokolov <alexander.y.sokolov@gmail.com> * * Slater-Condon rule implementation for Heat-Bath CI */ #include <stdlib.h> #include <stdint.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include "hci.h" //#include <omp.h> #include <limits.h> // Computes C' = H * C in the selected CI basis void contract_h_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1) { int *ts = malloc(sizeof(int) * ndet); #pragma omp parallel default(none) shared(h1, eri, norb, neleca, nelecb, strs, civec, hdiag, ndet, ci1, ts) { size_t ip, jp, p; int nset = (norb + 63) / 64; // Calculate excitation level for prescreening ts[0] = 0; uint64_t *str1a = strs; uint64_t *str1b = strs + nset; #pragma omp for schedule(static) for (ip = 1; ip < ndet; ++ip) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset)); } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { if (abs(ts[ip] - ts[jp]) < 3) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, 
strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { ci1[ip] += hdiag[ip] * civec[ip]; } // Single excitation else if ((n_excit_a + n_excit_b) == 1) { int *ia; // alpha->alpha if (n_excit_b == 0) { ia = get_single_excitation(stria, strja, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, stria, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } // beta->beta else if (n_excit_a == 0) { ia = get_single_excitation(strib, strjb, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, strib, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } free(ia); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // alpha,alpha->alpha,alpha if (n_excit_b == 0) { int *ijab = get_double_excitation(stria, strja, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi 
= a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, stria, nset); sign *= compute_cre_des_sign(a, j, stria, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, stria, nset); sign *= compute_cre_des_sign(a, i, stria, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // beta,beta->beta,beta else if (n_excit_a == 0) { int *ijab = get_double_excitation(strib, strjb, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, strib, nset); sign *= compute_cre_des_sign(a, j, strib, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, strib, nset); sign *= compute_cre_des_sign(a, i, strib, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // alpha,beta->alpha,beta else { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j]; double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ia); free(jb); } } } // end if over ts } // end loop over jp } // end loop over ip } // end omp free(ts); } // Compare two strings and compute excitation level int n_excitations(uint64_t *str1, uint64_t *str2, int nset) { size_t p; int d = 0; for (p = 0; p < nset; ++p) { d += popcount(str1[p] ^ str2[p]); } return d / 2; } // Compute number of set bits in a string int popcount(uint64_t x) { const uint64_t m1 = 0x5555555555555555; 
//binary: 0101... const uint64_t m2 = 0x3333333333333333; //binary: 00110011.. const uint64_t m4 = 0x0f0f0f0f0f0f0f0f; //binary: 4 zeros, 4 ones ... const uint64_t m8 = 0x00ff00ff00ff00ff; //binary: 8 zeros, 8 ones ... const uint64_t m16 = 0x0000ffff0000ffff; //binary: 16 zeros, 16 ones ... const uint64_t m32 = 0x00000000ffffffff; //binary: 32 zeros, 32 ones x = (x & m1 ) + ((x >> 1) & m1 ); //put count of each 2 bits into those 2 bits x = (x & m2 ) + ((x >> 2) & m2 ); //put count of each 4 bits into those 4 bits x = (x & m4 ) + ((x >> 4) & m4 ); //put count of each 8 bits into those 8 bits x = (x & m8 ) + ((x >> 8) & m8 ); //put count of each 16 bits into those 16 bits x = (x & m16) + ((x >> 16) & m16); //put count of each 32 bits into those 32 bits x = (x & m32) + ((x >> 32) & m32); //put count of each 64 bits into those 64 bits return x; } // Compute orbital indices for a single excitation int *get_single_excitation(uint64_t *str1, uint64_t *str2, int nset) { size_t p; int *ia = malloc(sizeof(int) * 2); for (p = 0; p < nset; ++p) { size_t pp = nset - p - 1; uint64_t str_tmp = str1[pp] ^ str2[pp]; uint64_t str_particle = str_tmp & str2[pp]; uint64_t str_hole = str_tmp & str1[pp]; if (popcount(str_particle) == 1) { ia[1] = trailz(str_particle) + 64 * p; } if (popcount(str_hole) == 1) { ia[0] = trailz(str_hole) + 64 * p; } } return ia; } // Compute orbital indices for a double excitation int *get_double_excitation(uint64_t *str1, uint64_t *str2, int nset) { size_t p; int *ijab = malloc(sizeof(int) * 4); int particle_ind = 2; int hole_ind = 0; for (p = 0; p < nset; ++p) { size_t pp = nset - p - 1; uint64_t str_tmp = str1[pp] ^ str2[pp]; uint64_t str_particle = str_tmp & str2[pp]; uint64_t str_hole = str_tmp & str1[pp]; int n_particle = popcount(str_particle); int n_hole = popcount(str_hole); if (n_particle == 1) { ijab[particle_ind] = trailz(str_particle) + 64 * p; particle_ind++; } else if (n_particle == 2) { int a = trailz(str_particle); ijab[2] = a + 64 * p; 
str_particle &= ~(1ULL << a); // clear the lowest particle bit, find the second
int b = trailz(str_particle);
ijab[3] = b + 64 * p;
}
if (n_hole == 1) {
    ijab[hole_ind] = trailz(str_hole) + 64 * p;
    hole_ind++;
} else if (n_hole == 2) {
    int i = trailz(str_hole);
    ijab[0] = i + 64 * p;
    str_hole &= ~(1ULL << i);
    int j = trailz(str_hole);
    ijab[1] = j + 64 * p;
}
}
return ijab;
}

// Compute number of trailing zeros in a bit string
// Binary-search style count over one 64-bit word; returns 64 when v == 0.
int trailz(uint64_t v) {
    int c = 64;
    // Trick to unset all bits but the first one
    v &= -(int64_t) v;
    if (v) c--;
    if (v & 0x00000000ffffffff) c -= 32;
    if (v & 0x0000ffff0000ffff) c -= 16;
    if (v & 0x00ff00ff00ff00ff) c -= 8;
    if (v & 0x0f0f0f0f0f0f0f0f) c -= 4;
    if (v & 0x3333333333333333) c -= 2;
    if (v & 0x5555555555555555) c -= 1;
    return c;
}

// Function to print int as a char for debug purposes
// Returns a malloc'ed, NUL-terminated 64-character binary representation of i
// (most significant bit first); caller must free. NULL if allocation fails.
char *int2bin(uint64_t i) {
    size_t bits = sizeof(uint64_t) * CHAR_BIT;
    char * str = malloc(bits + 1);
    if(!str) return NULL;
    str[bits] = 0;
    // type punning because signed shift is implementation-defined
    // NOTE(review): i is already uint64_t, so this copy is redundant but harmless.
    uint64_t u = *(uint64_t *)&i;
    for(; bits--; u >>= 1)
        str[bits] = u & 1 ?
'1' : '0';
return str;
}

// Compute sign for a pair of creation and destruction operators
// Returns (-1)^nperm, where nperm counts the occupied orbitals strictly
// between positions p (creation) and q (destruction) in the bit string str
// (nset 64-bit words, last word = lowest orbitals).
double compute_cre_des_sign(int p, int q, uint64_t *str, int nset) {
    double sign;
    int nperm;
    size_t i;
    int pg = p / 64; // 64-bit word index of p (counted from the low end)
    int qg = q / 64;
    int pb = p % 64; // bit offset of p within its word
    int qb = q % 64;
    if (pg > qg) {
        // p and q sit in different words: count whole words in between,
        // plus the partial words at each end.
        nperm = 0;
        for (i = nset-pg; i < nset-qg-1; ++i) {
            nperm += popcount(str[i]);
        }
        nperm += popcount(str[nset -1 - pg] & ((1ULL << pb) - 1));
        // NOTE(review): this adds the raw shifted word rather than
        // popcount(...) of it, and shifts by 64 (undefined behavior) when
        // qb == 63. Only reachable for norb > 64 — confirm against a
        // reference implementation before relying on multi-word strings.
        nperm += str[nset -1 - qg] >> (qb + 1);
    } else if (pg < qg) {
        nperm = 0;
        for (i = nset-qg; i < nset-pg-1; ++i) {
            nperm += popcount(str[i]);
        }
        nperm += popcount(str[nset -1 - qg] & ((1ULL << qb) - 1));
        // NOTE(review): same concern as the pg > qg branch above.
        nperm += str[nset -1 - pg] >> (pb + 1);
    } else {
        // Same word: count the set bits strictly between p and q
        // (mask formulas assume p != q).
        uint64_t mask;
        if (p > q) mask = (1ULL << pb) - (1ULL << (qb + 1));
        else mask = (1ULL << qb) - (1ULL << (pb + 1));
        nperm = popcount(str[nset -1 - pg] & mask);
    }
    if (nperm % 2) sign = -1.0;
    else sign = 1.0;
    return sign;
}

// Compute a list of occupied orbitals for a given string
// Returns a malloc'ed array of nelec orbital indices in increasing order;
// caller must free. Words are scanned last-to-first (lowest orbitals first).
int *compute_occ_list(uint64_t *string, int nset, int norb, int nelec) {
    size_t k, i;
    int *occ = malloc(sizeof(int) * nelec);
    int off = 0;
    int occ_ind = 0;
    for (k = nset; k > 0; --k) {
        // Last word may cover fewer than 64 orbitals.
        int i_max = ((norb - off) < 64 ? (norb - off) : 64);
        for (i = 0; i < i_max; ++i) {
            int i_occ = (string[k-1] >> i) & 1;
            if (i_occ) {
                occ[occ_ind] = i + off;
                occ_ind++;
            }
        }
        off += 64;
    }
    return occ;
}

// Compute a list of virtual (unoccupied) orbitals for a given string
// Mirror of compute_occ_list: returns a malloc'ed array of norb-nelec
// indices in increasing order; caller must free.
int *compute_vir_list(uint64_t *string, int nset, int norb, int nelec) {
    size_t k, i;
    int *vir = malloc(sizeof(int) * (norb-nelec));
    int off = 0;
    int vir_ind = 0;
    for (k = nset; k > 0; --k) {
        int i_max = ((norb - off) < 64 ?
(norb - off) : 64); for (i = 0; i < i_max; ++i) { int i_occ = (string[k-1] >> i) & 1; if (!i_occ) { vir[vir_ind] = i + off; vir_ind++; } } off += 64; } return vir; } // Select determinants to include in the CI space void select_strs(double *h1, double *eri, double *jk, uint64_t *eri_sorted, uint64_t *jk_sorted, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet_start, uint64_t ndet_finish, double select_cutoff, uint64_t *strs_add, uint64_t* strs_add_size) { size_t p, q, r, i, k, a, ip, jp, kp, lp, ij, iset, idet; uint64_t max_strs_add = strs_add_size[0]; int nset = (norb + 63) / 64; // Compute Fock intermediates double *focka = malloc(sizeof(double) * norb * norb); double *fockb = malloc(sizeof(double) * norb * norb); for (p = 0; p < norb; ++p) { for (q = 0; q < norb; ++q) { double vja = 0.0; double vka = 0.0; for (i = 0; i < neleca; ++i) { size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q; size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q; vja += eri[iipq]; vka += eri[piiq]; } double vjb = 0.0; double vkb = 0.0; for (i = 0; i < nelecb; ++i) { size_t iipq = i * norb * norb * norb + i * norb * norb + p * norb + q; size_t piiq = p * norb * norb * norb + i * norb * norb + i * norb + q; vjb += eri[iipq]; vkb += eri[piiq]; } focka[p * norb + q] = h1[p * norb + q] + vja + vjb - vka; fockb[p * norb + q] = h1[p * norb + q] + vja + vjb - vkb; } } int *holes_a = malloc(sizeof(int) * norb); int *holes_b = malloc(sizeof(int) * norb); int *particles_a = malloc(sizeof(int) * norb); int *particles_b = malloc(sizeof(int) * norb); uint64_t strs_added = 0; // Loop over determinants for (idet = ndet_start; idet < ndet_finish; ++idet) { uint64_t *stra = strs + idet * 2 * nset; uint64_t *strb = strs + idet * 2 * nset + nset; int *occsa = compute_occ_list(stra, nset, norb, neleca); int *occsb = compute_occ_list(strb, nset, norb, nelecb); int *virsa = compute_vir_list(stra, nset, norb, neleca); int *virsb = 
compute_vir_list(strb, nset, norb, nelecb); double tol = select_cutoff / fabs(civec[idet]); // Single excitations int n_holes_a = 0; int n_holes_b = 0; int n_particles_a = 0; int n_particles_b = 0; for (p = 0; p < (norb - neleca); ++p) { i = virsa[p]; if (i < neleca) { holes_a[n_holes_a] = i; n_holes_a++; } } for (p = 0; p < neleca; ++p) { i = occsa[p]; if (i >= neleca) { particles_a[n_particles_a] = i; n_particles_a++; } } for (p = 0; p < (norb - nelecb); ++p) { i = virsb[p]; if (i < nelecb) { holes_b[n_holes_b] = i; n_holes_b++; } } for (p = 0; p < nelecb; ++p) { i = occsb[p]; if (i >= nelecb) { particles_b[n_particles_b] = i; n_particles_b++; } } // TODO: recompute Fock for each |Phi_I> and make sure it matches Fock in the code below // alpha->alpha for (p = 0; p < neleca; ++p) { i = occsa[p]; for (q = 0; q < (norb - neleca); ++q) { a = virsa[q]; double fai = focka[a * norb + i]; for (r = 0; r < n_particles_a; ++r) { k = particles_a[r]; fai += jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_a; ++r) { k = holes_a[r]; fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_particles_b; ++r) { k = particles_b[r]; fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_b; ++r) { k = holes_b[r]; fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } if (fabs(fai) > tol) { uint64_t *tmp = toggle_bit(stra, nset, a); uint64_t *new_str = toggle_bit(tmp, nset, i); for (iset = 0; iset < nset; ++iset) { // new alpha string strs_add[strs_added * 2 * nset + iset] = new_str[iset]; // old beta string strs_add[strs_added * 2 * nset + nset + iset] = strb[iset]; } free(tmp); free(new_str); strs_added++; } } } // beta->beta for (p = 0; p < nelecb; ++p) { i = occsb[p]; for (q = 0; q < (norb - nelecb); ++q) { a = virsb[q]; double fai = fockb[a * norb + i]; for (r = 0; r < n_particles_b; ++r) { k = particles_b[r]; fai += jk[k * norb * norb * norb + k * norb * 
norb + a * norb + i]; } for (r = 0; r < n_holes_b; ++r) { k = holes_b[r]; fai -= jk[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_particles_a; ++r) { k = particles_a[r]; fai += eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } for (r = 0; r < n_holes_a; ++r) { k = holes_a[r]; fai -= eri[k * norb * norb * norb + k * norb * norb + a * norb + i]; } if (fabs(fai) > tol) { uint64_t *tmp = toggle_bit(strb, nset, a); uint64_t *new_str = toggle_bit(tmp, nset, i); for (iset = 0; iset < nset; ++iset) { // old alpha string strs_add[strs_added * 2 * nset + iset] = stra[iset]; // new beta string strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset]; } free(tmp); free(new_str); strs_added++; } } } size_t ip_occ, jp_occ, kp_occ, lp_occ, ih; // Double excitations for (p = 0; p < norb * norb * norb * norb; ++p) { ih = jk_sorted[p]; int aaaa_bbbb_done = (fabs(jk[ih]) < tol); if (!aaaa_bbbb_done) { lp = ih % norb; ij = ih / norb; kp = ij % norb; ij = ij / norb; jp = ij % norb; ip = ij / norb; // alpha,alpha->alpha,alpha ip_occ = 0; jp_occ = 0; kp_occ = 0; lp_occ = 0; for (r = 0; r < neleca; ++r) { int occ_index = occsa[r]; if (ip == occ_index) ip_occ = 1; if (jp == occ_index) jp_occ = 1; if (kp == occ_index) kp_occ = 1; if (lp == occ_index) lp_occ = 1; } if (jp_occ && lp_occ && !ip_occ && !kp_occ) { uint64_t *tmp = toggle_bit(stra, nset, jp); uint64_t *new_str = toggle_bit(tmp, nset, ip); tmp = toggle_bit(new_str, nset, lp); new_str = toggle_bit(tmp, nset, kp); for (iset = 0; iset < nset; ++iset) { strs_add[strs_added * 2 * nset + iset] = new_str[iset]; strs_add[strs_added * 2 * nset + nset + iset] = strb[iset]; } free(tmp); free(new_str); strs_added++; } // beta,beta->beta,beta ip_occ = 0; jp_occ = 0; kp_occ = 0; lp_occ = 0; for (r = 0; r < nelecb; ++r) { int occ_index = occsb[r]; if (ip == occ_index) ip_occ = 1; if (jp == occ_index) jp_occ = 1; if (kp == occ_index) kp_occ = 1; if (lp == occ_index) lp_occ = 1; } if (jp_occ && 
lp_occ && !ip_occ && !kp_occ) {
    // beta,beta->beta,beta candidate: apply the two hole->particle toggles.
    uint64_t *tmp = toggle_bit(strb, nset, jp);
    uint64_t *new_str = toggle_bit(tmp, nset, ip);
    // NOTE(review): the buffer from the first toggle_bit call is overwritten
    // here without being freed (and likewise new_str below), leaking two
    // intermediate strings per accepted double excitation.
    tmp = toggle_bit(new_str, nset, lp);
    new_str = toggle_bit(tmp, nset, kp);
    for (iset = 0; iset < nset; ++iset) {
        // old alpha string
        strs_add[strs_added * 2 * nset + iset] = stra[iset];
        // new beta string
        strs_add[strs_added * 2 * nset + nset + iset] = new_str[iset];
    }
    free(tmp); free(new_str);
    strs_added++;
}
}
// alpha,beta->alpha,beta
ih = eri_sorted[p];
int aabb_done = (fabs(eri[ih]) < tol);
if (!aabb_done) {
    // Unpack the composite integral index ih into four orbital indices.
    lp = ih % norb;
    ij = ih / norb;
    kp = ij % norb;
    ij = ij / norb;
    jp = ij % norb;
    ip = ij / norb;
    ip_occ = 0; jp_occ = 0; kp_occ = 0; lp_occ = 0;
    for (r = 0; r < neleca; ++r) {
        int occ_index = occsa[r];
        if (ip == occ_index) ip_occ = 1;
        if (jp == occ_index) jp_occ = 1;
    }
    for (r = 0; r < nelecb; ++r) {
        int occ_index = occsb[r];
        if (kp == occ_index) kp_occ = 1;
        if (lp == occ_index) lp_occ = 1;
    }
    if (jp_occ && lp_occ && !ip_occ && !kp_occ) {
        uint64_t *tmp = toggle_bit(stra, nset, jp);
        uint64_t *new_str_a = toggle_bit(tmp, nset, ip);
        // NOTE(review): the first tmp is overwritten without free -> leak.
        tmp = toggle_bit(strb, nset, lp);
        uint64_t *new_str_b = toggle_bit(tmp, nset, kp);
        for (iset = 0; iset < nset; ++iset) {
            strs_add[strs_added * 2 * nset + iset] = new_str_a[iset];
            strs_add[strs_added * 2 * nset + nset + iset] = new_str_b[iset];
        }
        free(tmp); free(new_str_a); free(new_str_b);
        strs_added++;
    }
}
// Break statement
// (integrals are pre-sorted by magnitude, so once both classes fall
// below tol no later integral can pass the screening)
if (aaaa_bbbb_done && aabb_done) {
    break;
}
}
free(occsa); free(occsb);
free(virsa); free(virsb);
// NOTE(review): this overflow check runs only after the appends above, so the
// buffer may already have been overrun by the time it triggers; consider
// checking capacity before each append instead.
if (strs_added > max_strs_add) {
    printf("\nError: Number of selected strings is greater than the size of the buffer array (%ld vs %ld).\n", strs_added, max_strs_add);
    exit(EXIT_FAILURE);
}
} // end loop over determinants
free(focka); free(fockb);
free(holes_a); free(holes_b);
free(particles_a); free(particles_b);
// Report back how many candidate strings were actually appended.
strs_add_size[0] = strs_added;
}

// Toggle bit at a specified position
// Returns a malloc'ed copy of str (nset 64-bit words) with orbital bit p
// flipped; caller must free.
uint64_t *toggle_bit(uint64_t *str, int nset, int p) {
    size_t i;
    uint64_t *new_str = malloc(sizeof(uint64_t) * nset);
    for (i = 0; i < nset; ++i) {
new_str[i] = str[i]; // copy the original string
}
int p_set = p / 64; // word containing bit p (counted from the low end)
int p_rel = p % 64;
new_str[nset - p_set - 1] ^= 1ULL << p_rel;
return new_str;
}

// Compares two string indices and determines the order
// Lexicographic comparison of two nset-word strings, most significant word
// first: returns 1 if strs_i > strs_j, -1 if smaller, 0 if equal.
int order(uint64_t *strs_i, uint64_t *strs_j, int nset) {
    size_t i;
    for (i = 0; i < nset; ++i) {
        if (strs_i[i] > strs_j[i]) return 1;
        else if (strs_j[i] > strs_i[i]) return -1;
    }
    return 0;
}

// Recursive quick sort of string array indices
// Sorts idx (indices into strs, nset 64-bit words per string) into new_idx.
// Indices whose string compares equal to the pivot (order() == 0) are
// intentionally dropped, so on return nstrs_[0] holds the count of unique
// strings kept — this is what implements deduplication for argunique.
void qsort_idx(uint64_t *strs, uint64_t *idx, uint64_t *nstrs_, int nset, uint64_t *new_idx) {
    size_t p;
    uint64_t nstrs = nstrs_[0];
    if (nstrs <= 1) {
        for (p = 0; p < nstrs; ++p)
            new_idx[p] = idx[p];
    } else {
        uint64_t ref = idx[nstrs - 1]; // pivot: last index in the range
        uint64_t *group_lt = malloc(sizeof(uint64_t) * nstrs);
        uint64_t *group_gt = malloc(sizeof(uint64_t) * nstrs);
        uint64_t group_lt_nstrs = 0;
        uint64_t group_gt_nstrs = 0;
        for (p = 0; p < (nstrs - 1); ++p) {
            uint64_t i = idx[p];
            uint64_t *stri = strs + i * nset;
            uint64_t *strj = strs + ref * nset;
            int c = order(stri, strj, nset);
            if (c == -1) {
                group_lt[group_lt_nstrs] = i;
                group_lt_nstrs++;
            } else if (c == 1) {
                group_gt[group_gt_nstrs] = i;
                group_gt_nstrs++;
            }
            // c == 0: duplicate of the pivot -> discarded
        }
        uint64_t *new_idx_lt = malloc(sizeof(uint64_t) * group_lt_nstrs);
        uint64_t *new_idx_gt = malloc(sizeof(uint64_t) * group_gt_nstrs);
        qsort_idx(strs, group_lt, &group_lt_nstrs, nset, new_idx_lt);
        qsort_idx(strs, group_gt, &group_gt_nstrs, nset, new_idx_gt);
        nstrs = group_lt_nstrs + group_gt_nstrs + 1;
        nstrs_[0] = nstrs; // may shrink when duplicates were removed
        for (p = 0; p < nstrs; ++p) {
            if (p < group_lt_nstrs) new_idx[p] = new_idx_lt[p];
            else if (p == group_lt_nstrs) new_idx[p] = ref;
            else new_idx[p] = new_idx_gt[p - group_lt_nstrs - 1];
        }
        free(new_idx_lt); free(new_idx_gt);
        free(group_lt); free(group_gt);
    }
}

// Helper function to perform the recursive sort-unique.
// Here nset is the number of 64-bit words per string (stride used by
// qsort_idx); nstrs_[0] is the string count and is updated in place to the
// number of unique strings. sort_idx receives the sorted unique indices.
void argunique(uint64_t *strs, uint64_t *sort_idx, uint64_t *nstrs_, int nset) {
    size_t p;
    uint64_t *init_idx = malloc(sizeof(uint64_t) * nstrs_[0]);
    for (p = 0; p < nstrs_[0]; ++p)
        init_idx[p] = p;
    qsort_idx(strs,
init_idx, nstrs_, nset, sort_idx); free(init_idx); } // Computes C' = S2 * C in the selected CI basis void contract_ss_c(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *ci1) { int *ts = malloc(sizeof(int) * ndet); #pragma omp parallel default(none) shared(norb, neleca, nelecb, strs, civec, ndet, ci1, ts) { size_t ip, jp, p, q; int nset = (norb + 63) / 64; // Calculate excitation level for prescreening ts[0] = 0; uint64_t *str1a = strs; uint64_t *str1b = strs + nset; #pragma omp for schedule(static) for (ip = 1; ip < ndet; ++ip) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset)); } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { if (abs(ts[ip] - ts[jp]) < 3) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { double apb = (double) (neleca + nelecb); double amb = (double) (neleca - nelecb); double prefactor = apb / 2.0 + amb * amb / 4.0; int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); for (p = 0; p < neleca; ++p) { int pa = occsa[p]; for (q = 0; q < nelecb; ++q) { int qb = occsb[q]; if (pa == qb) prefactor -= 1.0; } } ci1[ip] += prefactor * civec[ip]; free(occsa); free(occsb); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // alpha,beta->alpha,beta if (n_excit_a == n_excit_b) { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; if (i == b && j == a) { double sign = 
compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); ci1[ip] -= sign * civec[jp]; } free(ia); free(jb); } } } // end if over ts } // end loop over jp } // end loop over ip } // end omp free(ts); } // Computes C' = H * C and C'' = S2 * C simultaneously in the selected CI basis void contract_h_c_ss_c(double *h1, double *eri, int norb, int neleca, int nelecb, uint64_t *strs, double *civec, double *hdiag, uint64_t ndet, double *ci1, double *ci2) { int *ts = malloc(sizeof(int) * ndet); #pragma omp parallel default(none) shared(h1, eri, norb, neleca, nelecb, strs, civec, hdiag, ndet, ci1, ci2, ts) { size_t ip, jp, p, q; int nset = (norb + 63) / 64; // Calculate excitation level for prescreening ts[0] = 0; uint64_t *str1a = strs; uint64_t *str1b = strs + nset; #pragma omp for schedule(static) for (ip = 1; ip < ndet; ++ip) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; ts[ip] = (n_excitations(stria, str1a, nset) + n_excitations(strib, str1b, nset)); } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { if (abs(ts[ip] - ts[jp]) < 3) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { ci1[ip] += hdiag[ip] * civec[ip]; // S^2 double apb = (double) (neleca + nelecb); double amb = (double) (neleca - nelecb); double prefactor = apb / 2.0 + amb * amb / 4.0; int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); for (p = 0; p < neleca; ++p) { int pa = occsa[p]; for (q = 0; q < nelecb; ++q) { int qb = occsb[q]; if (pa == qb) prefactor -= 1.0; } } ci2[ip] += prefactor * civec[ip]; free(occsa); free(occsb); } // 
Single excitation else if ((n_excit_a + n_excit_b) == 1) { int *ia; // alpha->alpha if (n_excit_b == 0) { ia = get_single_excitation(stria, strja, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, stria, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } // beta->beta else if (n_excit_a == 0) { ia = get_single_excitation(strib, strjb, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, strib, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); double fai = h1[a * norb + i]; for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; int kiak = k * norb * norb * norb + i * norb * norb + a * norb + k; fai += eri[kkai] - eri[kiak]; } for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kkai = k * norb * norb * norb + k * norb * norb + a * norb + i; fai += eri[kkai]; } if (fabs(fai) > 1.0E-14) ci1[ip] += sign * fai * civec[jp]; free(occsa); free(occsb); } free(ia); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // alpha,alpha->alpha,alpha if (n_excit_b == 0) { int *ijab = get_double_excitation(stria, strja, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j 
|| i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, stria, nset); sign *= compute_cre_des_sign(a, j, stria, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, stria, nset); sign *= compute_cre_des_sign(a, i, stria, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // beta,beta->beta,beta else if (n_excit_a == 0) { int *ijab = get_double_excitation(strib, strjb, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int ajbi = a * norb * norb * norb + j * norb * norb + b * norb + i; int aibj = a * norb * norb * norb + i * norb * norb + b * norb + j; if (a > j || i > b) { v = eri[ajbi] - eri[aibj]; sign = compute_cre_des_sign(b, i, strib, nset); sign *= compute_cre_des_sign(a, j, strib, nset); } else { v = eri[aibj] - eri[ajbi]; sign = compute_cre_des_sign(b, j, strib, nset); sign *= compute_cre_des_sign(a, i, strib, nset); } if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; free(ijab); } // alpha,beta->alpha,beta else { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; double v = eri[a * norb * norb * norb + i * norb * norb + b * norb + j]; double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); if (fabs(v) > 1.0E-14) ci1[ip] += sign * v * civec[jp]; // S^2 if (i == b && j == a) { ci2[ip] -= sign * civec[jp]; } free(ia); free(jb); } } } // end if over ts } // end loop over jp } // end loop over ip } // end omp free(ts); } // 2-RDM is sorted in physicists notation: gamma_pqsr=<\Phi|a_p^dag a_q^dag a_r a_s|\Phi> void compute_rdm12s(int norb, int neleca, int nelecb, uint64_t *strs, double *civec, uint64_t ndet, double *rdm1a, double *rdm1b, double *rdm2aa, double *rdm2ab, double *rdm2bb) { #pragma omp parallel default(none) shared(norb, neleca, nelecb, strs, civec, ndet, rdm1a, rdm1b, rdm2aa, rdm2ab, rdm2bb) { size_t ip, 
jp, p, q, r, s; int nset = (norb + 63) / 64; double ci_sq = 0.0; double *rdm1a_private = malloc(sizeof(double) * norb * norb); double *rdm1b_private = malloc(sizeof(double) * norb * norb); double *rdm2aa_private = malloc(sizeof(double) * norb * norb * norb * norb); double *rdm2ab_private = malloc(sizeof(double) * norb * norb * norb * norb); double *rdm2bb_private = malloc(sizeof(double) * norb * norb * norb * norb); for (p = 0; p < norb * norb; ++p) { rdm1a_private[p] = 0.0; rdm1b_private[p] = 0.0; } for (p = 0; p < norb * norb * norb * norb; ++p) { rdm2aa_private[p] = 0.0; rdm2ab_private[p] = 0.0; rdm2bb_private[p] = 0.0; } // Loop over pairs of determinants #pragma omp for schedule(static) for (ip = 0; ip < ndet; ++ip) { for (jp = 0; jp < ndet; ++jp) { uint64_t *stria = strs + ip * 2 * nset; uint64_t *strib = strs + ip * 2 * nset + nset; uint64_t *strja = strs + jp * 2 * nset; uint64_t *strjb = strs + jp * 2 * nset + nset; int n_excit_a = n_excitations(stria, strja, nset); int n_excit_b = n_excitations(strib, strjb, nset); // Diagonal term if (ip == jp) { int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); ci_sq = civec[ip] * civec[ip]; // Diagonal rdm1_aa for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kk = k * norb + k; rdm1a_private[kk] += ci_sq; } // Diagonal rdm1_bb for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int kk = k * norb + k; rdm1b_private[kk] += ci_sq; } // Diagonal rdm2_aaaa for (p = 0; p < neleca; ++p) { int k = occsa[p]; for (q = 0; q < neleca; ++q) { int j = occsa[q]; int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j; int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k; rdm2aa_private[kjkj] += ci_sq; rdm2aa_private[kjjk] -= ci_sq; } // Diagonal rdm2_abab for (q = 0; q < nelecb; ++q) { int j = occsb[q]; int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j; rdm2ab_private[kjkj] += ci_sq; } } // Diagonal rdm2_bbbb for (p = 0; p < 
nelecb; ++p) { int k = occsb[p]; for (q = 0; q < nelecb; ++q) { int j = occsb[q]; int kjkj = k * norb * norb * norb + j * norb * norb + k * norb + j; int kjjk = k * norb * norb * norb + j * norb * norb + j * norb + k; rdm2bb_private[kjkj] += ci_sq; rdm2bb_private[kjjk] -= ci_sq; } } free(occsa); free(occsb); } // Single excitation else if ((n_excit_a + n_excit_b) == 1) { int *ia; // alpha->alpha if (n_excit_b == 0) { ia = get_single_excitation(stria, strja, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, stria, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); ci_sq = sign * civec[ip] * civec[jp]; // rdm1_aa rdm1a_private[a * norb + i] += ci_sq; // rdm2_aaaa for (p = 0; p < neleca; ++p) { int k = occsa[p]; int akik = a * norb * norb * norb + k * norb * norb + i * norb + k; int akki = a * norb * norb * norb + k * norb * norb + k * norb + i; int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i; int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k; rdm2aa_private[akik] += ci_sq; rdm2aa_private[akki] -= ci_sq; rdm2aa_private[kaik] -= ci_sq; rdm2aa_private[kaki] += ci_sq; } // rdm2_abab for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int akik = a * norb * norb * norb + k * norb * norb + i * norb + k; rdm2ab_private[akik] += ci_sq; } free(occsa); free(occsb); } // beta->beta else if (n_excit_a == 0) { ia = get_single_excitation(strib, strjb, nset); int i = ia[0]; int a = ia[1]; double sign = compute_cre_des_sign(a, i, strib, nset); int *occsa = compute_occ_list(stria, nset, norb, neleca); int *occsb = compute_occ_list(strib, nset, norb, nelecb); ci_sq = sign * civec[ip] * civec[jp]; // rdm1_bb rdm1b_private[a * norb + i] += ci_sq; // rdm2_bbbb for (p = 0; p < nelecb; ++p) { int k = occsb[p]; int akik = a * norb * norb * norb + k * norb * norb + i * norb + k; int akki = a * norb * norb * norb + k * norb * norb + k * norb + i; int kaki = k * 
norb * norb * norb + a * norb * norb + k * norb + i; int kaik = k * norb * norb * norb + a * norb * norb + i * norb + k; rdm2bb_private[akik] += ci_sq; rdm2bb_private[akki] -= ci_sq; rdm2bb_private[kaik] -= ci_sq; rdm2bb_private[kaki] += ci_sq; } // rdm2_abab for (p = 0; p < neleca; ++p) { int k = occsa[p]; int kaki = k * norb * norb * norb + a * norb * norb + k * norb + i; rdm2ab_private[kaki] += ci_sq; } free(occsa); free(occsb); } free(ia); } // Double excitation else if ((n_excit_a + n_excit_b) == 2) { int i, j, a, b; // rdm2_aaaa if (n_excit_b == 0) { int *ijab = get_double_excitation(stria, strja, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double sign; int baij = b * norb * norb * norb + a * norb * norb + i * norb + j; int baji = b * norb * norb * norb + a * norb * norb + j * norb + i; int abij = a * norb * norb * norb + b * norb * norb + i * norb + j; int abji = a * norb * norb * norb + b * norb * norb + j * norb + i; if (a > j || i > b) { sign = compute_cre_des_sign(b, i, stria, nset); sign *= compute_cre_des_sign(a, j, stria, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2aa_private[baij] += ci_sq; rdm2aa_private[baji] -= ci_sq; rdm2aa_private[abij] -= ci_sq; rdm2aa_private[abji] += ci_sq; } else { sign = compute_cre_des_sign(b, j, stria, nset); sign *= compute_cre_des_sign(a, i, stria, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2aa_private[baij] -= ci_sq; rdm2aa_private[baji] += ci_sq; rdm2aa_private[abij] += ci_sq; rdm2aa_private[abji] -= ci_sq; } free(ijab); } // rdm2_bbbb else if (n_excit_a == 0) { int *ijab = get_double_excitation(strib, strjb, nset); i = ijab[0]; j = ijab[1]; a = ijab[2]; b = ijab[3]; double v, sign; int baij = b * norb * norb * norb + a * norb * norb + i * norb + j; int baji = b * norb * norb * norb + a * norb * norb + j * norb + i; int abij = a * norb * norb * norb + b * norb * norb + i * norb + j; int abji = a * norb * norb * norb + b * norb * norb + j * norb + i; if (a > j || i > b) { sign = 
compute_cre_des_sign(b, i, strib, nset); sign *= compute_cre_des_sign(a, j, strib, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2bb_private[baij] += ci_sq; rdm2bb_private[baji] -= ci_sq; rdm2bb_private[abij] -= ci_sq; rdm2bb_private[abji] += ci_sq; } else { sign = compute_cre_des_sign(b, j, strib, nset); sign *= compute_cre_des_sign(a, i, strib, nset); ci_sq = sign * civec[ip] * civec[jp]; rdm2bb_private[baij] -= ci_sq; rdm2bb_private[baji] += ci_sq; rdm2bb_private[abij] += ci_sq; rdm2bb_private[abji] -= ci_sq; } free(ijab); } // rdm2_abab else { int *ia = get_single_excitation(stria, strja, nset); int *jb = get_single_excitation(strib, strjb, nset); i = ia[0]; a = ia[1]; j = jb[0]; b = jb[1]; double sign = compute_cre_des_sign(a, i, stria, nset); sign *= compute_cre_des_sign(b, j, strib, nset); ci_sq = sign * civec[ip] * civec[jp]; int abij = a * norb * norb * norb + b * norb * norb + i * norb + j; rdm2ab_private[abij] += ci_sq; free(ia); free(jb); } } } // end loop over jp } // end loop over ip #pragma omp critical { for (p = 0; p < norb * norb; ++p) { rdm1a[p] += rdm1a_private[p]; rdm1b[p] += rdm1b_private[p]; } for (p = 0; p < norb * norb * norb * norb; ++p) { rdm2aa[p] += rdm2aa_private[p]; rdm2ab[p] += rdm2ab_private[p]; rdm2bb[p] += rdm2bb_private[p]; } } free(rdm1a_private); free(rdm1b_private); free(rdm2aa_private); free(rdm2ab_private); free(rdm2bb_private); } // end omp }
parallel_for_reduction.c
/*
 * parallel_for_reduction.c — OpenMP validation-suite test for the
 * reduction() clause on "#pragma omp parallel for".
 *
 * check_parallel_for_reduction()      exercises every reduction operator
 *                                     (+, -, *, &&, ||, &, |, ^) with the
 *                                     clause present; all checks must pass.
 * crosscheck_parallel_for_reduction() runs the same loops WITHOUT the
 *                                     reduction clause; the resulting data
 *                                     races are intentional — it verifies
 *                                     the test would detect a missing clause.
 *
 * LOOPCOUNT comes from omp_testsuite.h.  Both functions return 1 (true)
 * when every sub-check passed, 0 otherwise.
 */
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

/* Positive test: reductions with the clause in place.
 * logFile receives a diagnostic line for every failed sub-check. */
int check_parallel_for_reduction (FILE * logFile)
{
  int sum = 0;
  int known_sum;
  double dsum = 0;
  double dknown_sum;
  double dt = 0.5;              /* base of geometric row for + and - test */
  double rounding_error = 1.E-9;
#define DOUBLE_DIGITS 20        /* dt^DOUBLE_DIGITS */
  int diff;
  double ddiff;
  int product = 1;
  int known_product;
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800   /* 10! */
  int logic_and = 1;
  int logic_or = 0;
  int bit_and = 1;
  int bit_or = 0;
  int exclusiv_bit_or = 0;
  int logics[LOOPCOUNT];
  int i;
  double dpt;
  int result = 0;

  /* Integer sum: 1 + 2 + ... + LOOPCOUNT, closed form as reference. */
  dt = 1. / 3.;
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for schedule(dynamic,1) reduction(+:sum)
  for (i = 1; i <= LOOPCOUNT; i++)
    {
      sum = sum + i;
    }
  if (known_sum != sum)
    {
      result++;
      fprintf (logFile,
               "Error in sum with integers: Result was %d instead of %d\n",
               sum, known_sum);
    }

  /* Integer difference: start at the known sum and subtract everything back
   * out; must end at exactly 0. */
  diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for schedule(dynamic,1) reduction(-:diff)
  for (i = 1; i <= LOOPCOUNT; ++i)
    {
      diff = diff - i;
    }
  if (diff != 0)
    {
      result++;
      fprintf (logFile,
               "Error in difference with integers: Result was %d instead of 0.\n",
               diff);
    }

  /* Tests for doubles */
  /* Geometric series sum of dt^i, compared with the closed form
   * (1 - dt^DOUBLE_DIGITS) / (1 - dt) within rounding_error. */
  dsum = 0;
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic,1) reduction(+:dsum)
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dsum += pow (dt, i);
    }
  if (fabs (dsum - dknown_sum) > rounding_error)
    {
      result++;
      fprintf (logFile,
               "Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",
               dsum, dknown_sum, dsum - dknown_sum);
    }

  /* Double difference: start at the series value and subtract each term;
   * the residue must be within rounding_error of zero. */
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  fprintf (logFile, "\n");      /* blank separator line in the log (kept as-is) */
  ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic,1) reduction(-:ddiff)
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      ddiff -= pow (dt, i);
    }
  if (fabs (ddiff) > rounding_error)
    {
      result++;
      fprintf (logFile,
               "Error in Difference with doubles: Result was %E instead of 0.0\n",
               ddiff);
    }

  /* Integer product: MAX_FACTOR! == KNOWN_PRODUCT. */
#pragma omp parallel for schedule(dynamic,1) reduction(*:product)
  for (i = 1; i <= MAX_FACTOR; i++)
    {
      product *= i;
    }
  known_product = KNOWN_PRODUCT;
  if (known_product != product)
    {
      result++;
      fprintf (logFile,
               "Error in Product with integers: Result was %d instead of %d\n\n",
               product, known_product);
    }

  /* Logical AND, part 1: all operands 1 -> result must stay 1. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 1;
    }
#pragma omp parallel for schedule(dynamic,1) reduction(&&:logic_and)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_and = (logic_and && logics[i]);
    }
  if (!logic_and)
    {
      result++;
      fprintf (logFile, "Error in logic AND part 1.\n");
    }

  /* Logical AND, part 2: one 0 in the middle -> result must become 0. */
  logic_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic,1) reduction(&&:logic_and)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_and = logic_and && logics[i];
    }
  if (logic_and)
    {
      result++;
      fprintf (logFile, "Error in logic AND part 2.\n");
    }

  /* Logical OR, part 1: all operands 0 -> result must stay 0. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel for schedule(dynamic,1) reduction(||:logic_or)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_or = logic_or || logics[i];
    }
  if (logic_or)
    {
      result++;
      fprintf (logFile, "Error in logic OR part 1.\n");
    }

  /* Logical OR, part 2: one 1 in the middle -> result must become 1. */
  logic_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1) reduction(||:logic_or)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_or = logic_or || logics[i];
    }
  if (!logic_or)
    {
      result++;
      fprintf (logFile, "Error in logic OR part 2.\n");
    }

  /* Bitwise AND, parts 1 and 2 (same all-ones / one-zero pattern). */
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logics[i] = 1;
    }
#pragma omp parallel for schedule(dynamic,1) reduction(&:bit_and)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_and = (bit_and & logics[i]);
    }
  if (!bit_and)
    {
      result++;
      fprintf (logFile, "Error in BIT AND part 1.\n");
    }
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic,1) reduction(&:bit_and)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_and = bit_and & logics[i];
    }
  if (bit_and)
    {
      result++;
      fprintf (logFile, "Error in BIT AND part 2.\n");
    }

  /* Bitwise OR, parts 1 and 2 (all-zeros / one-one pattern). */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel for schedule(dynamic,1) reduction(|:bit_or)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_or = bit_or | logics[i];
    }
  if (bit_or)
    {
      result++;
      fprintf (logFile, "Error in BIT OR part 1\n");
    }
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1) reduction(|:bit_or)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_or = bit_or | logics[i];
    }
  if (!bit_or)
    {
      result++;
      fprintf (logFile, "Error in BIT OR part 2\n");
    }

  /* Bitwise XOR, parts 1 and 2: all zeros -> 0; exactly one 1 -> 1. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel for schedule(dynamic,1) reduction(^:exclusiv_bit_or)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
    }
  if (exclusiv_bit_or)
    {
      result++;
      fprintf (logFile, "Error in EXCLUSIV BIT OR part 1\n");
    }
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1) reduction(^:exclusiv_bit_or)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
    }
  if (!exclusiv_bit_or)
    {
      result++;
      fprintf (logFile, "Error in EXCLUSIV BIT OR part 2\n");
    }
  /*printf("\nResult:%d\n",result); */
  return (result == 0);
}

/* Negative (cross-check) test: identical loops but WITHOUT the reduction
 * clause, so the shared accumulators are updated with intentional data
 * races.  On a real multi-threaded run some checks are expected to fail,
 * proving the positive test is actually sensitive to the clause. */
int crosscheck_parallel_for_reduction (FILE * logFile)
{
  int sum = 0;
  int known_sum;
  double dsum = 0;
  double dknown_sum;
  double dt = 0.5;              /* base of geometric row for + and - test */
  double rounding_error = 1.E-9;
#define DOUBLE_DIGITS 20        /* dt^DOUBLE_DIGITS */
  int diff;
  double ddiff;
  int product = 1;
  int known_product;
#define MAX_FACTOR 10
#define KNOWN_PRODUCT 3628800   /* 10! */
  int logic_and = 1;
  int logic_or = 0;
  int bit_and = 1;
  int bit_or = 0;
  int exclusiv_bit_or = 0;
  int logics[LOOPCOUNT];
  int i;
  double dpt;
  int result = 0;

  /* Integer sum — racy: no reduction(+:sum). */
  dt = 1. / 3.;
  known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 1; i <= LOOPCOUNT; i++)
    {
      sum = sum + i;
    }
  if (known_sum != sum)
    {
      result++;
      /*printf("\nError in Sum with integers\n"); */
    }

  /* Integer difference — racy. */
  diff = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 1; i <= LOOPCOUNT; ++i)
    {
      diff = diff - i;
    }
  if (diff != 0)
    {
      result++;
      /*printf("\nError in Difference: Result was %d instead of 0.\n",diff); */
    }

  /* Tests for doubles */
  dsum = 0;
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  dknown_sum = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dsum += pow (dt, i);
    }
  /* NOTE(review): this condition differs from the positive test's
   * fabs(dsum - dknown_sum) > rounding_error and is true for any unequal
   * value; it appears as-is in the original suite — confirm intent. */
  if (dsum != dknown_sum
      && (((dsum - dknown_sum) < rounding_error)
          || ((dsum - dknown_sum) > rounding_error)))
    {
      result++;
      /*printf("\nError in sum with doubles: Calculated: %f Expected: %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum); */
    }

  /* Double difference — racy. */
  dpt = 1;
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      dpt *= dt;
    }
  ddiff = (1 - dpt) / (1 - dt);
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < DOUBLE_DIGITS; ++i)
    {
      ddiff -= pow (dt, i);
    }
  if (ddiff > rounding_error || ddiff < (-rounding_error))
    {
      result++;
      /*printf("\nError in Difference with doubles: Difference %E\n",ddiff); */
    }

  /* Integer product — racy. */
#pragma omp parallel for schedule(dynamic,1)
  for (i = 1; i <= MAX_FACTOR; i++)
    {
      product *= i;
    }
  known_product = KNOWN_PRODUCT;
  if (known_product != product)
    {
      result++;
      /*printf("\nError in Product: Known Product: %d\tcalculated Product: %d\n\n",known_product,product); */
    }

  /* Logical AND, parts 1 and 2 — racy. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 1;
    }
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_and = (logic_and && logics[i]);
    }
  if (!logic_and)
    {
      result++;
      /*printf("Error in AND part 1\n"); */
    }
  logic_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_and = logic_and && logics[i];
    }
  if (logic_and)
    {
      result++;
      /*printf("Error in AND part 2"); */
    }

  /* Logical OR, parts 1 and 2 — racy. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_or = logic_or || logics[i];
    }
  if (logic_or)
    {
      result++;
      /*printf("Error in OR part 1"); */
    }
  logic_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logic_or = logic_or || logics[i];
    }
  if (!logic_or)
    {
      result++;
      /*printf("Error in OR part 2"); */
    }

  /* Bitwise AND, parts 1 and 2 — racy. */
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      logics[i] = 1;
    }
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_and = (bit_and & logics[i]);
    }
  if (!bit_and)
    {
      result++;
      /*printf("Error in BIT AND part 1\n"); */
    }
  bit_and = 1;
  logics[LOOPCOUNT / 2] = 0;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_and = bit_and & logics[i];
    }
  if (bit_and)
    {
      result++;
      /*printf("Error in BIT AND part 2"); */
    }

  /* Bitwise OR, parts 1 and 2 — racy. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_or = bit_or | logics[i];
    }
  if (bit_or)
    {
      result++;
      /*printf("Error in BIT OR part 1\n"); */
    }
  bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      bit_or = bit_or | logics[i];
    }
  if (!bit_or)
    {
      result++;
      /*printf("Error in BIT OR part 2\n"); */
    }

  /* Bitwise XOR, parts 1 and 2 — racy.
   * NOTE(review): these loops use '|' rather than '^' (unlike the positive
   * test); present in the original suite — confirm intent. */
  for (i = 0; i < LOOPCOUNT; i++)
    {
      logics[i] = 0;
    }
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      exclusiv_bit_or = exclusiv_bit_or | logics[i];
    }
  if (exclusiv_bit_or)
    {
      result++;
      /*printf("Error in EXCLUSIV BIT OR part 1\n"); */
    }
  exclusiv_bit_or = 0;
  logics[LOOPCOUNT / 2] = 1;
#pragma omp parallel for schedule(dynamic,1)
  for (i = 0; i < LOOPCOUNT; ++i)
    {
      exclusiv_bit_or = exclusiv_bit_or | logics[i];
    }
  if (!exclusiv_bit_or)
    {
      result++;
      /*printf("Error in EXCLUSIV BIT OR part 2\n"); */
    }
  /*printf("\nResult:%d\n",result); */
  return (result == 0);
}
raytracer.h
#pragma once

#include "resource.h"

#include <iostream>
#include <linalg.h>
#include <memory>
#include <omp.h>
#include <random>

// NOTE(review): std::function and std::vector are used below but
// <functional>/<vector> are not included directly — presumably pulled in
// transitively via resource.h; confirm.

using namespace linalg::aliases;

namespace cg::renderer
{
	// A ray with an origin and a direction; the direction is normalized
	// in the constructor.
	struct ray
	{
		ray(float3 position, float3 direction) : position(position)
		{
			this->direction = normalize(direction);
		}
		float3 position;
		float3 direction;
	};

	// Result of tracing a ray: hit distance t (negative means "no hit" in
	// intersection_shader), barycentric coordinates of the hit, and the
	// shaded color.
	struct payload
	{
		float t;
		float3 bary;
		cg::color color;
	};

	// A triangle assembled from three vertices of vertex-buffer type VB.
	// Stores positions, the two edge vectors used by the intersection test,
	// per-vertex normals, and the material terms of vertex a.
	template<typename VB>
	struct triangle
	{
		triangle(const VB& vertex_a, const VB& vertex_b, const VB& vertex_c);

		// Vertex positions.
		float3 a;
		float3 b;
		float3 c;

		// Precomputed edges b - a and c - a (used by the intersection shader).
		float3 ba;
		float3 ca;

		// Per-vertex normals.
		float3 na;
		float3 nb;
		float3 nc;

		// Material terms taken from vertex a only.
		float3 ambient;
		float3 diffuse;
		float3 emissive;
	};

	template<typename VB>
	inline triangle<VB>::triangle(
			const VB& vertex_a, const VB& vertex_b, const VB& vertex_c)
	{
		a = float3{vertex_a.x, vertex_a.y, vertex_a.z};
		b = float3{vertex_b.x, vertex_b.y, vertex_b.z};
		c = float3{vertex_c.x, vertex_c.y, vertex_c.z};

		// Edges are precomputed once here so the per-ray intersection test
		// does not have to recompute them.
		ba = b - a;
		ca = c - a;

		na = float3{vertex_a.nx, vertex_a.ny, vertex_a.nz};
		nb = float3{vertex_b.nx, vertex_b.ny, vertex_b.nz};
		nc = float3{vertex_c.nx, vertex_c.ny, vertex_c.nz};

		// Material is taken from vertex a (the three vertices are assumed to
		// share a material — TODO confirm against the asset loader).
		ambient = {vertex_a.ambient_r, vertex_a.ambient_g, vertex_a.ambient_b};
		diffuse = {vertex_a.diffuse_r, vertex_a.diffuse_g, vertex_a.diffuse_b};
		emissive = {vertex_a.emissive_r, vertex_a.emissive_g, vertex_a.emissive_b};
	}

	// Axis-aligned bounding box over a list of triangles; used as a simple
	// one-level acceleration structure (one aabb per shape).
	template<typename VB>
	class aabb
	{
	public:
		// Adds a triangle and grows the box to enclose it. Note: takes the
		// triangle by value (const).
		void add_triangle(const triangle<VB> triangle);
		const std::vector<triangle<VB>>& get_triangles() const;
		// Slab test: returns true if the ray may intersect the box.
		bool aabb_test(const ray& ray) const;

	protected:
		std::vector<triangle<VB>> triangles;
		float3 aabb_min;
		float3 aabb_max;
	};

	// Point light (position + color). Not referenced by the code in this
	// header; presumably consumed by the shader lambdas set by the caller.
	struct light
	{
		float3 position;
		float3 color;
	};

	// CPU ray tracer templated on the vertex type VB and render-target texel
	// type RT. The caller wires up miss/closest-hit/any-hit shaders as
	// std::function callbacks, then calls ray_generation per frame.
	template<typename VB, typename RT>
	class raytracer
	{
	public:
		raytracer(){};
		~raytracer(){};

		void set_render_target(std::shared_ptr<resource<RT>> in_render_target);
		void clear_render_target(const RT& in_clear_value);
		void set_viewport(size_t in_width, size_t in_height);

		void set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers);
		void set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers);
		void build_acceleration_structure();
		// One aabb per shape (index/vertex buffer pair), filled by
		// build_acceleration_structure().
		std::vector<aabb<VB>> acceleration_structures;

		// Shoots one primary ray per pixel, accumulation_num times with
		// per-frame jitter, accumulating into `history`.
		void ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num);

		// Traces a ray against all acceleration structures; default clip
		// range [0.001, 1000).
		payload trace_ray(const ray& ray, size_t depth, float max_t = 1000.f, float min_t = 0.001f) const;
		payload intersection_shader(const triangle<VB>& triangle, const ray& ray) const;

		// Caller-provided shader callbacks. NOTE(review): trace_ray calls
		// miss_shader unconditionally — it must be assigned before rendering
		// or the call dereferences an empty std::function; confirm callers.
		std::function<payload(const ray& ray)> miss_shader = nullptr;
		std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle, size_t depth)>
				closest_hit_shader = nullptr;
		std::function<payload(const ray& ray, payload& payload, const triangle<VB>& triangle)>
				any_hit_shader = nullptr;

		// Halton (base 2, base 3) jitter offset in [-0.5, 0.5) per frame.
		float2 get_jitter(int frame_id);

	protected:
		std::shared_ptr<cg::resource<RT>> render_target;
		// Accumulation buffer (allocated in set_viewport).
		std::shared_ptr<cg::resource<float3>> history;
		std::vector<std::shared_ptr<cg::resource<unsigned int>>> index_buffers;
		std::vector<std::shared_ptr<cg::resource<VB>>> vertex_buffers;

		size_t width = 1920;
		size_t height = 1080;
	};

	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::set_render_target(
			std::shared_ptr<resource<RT>> in_render_target)
	{
		render_target = in_render_target;
	}

	// Fills the render target with the clear value and zeroes the history
	// accumulation buffer (if it has been allocated).
	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::clear_render_target(const RT& in_clear_value)
	{
		for (size_t i = 0; i < render_target->get_number_of_elements(); i++)
		{
			render_target->item(i) = in_clear_value;
			if (history)
				history->item(i) = float3 {0.f, 0.f, 0.f};
		}
	}

	template<typename VB, typename RT>
	void raytracer<VB, RT>::set_index_buffers(std::vector<std::shared_ptr<cg::resource<unsigned int>>> in_index_buffers)
	{
		index_buffers = in_index_buffers;
	}

	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::set_vertex_buffers(std::vector<std::shared_ptr<cg::resource<VB>>> in_vertex_buffers)
	{
		vertex_buffers = in_vertex_buffers;
	}

	// Builds one AABB per shape by walking its index buffer three indices at
	// a time. Assumes index_buffers and vertex_buffers are parallel arrays
	// and each index count is a multiple of 3 — TODO confirm at load time.
	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::build_acceleration_structure()
	{
		for (size_t shape_id = 0; shape_id < index_buffers.size(); shape_id++)
		{
			auto& index_buffer = index_buffers[shape_id];
			auto& vertex_buffer = vertex_buffers[shape_id];
			size_t index_id = 0;
			aabb<VB> aabb;
			while (index_id < index_buffer->get_number_of_elements())
			{
				triangle<VB> triangle(
						vertex_buffer->item(index_buffer->item(index_id++)),
						vertex_buffer->item(index_buffer->item(index_id++)),
						vertex_buffer->item(index_buffer->item(index_id++)));
				aabb.add_triangle(triangle);
			}
			acceleration_structures.push_back(aabb);
		}
	}

	// Sets the output resolution and (re)allocates the history buffer to
	// match it.
	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::set_viewport(size_t in_width, size_t in_height)
	{
		width = in_width;
		height = in_height;
		history = std::make_shared<cg::resource<float3>> (width, height);
	}

	// Renders accumulation_num jittered frames. Each pixel is mapped to
	// NDC-style [-1, 1] coordinates (x aspect-corrected), a primary ray is
	// traced, and the sqrt-weighted color is accumulated into `history`
	// before being converted to the render-target texel type.
	template<typename VB, typename RT>
	inline void raytracer<VB, RT>::ray_generation(float3 position, float3 direction, float3 right, float3 up, size_t depth, size_t accumulation_num)
	{
		float frame_weight = 1.f / static_cast<float>(accumulation_num);
		for (int frame_id = 0; frame_id < accumulation_num; frame_id++)
		{
			float2 jitter = get_jitter(frame_id);
			for (int x = 0; x < width; x++)
			{
				// Parallel loop intentionally left disabled here; enabling it
				// requires confirming history/render_target writes stay
				// per-pixel-disjoint (they do: each (x, y) is unique).
				//#pragma omp parallel for
				for (int y = 0; y < height; y++)
				{
					float u = (2.f * x + jitter.x) / static_cast<float>(width - 1) - 1.f;
					float v = (2.f * y + jitter.y) / static_cast<float>(height - 1) - 1.f;
					u *= static_cast<float>(width) / static_cast<float>(height);
					float3 ray_direction = direction + u * right - v * up;
					ray ray(position, ray_direction);

					payload payload = trace_ray(ray, depth);

					auto& history_pixel = history->item(x, y);
					// sqrt acts as a simple gamma-style correction on the
					// per-frame contribution before accumulation.
					history_pixel += sqrt(float3 {payload.color.r, payload.color.g, payload.color.b} * frame_weight);
					render_target->item(x, y) = RT::from_float3(history_pixel);
				}
			}
		}
	}

	// Finds the closest hit within (min_t, max_t). If an any_hit_shader is
	// set, returns on the FIRST acceptable hit instead (shadow-ray style).
	// depth counts remaining bounces: at 0 the miss shader is returned
	// directly.
	template<typename VB, typename RT>
	inline payload raytracer<VB, RT>::trace_ray(
			const ray& ray, size_t depth, float max_t, float min_t) const
	{
		if (depth == 0)
			return miss_shader(ray);
		depth--;

		payload closest_hit_payload = {};
		closest_hit_payload.t = max_t;
		const triangle<VB>* closest_triangle = nullptr;

		for (auto& aabb: acceleration_structures)
		{
			// Cheap box rejection before per-triangle tests.
			if (!aabb.aabb_test(ray))
				continue;
			for (auto& triangle: aabb.get_triangles())
			{
				payload payload = intersection_shader(triangle, ray);
				if (payload.t > min_t && payload.t < closest_hit_payload.t)
				{
					closest_hit_payload = payload;
					closest_triangle = &triangle;
					if (any_hit_shader)
						return any_hit_shader(ray, payload, triangle);
				}
			}
		}

		if (closest_hit_payload.t < max_t)
		{
			if (closest_hit_shader)
				return closest_hit_shader(ray, closest_hit_payload, *closest_triangle, depth);
		}
		return miss_shader(ray);
	}

	// Möller–Trumbore ray/triangle intersection. Returns payload.t = -1 for
	// a miss; on a hit, t is the ray parameter and bary the barycentric
	// coordinates (1-u-v, u, v).
	template<typename VB, typename RT>
	inline payload raytracer<VB, RT>::intersection_shader(
			const triangle<VB>& triangle, const ray& ray) const
	{
		payload payload{};
		payload.t = -1.f;

		float3 pvec = cross(ray.direction, triangle.ca);
		float det = dot(triangle.ba, pvec);
		// Near-zero determinant: ray is (almost) parallel to the triangle.
		if (det > -1e-8 && det < 1e-8)
			return payload;

		float inv_det = 1.f / det;
		float3 tvec = ray.position - triangle.a;
		float u = dot(tvec, pvec) * inv_det;
		if (u < 0.f || u > 1.f)
			return payload;

		float3 qvec = cross(tvec, triangle.ba);
		float v = dot(ray.direction, qvec) * inv_det;
		if (v < 0.f || u + v > 1.f)
			return payload;

		payload.t = dot(triangle.ca, qvec) * inv_det;
		payload.bary = float3{1.f - u - v, u, v};
		return payload;
	}

	// Radical-inverse (Halton) jitter: base 2 for x, base 3 for y, shifted
	// to [-0.5, 0.5). frame_id + 1 is used so frame 0 is not the origin.
	template<typename VB, typename RT>
	float2 raytracer<VB, RT>::get_jitter(int frame_id)
	{
		float2 result{0.f, 0.f};

		constexpr int base_x = 2;
		int index = frame_id + 1;
		float inv_base = 1.f / base_x;
		float fraction = inv_base;
		while (index > 0){
			result.x += (index % base_x) * fraction;
			index /= base_x;
			fraction *= inv_base;
		}

		constexpr int base_y = 3;
		index = frame_id + 1;
		inv_base = 1.f / base_y;
		fraction = inv_base;
		while (index > 0){
			result.y += (index % base_y) * fraction;
			index /= base_y;
			fraction *= inv_base;
		}

		return result - 0.5f;
	}

	// Grows the box to enclose the triangle; the first triangle seeds both
	// corners so min/max start from a real point rather than zeros.
	template<typename VB>
	inline void aabb<VB>::add_triangle(const triangle<VB> triangle)
	{
		if (triangles.empty())
			aabb_max = aabb_min = triangle.a;
		triangles.push_back(triangle);
		aabb_max = max(aabb_max, triangle.a);
		aabb_max = max(aabb_max, triangle.b);
		aabb_max = max(aabb_max, triangle.c);
		aabb_min = min(aabb_min, triangle.a);
		aabb_min = min(aabb_min, triangle.b);
		aabb_min = min(aabb_min, triangle.c);
	}

	template<typename VB>
	inline const std::vector<triangle<VB>>& aabb<VB>::get_triangles() const
	{
		return triangles;
	}

	// Slab method: intersect the ray with the three axis-aligned slabs and
	// accept when the entry interval overlaps the exit interval. Relies on
	// IEEE inf semantics when a direction component is zero.
	template<typename VB>
	inline bool aabb<VB>::aabb_test(const ray& ray) const
	{
		float3 inv_ray_direction = float3(1.f) / ray.direction;
		float3 t0 = (aabb_max - ray.position) * inv_ray_direction;
		float3 t1 = (aabb_min - ray.position) * inv_ray_direction;
		float3 tmax = max(t0, t1);
		float3 tmin = min(t0, t1);
		return maxelem(tmin) <= minelem(tmax);
	}
}// namespace cg::renderer
GraphReconstructor.h
// // Copyright (C) 2015 Yahoo Japan Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #pragma once #include <unordered_map> #include <unordered_set> #include <list> #ifdef _OPENMP #include <omp.h> #else #warning "*** OMP is *NOT* available! ***" #endif namespace NGT { class GraphReconstructor { public: static void extractGraph(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &graphIndex) { graph.reserve(graphIndex.repository.size()); for (size_t id = 1; id < graphIndex.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "GraphReconstructor::extractGraph: Processed " << id << " objects." << std::endl; } try { NGT::GraphNode &node = *graphIndex.getNode(id); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) NGT::ObjectDistances nd; nd.reserve(node.size()); for (auto n = node.begin(graphIndex.repository.allocator); n != node.end(graphIndex.repository.allocator); ++n) { nd.push_back(ObjectDistance((*n).id, (*n).distance)); } graph.push_back(nd); #else graph.push_back(node); #endif if (graph.back().size() != graph.back().capacity()) { std::cerr << "GraphReconstructor::extractGraph: Warning! The graph size must be the same as the capacity. " << id << std::endl; } } catch(NGT::Exception &err) { graph.push_back(NGT::ObjectDistances()); continue; } } } static void adjustPaths(NGT::Index &outIndex) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "construct index is not implemented." 
<< std::endl; exit(1); #else NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex()); size_t rStartRank = 0; std::list<std::pair<size_t, NGT::GraphNode> > tmpGraph; for (size_t id = 1; id < outGraph.repository.size(); id++) { NGT::GraphNode &node = *outGraph.getNode(id); tmpGraph.push_back(std::pair<size_t, NGT::GraphNode>(id, node)); if (node.size() > rStartRank) { node.resize(rStartRank); } } size_t removeCount = 0; for (size_t rank = rStartRank; ; rank++) { bool edge = false; Timer timer; for (auto it = tmpGraph.begin(); it != tmpGraph.end();) { size_t id = (*it).first; try { NGT::GraphNode &node = (*it).second; if (rank >= node.size()) { it = tmpGraph.erase(it); continue; } edge = true; if (rank >= 1 && node[rank - 1].distance > node[rank].distance) { std::cerr << "distance order is wrong!" << std::endl; std::cerr << id << ":" << rank << ":" << node[rank - 1].id << ":" << node[rank].id << std::endl; } NGT::GraphNode &tn = *outGraph.getNode(id); volatile bool found = false; if (rank < 1000) { for (size_t tni = 0; tni < tn.size() && !found; tni++) { if (tn[tni].id == node[rank].id) { continue; } NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id); for (size_t dni = 0; dni < dstNode.size(); dni++) { if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) { found = true; break; } } } } else { #ifdef _OPENMP #pragma omp parallel for num_threads(10) #endif for (size_t tni = 0; tni < tn.size(); tni++) { if (found) { continue; } if (tn[tni].id == node[rank].id) { continue; } NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id); for (size_t dni = 0; dni < dstNode.size(); dni++) { if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) { found = true; } } } } if (!found) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) outGraph.addEdge(id, node.at(i, outGraph.repository.allocator).id, node.at(i, outGraph.repository.allocator).distance, true); #else 
tn.push_back(NGT::ObjectDistance(node[rank].id, node[rank].distance)); #endif } else { removeCount++; } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; it++; continue; } it++; } if (edge == false) { break; } } #endif // NGT_SHARED_MEMORY_ALLOCATOR } static void adjustPathsEffectively(NGT::Index &outIndex, size_t minNoOfEdges = 0) { NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex()); adjustPathsEffectively(outGraph, minNoOfEdges); } static bool edgeComp(NGT::ObjectDistance a, NGT::ObjectDistance b) { return a.id < b.id; } #if defined(NGT_SHARED_MEMORY_ALLOCATOR) static void insert(NGT::GraphNode &node, size_t edgeID, NGT::Distance edgeDistance, NGT::GraphIndex &graph) { NGT::ObjectDistance edge(edgeID, edgeDistance); GraphNode::iterator ni = std::lower_bound(node.begin(graph.repository.allocator), node.end(graph.repository.allocator), edge, edgeComp); node.insert(ni, edge, graph.repository.allocator); } static bool hasEdge(NGT::GraphIndex &graph, size_t srcNodeID, size_t dstNodeID) { NGT::GraphNode &srcNode = *graph.getNode(srcNodeID); GraphNode::iterator ni = std::lower_bound(srcNode.begin(graph.repository.allocator), srcNode.end(graph.repository.allocator), ObjectDistance(dstNodeID, 0.0), edgeComp); return (ni != srcNode.end(graph.repository.allocator)) && ((*ni).id == dstNodeID); } #else static void insert(NGT::GraphNode &node, size_t edgeID, NGT::Distance edgeDistance) { NGT::ObjectDistance edge(edgeID, edgeDistance); GraphNode::iterator ni = std::lower_bound(node.begin(), node.end(), edge, edgeComp); node.insert(ni, edge); } static bool hasEdge(NGT::GraphIndex &graph, size_t srcNodeID, size_t dstNodeID) { NGT::GraphNode &srcNode = *graph.getNode(srcNodeID); GraphNode::iterator ni = std::lower_bound(srcNode.begin(), srcNode.end(), ObjectDistance(dstNodeID, 0.0), edgeComp); return (ni != srcNode.end()) && ((*ni).id == dstNodeID); } #endif 
static void adjustPathsEffectively(NGT::GraphIndex &outGraph, size_t minNoOfEdges) { Timer timer; timer.start(); std::vector<NGT::GraphNode> tmpGraph; for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &node = *outGraph.getNode(id); tmpGraph.push_back(node); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.clear(outGraph.repository.allocator); #else node.clear(); #endif } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) tmpGraph.push_back(NGT::GraphNode(outGraph.repository.allocator)); #else tmpGraph.push_back(NGT::GraphNode()); #endif } } if (outGraph.repository.size() != tmpGraph.size() + 1) { std::stringstream msg; msg << "GraphReconstructor: Fatal inner error. " << outGraph.repository.size() << ":" << tmpGraph.size(); NGTThrowException(msg); } timer.stop(); std::cerr << "GraphReconstructor::adjustPaths: graph preparing time=" << timer << std::endl; timer.reset(); timer.start(); std::vector<std::vector<std::pair<uint32_t, uint32_t> > > removeCandidates(tmpGraph.size()); int removeCandidateCount = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (size_t idx = 0; idx < tmpGraph.size(); ++idx) { auto it = tmpGraph.begin() + idx; size_t id = idx + 1; try { NGT::GraphNode &srcNode = *it; std::unordered_map<uint32_t, std::pair<size_t, double> > neighbors; for (size_t sni = 0; sni < srcNode.size(); ++sni) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) neighbors[srcNode.at(sni, outGraph.repository.allocator).id] = std::pair<size_t, double>(sni, srcNode.at(sni, outGraph.repository.allocator).distance); #else neighbors[srcNode[sni].id] = std::pair<size_t, double>(sni, srcNode[sni].distance); #endif } std::vector<std::pair<int, std::pair<uint32_t, uint32_t> > > candidates; for (size_t sni = 0; sni < srcNode.size(); sni++) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) NGT::GraphNode &pathNode = tmpGraph[srcNode.at(sni, 
outGraph.repository.allocator).id - 1]; #else NGT::GraphNode &pathNode = tmpGraph[srcNode[sni].id - 1]; #endif for (size_t pni = 0; pni < pathNode.size(); pni++) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) auto dstNodeID = pathNode.at(pni, outGraph.repository.allocator).id; #else auto dstNodeID = pathNode[pni].id; #endif auto dstNode = neighbors.find(dstNodeID); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) if (dstNode != neighbors.end() && srcNode.at(sni, outGraph.repository.allocator).distance < (*dstNode).second.second && pathNode.at(pni, outGraph.repository.allocator).distance < (*dstNode).second.second ) { #else if (dstNode != neighbors.end() && srcNode[sni].distance < (*dstNode).second.second && pathNode[pni].distance < (*dstNode).second.second ) { #endif #if defined(NGT_SHARED_MEMORY_ALLOCATOR) candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode.at(sni, outGraph.repository.allocator).id, dstNodeID))); #else candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode[sni].id, dstNodeID))); #endif removeCandidateCount++; } } } sort(candidates.begin(), candidates.end(), std::greater<std::pair<int, std::pair<uint32_t, uint32_t>>>()); removeCandidates[id - 1].reserve(candidates.size()); for (size_t i = 0; i < candidates.size(); i++) { removeCandidates[id - 1].push_back(candidates[i].second); } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } timer.stop(); std::cerr << "GraphReconstructor::adjustPaths: extracting removed edge candidates time=" << timer << std::endl; timer.reset(); timer.start(); std::list<size_t> ids; for (size_t idx = 0; idx < tmpGraph.size(); ++idx) { ids.push_back(idx + 1); } int removeCount = 0; removeCandidateCount = 0; for (size_t rank = 0; ids.size() != 0; rank++) { for (auto it = ids.begin(); it != ids.end(); ) { size_t id = *it; size_t idx = id - 1; try { NGT::GraphNode &srcNode = tmpGraph[idx]; if (rank >= srcNode.size()) { if (!removeCandidates[idx].empty() && minNoOfEdges == 0) { std::cerr << "Something wrong! ID=" << id << " # of remaining candidates=" << removeCandidates[idx].size() << std::endl; abort(); } #if !defined(NGT_SHARED_MEMORY_ALLOCATOR) NGT::GraphNode empty; tmpGraph[idx] = empty; #endif it = ids.erase(it); continue; } if (removeCandidates[idx].size() > 0 && ((*outGraph.getNode(id)).size() + srcNode.size() - rank) > minNoOfEdges) { removeCandidateCount++; bool pathExist = false; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) { #else while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) { #endif size_t path = removeCandidates[idx].back().first; size_t dst = removeCandidates[idx].back().second; removeCandidates[idx].pop_back(); if (removeCandidates[idx].empty()) { std::vector<std::pair<uint32_t, uint32_t>> empty; removeCandidates[idx] = empty; } if ((hasEdge(outGraph, id, path)) && (hasEdge(outGraph, path, dst))) { pathExist = true; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) { #else while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) { #endif 
removeCandidates[idx].pop_back(); if (removeCandidates[idx].empty()) { std::vector<std::pair<uint32_t, uint32_t>> empty; removeCandidates[idx] = empty; } } break; } } if (pathExist) { removeCount++; it++; continue; } } NGT::GraphNode &outSrcNode = *outGraph.getNode(id); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) insert(outSrcNode, srcNode.at(rank, outGraph.repository.allocator).id, srcNode.at(rank, outGraph.repository.allocator).distance, outGraph); #else insert(outSrcNode, srcNode[rank].id, srcNode[rank].distance); #endif } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; it++; continue; } it++; } } for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &node = *outGraph.getNode(id); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::sort(node.begin(outGraph.repository.allocator), node.end(outGraph.repository.allocator)); #else std::sort(node.begin(), node.end()); #endif } catch(...) {} } } static void convertToANNG(std::vector<NGT::ObjectDistances> &graph) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "convertToANNG is not implemented for shared memory." 
<< std::endl; return; #else std::cerr << "convertToANNG begin" << std::endl; for (size_t idx = 0; idx < graph.size(); idx++) { NGT::GraphNode &node = graph[idx]; for (auto ni = node.begin(); ni != node.end(); ++ni) { graph[(*ni).id - 1].push_back(NGT::ObjectDistance(idx + 1, (*ni).distance)); } } for (size_t idx = 0; idx < graph.size(); idx++) { NGT::GraphNode &node = graph[idx]; if (node.size() == 0) { continue; } std::sort(node.begin(), node.end()); NGT::ObjectID prev = 0; for (auto it = node.begin(); it != node.end();) { if (prev == (*it).id) { it = node.erase(it); continue; } prev = (*it).id; it++; } NGT::GraphNode tmp = node; node.swap(tmp); } std::cerr << "convertToANNG end" << std::endl; #endif } static void reconstructGraph(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &outGraph, size_t originalEdgeSize, size_t reverseEdgeSize) { if (reverseEdgeSize > 10000) { std::cerr << "something wrong. Edge size=" << reverseEdgeSize << std::endl; exit(1); } NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer; originalEdgeTimer.start(); size_t warningCount = 0; const size_t warningLimit = 10; for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &node = *outGraph.getNode(id); if (originalEdgeSize == 0) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.clear(outGraph.repository.allocator); #else NGT::GraphNode empty; node.swap(empty); #endif } else { NGT::ObjectDistances n = graph[id - 1]; if (n.size() < originalEdgeSize) { warningCount++; if (warningCount <= warningLimit) { std::cerr << "GraphReconstructor: Warning. The edges are too few. " << n.size() << ":" << originalEdgeSize << " for " << id << std::endl; } if (warningCount == warningLimit) { std::cerr << "GraphReconstructor: Info. Too many warnings. Warning is disabled." 
<< std::endl; } continue; } n.resize(originalEdgeSize); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.copy(n, outGraph.repository.allocator); #else node.swap(n); #endif } } catch(NGT::Exception &err) { warningCount++; if (warningCount <= warningLimit) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; } if (warningCount == warningLimit) { std::cerr << "GraphReconstructor: Info. Too many warnings. Warning is disabled." << std::endl; } continue; } } if (warningCount > warningLimit) { std::cerr << "GraphReconstructor: The total " << warningCount << " Warnings." << std::endl; } originalEdgeTimer.stop(); reverseEdgeTimer.start(); int insufficientNodeCount = 0; for (size_t id = 1; id <= graph.size(); ++id) { try { NGT::ObjectDistances &node = graph[id - 1]; size_t rsize = reverseEdgeSize; if (rsize > node.size()) { insufficientNodeCount++; rsize = node.size(); } for (size_t i = 0; i < rsize; ++i) { NGT::Distance distance = node[i].distance; size_t nodeID = node[i].id; try { NGT::GraphNode &n = *outGraph.getNode(nodeID); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) n.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator); #else n.push_back(NGT::ObjectDistance(id, distance)); #endif } catch(...) {} } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } reverseEdgeTimer.stop(); if (insufficientNodeCount != 0) { std::cerr << "# of the nodes edges of which are in short = " << insufficientNodeCount << std::endl; } normalizeEdgeTimer.start(); for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &n = *outGraph.getNode(id); if (id % 100000 == 0) { std::cerr << "Processed " << id << " nodes" << std::endl; } #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::sort(n.begin(outGraph.repository.allocator), n.end(outGraph.repository.allocator)); #else std::sort(n.begin(), n.end()); #endif NGT::ObjectID prev = 0; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) for (auto it = n.begin(outGraph.repository.allocator); it != n.end(outGraph.repository.allocator);) { #else for (auto it = n.begin(); it != n.end();) { #endif if (prev == (*it).id) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) it = n.erase(it, outGraph.repository.allocator); #else it = n.erase(it); #endif continue; } prev = (*it).id; it++; } #if !defined(NGT_SHARED_MEMORY_ALLOCATOR) NGT::GraphNode tmp = n; n.swap(tmp); #endif } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; continue; } } normalizeEdgeTimer.stop(); std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time << ":" << normalizeEdgeTimer.time << std::endl; NGT::Property prop; outGraph.getProperty().get(prop); prop.graphType = NGT::NeighborhoodGraph::GraphTypeONNG; outGraph.getProperty().set(prop); } static void reconstructGraphWithConstraint(std::vector<NGT::ObjectDistances> &graph, NGT::GraphIndex &outGraph, size_t originalEdgeSize, size_t reverseEdgeSize, char mode = 'a') { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "reconstructGraphWithConstraint is not implemented." 
<< std::endl; abort(); #else NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer; if (reverseEdgeSize > 10000) { std::cerr << "something wrong. Edge size=" << reverseEdgeSize << std::endl; exit(1); } for (size_t id = 1; id < outGraph.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "Processed " << id << std::endl; } try { NGT::GraphNode &node = *outGraph.getNode(id); if (node.size() == 0) { continue; } node.clear(); NGT::GraphNode empty; node.swap(empty); } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; continue; } } NGT::GraphIndex::showStatisticsOfGraph(outGraph); std::vector<ObjectDistances> reverse(graph.size() + 1); for (size_t id = 1; id <= graph.size(); ++id) { try { NGT::GraphNode &node = graph[id - 1]; if (id % 100000 == 0) { std::cerr << "Processed (summing up) " << id << std::endl; } for (size_t rank = 0; rank < node.size(); rank++) { reverse[node[rank].id].push_back(ObjectDistance(id, node[rank].distance)); } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } std::vector<std::pair<size_t, size_t> > reverseSize(graph.size() + 1); reverseSize[0] = std::pair<size_t, size_t>(0, 0); for (size_t rid = 1; rid <= graph.size(); ++rid) { reverseSize[rid] = std::pair<size_t, size_t>(reverse[rid].size(), rid); } std::sort(reverseSize.begin(), reverseSize.end()); std::vector<uint32_t> indegreeCount(graph.size(), 0); size_t zeroCount = 0; for (size_t sizerank = 0; sizerank <= reverseSize.size(); sizerank++) { if (reverseSize[sizerank].first == 0) { zeroCount++; continue; } size_t rid = reverseSize[sizerank].second; ObjectDistances &rnode = reverse[rid]; for (auto rni = rnode.begin(); rni != rnode.end(); ++rni) { if (indegreeCount[(*rni).id] >= reverseEdgeSize) { continue; } NGT::GraphNode &node = *outGraph.getNode(rid); if (indegreeCount[(*rni).id] > 0 && node.size() >= originalEdgeSize) { continue; } node.push_back(NGT::ObjectDistance((*rni).id, (*rni).distance)); indegreeCount[(*rni).id]++; } } reverseEdgeTimer.stop(); std::cerr << "The number of nodes with zero outdegree by reverse edges=" << zeroCount << std::endl; NGT::GraphIndex::showStatisticsOfGraph(outGraph); normalizeEdgeTimer.start(); for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &n = *outGraph.getNode(id); if (id % 100000 == 0) { std::cerr << "Processed " << id << std::endl; } std::sort(n.begin(), n.end()); NGT::ObjectID prev = 0; for (auto it = n.begin(); it != n.end();) { if (prev == (*it).id) { it = n.erase(it); continue; } prev = (*it).id; it++; } NGT::GraphNode tmp = n; n.swap(tmp); } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } normalizeEdgeTimer.stop(); NGT::GraphIndex::showStatisticsOfGraph(outGraph); originalEdgeTimer.start(); for (size_t id = 1; id < outGraph.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "Processed " << id << std::endl; } NGT::GraphNode &node = graph[id - 1]; try { NGT::GraphNode &onode = *outGraph.getNode(id); bool stop = false; for (size_t rank = 0; (rank < node.size() && rank < originalEdgeSize) && stop == false; rank++) { switch (mode) { case 'a': if (onode.size() >= originalEdgeSize) { stop = true; continue; } break; case 'c': break; } NGT::Distance distance = node[rank].distance; size_t nodeID = node[rank].id; outGraph.addEdge(id, nodeID, distance, false); } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; continue; } } originalEdgeTimer.stop(); NGT::GraphIndex::showStatisticsOfGraph(outGraph); std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time << ":" << normalizeEdgeTimer.time << std::endl; #endif } // reconstruct a pseudo ANNG with a fewer edges from an actual ANNG with more edges. // graph is a source ANNG // index is an index with a reconstructed ANNG static void reconstructANNGFromANNG(std::vector<NGT::ObjectDistances> &graph, NGT::Index &index, size_t edgeSize) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "reconstructANNGFromANNG is not implemented." << std::endl; abort(); #else NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(index.getIndex()); // remove all edges in the index. for (size_t id = 1; id < outGraph.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "Processed " << id << " nodes." 
<< std::endl; } try { NGT::GraphNode &node = *outGraph.getNode(id); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.clear(outGraph.repository.allocator); #else NGT::GraphNode empty; node.swap(empty); #endif } catch(NGT::Exception &err) { } } for (size_t id = 1; id <= graph.size(); ++id) { size_t edgeCount = 0; try { NGT::ObjectDistances &node = graph[id - 1]; NGT::GraphNode &n = *outGraph.getNode(id); NGT::Distance prevDistance = 0.0; assert(n.size() == 0); for (size_t i = 0; i < node.size(); ++i) { NGT::Distance distance = node[i].distance; if (prevDistance > distance) { NGTThrowException("Edge distance order is invalid"); } prevDistance = distance; size_t nodeID = node[i].id; if (node[i].id < id) { try { NGT::GraphNode &dn = *outGraph.getNode(nodeID); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) n.push_back(NGT::ObjectDistance(nodeID, distance), outGraph.repository.allocator); dn.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator); #else n.push_back(NGT::ObjectDistance(nodeID, distance)); dn.push_back(NGT::ObjectDistance(id, distance)); #endif } catch(...) {} edgeCount++; } if (edgeCount >= edgeSize) { break; } } } catch(NGT::Exception &err) { } } for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &n = *outGraph.getNode(id); std::sort(n.begin(), n.end()); NGT::ObjectID prev = 0; for (auto it = n.begin(); it != n.end();) { if (prev == (*it).id) { it = n.erase(it); continue; } prev = (*it).id; it++; } NGT::GraphNode tmp = n; n.swap(tmp); } catch (...) 
{ } } #endif } static void refineANNG(NGT::Index &index, bool unlog, float epsilon = 0.1, float accuracy = 0.0, int noOfEdges = 0, int exploreEdgeSize = INT_MIN, size_t batchSize = 10000) { NGT::StdOstreamRedirector redirector(unlog); redirector.begin(); try { refineANNG(index, epsilon, accuracy, noOfEdges, exploreEdgeSize, batchSize); } catch (NGT::Exception &err) { redirector.end(); throw(err); } } static void refineANNG(NGT::Index &index, float epsilon = 0.1, float accuracy = 0.0, int noOfEdges = 0, int exploreEdgeSize = INT_MIN, size_t batchSize = 10000) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) NGTThrowException("GraphReconstructor::refineANNG: Not implemented for the shared memory option."); #else auto prop = static_cast<GraphIndex&>(index.getIndex()).getGraphProperty(); NGT::ObjectRepository &objectRepository = index.getObjectSpace().getRepository(); NGT::GraphIndex &graphIndex = static_cast<GraphIndex&>(index.getIndex()); size_t nOfObjects = objectRepository.size(); bool error = false; std::string errorMessage; size_t noOfSearchedEdges = noOfEdges < 0 ? -noOfEdges : (noOfEdges > prop.edgeSizeForCreation ? 
noOfEdges : prop.edgeSizeForCreation); noOfSearchedEdges++; for (size_t bid = 1; bid < nOfObjects; bid += batchSize) { NGT::ObjectDistances results[batchSize]; // search #pragma omp parallel for for (size_t idx = 0; idx < batchSize; idx++) { size_t id = bid + idx; if (id % 100000 == 0) { std::cerr << "# of processed objects=" << id << std::endl; } if (objectRepository.isEmpty(id)) { continue; } NGT::SearchContainer searchContainer(*objectRepository.get(id)); searchContainer.setResults(&results[idx]); assert(prop.edgeSizeForCreation > 0); searchContainer.setSize(noOfSearchedEdges); if (accuracy > 0.0) { searchContainer.setExpectedAccuracy(accuracy); } else { searchContainer.setEpsilon(epsilon); } if (exploreEdgeSize != INT_MIN) { searchContainer.setEdgeSize(exploreEdgeSize); } if (!error) { try { index.search(searchContainer); } catch (NGT::Exception &err) { #pragma omp critical { error = true; errorMessage = err.what(); } } } } if (error) { std::stringstream msg; msg << "GraphReconstructor::refineANNG: " << errorMessage; NGTThrowException(msg); } // outgoing edges #pragma omp parallel for for (size_t idx = 0; idx < batchSize; idx++) { size_t id = bid + idx; if (objectRepository.isEmpty(id)) { continue; } NGT::GraphNode &node = *graphIndex.getNode(id); for (auto i = results[idx].begin(); i != results[idx].end(); ++i) { if ((*i).id != id) { node.push_back(*i); } } std::sort(node.begin(), node.end()); // dedupe ObjectID prev = 0; for (GraphNode::iterator ni = node.begin(); ni != node.end();) { if (prev == (*ni).id) { ni = node.erase(ni); continue; } prev = (*ni).id; ni++; } } // incomming edges if (noOfEdges != 0) { continue; } for (size_t idx = 0; idx < batchSize; idx++) { size_t id = bid + idx; if (id % 10000 == 0) { std::cerr << "# of processed objects=" << id << std::endl; } for (auto i = results[idx].begin(); i != results[idx].end(); ++i) { if ((*i).id != id) { NGT::GraphNode &node = *graphIndex.getNode((*i).id); graphIndex.addEdge(node, id, (*i).distance, 
false); } } } } if (noOfEdges > 0) { // prune to build knng size_t nedges = noOfEdges < 0 ? -noOfEdges : noOfEdges; #pragma omp parallel for for (ObjectID id = 1; id < nOfObjects; ++id) { if (objectRepository.isEmpty(id)) { continue; } NGT::GraphNode &node = *graphIndex.getNode(id); if (node.size() > nedges) { node.resize(nedges); } } } #endif // defined(NGT_SHARED_MEMORY_ALLOCATOR) } }; }; // NGT
omp_nested_loop1.c
#include <stdio.h>
#include <omp.h>

/*
 * Demo: a worksharing loop over a descending outer index with a serial
 * inner loop.  Only the outer iterations (i = 9, 8, 7) are divided among
 * the team; each thread then runs its inner loop (j = 0..4) sequentially.
 */
int main()
{
    /* The combined "parallel for" construct is equivalent to the separate
     * "parallel" region + "for private(j)" pair it replaces: the
     * worksharing loop index is implicitly private, and declaring j
     * inside the loop body makes it private to each thread as well. */
#pragma omp parallel for
    for (int i = 9; i > 6; i--) {
        printf("[%d] (i=%d)\n", omp_get_thread_num(), i);
        for (int j = 0; j < 5; j++) {
            printf("[%d] (i,j=%d,%d)\n", omp_get_thread_num(), i, j);
        }
    }
    return 0;
}
GB_unop__bnot_uint32_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Comments below were added in review; no code tokens were changed.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__bnot_uint32_uint32
// op(A') function: GB_unop_tran__bnot_uint32_uint32

// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = ~(aij)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// access the result array (no typecast needed: C and A types match)
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ~(x) ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = aij ; \
    Cx [pC] = ~(z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BNOT || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__bnot_uint32_uint32
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared at function scope so the OpenMP loops below also
    // compile with pre-C99 loop-declaration rules
    int64_t p ;
    if (Ab == NULL)
    {
        // no bitmap: every one of the anz entries of Ax is valid
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = ~(z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = ~(z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__bnot_uint32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // shared transpose kernel body, parameterized by the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
print_affinity.c
#define _GNU_SOURCE #include <stdio.h> #include <unistd.h> // gethostname, getopt #include <sched.h> // sched_getaffinity #ifdef _OPENMP #include <omp.h> #endif extern void runnable (cpu_set_t *, int *, int *); void print_affinity_ (int *rank) { char hnbuf[64]; int thread = 0; int lo; int hi; cpu_set_t coremask; gethostname (hnbuf, sizeof (hnbuf)); #pragma omp parallel private (thread, coremask, lo, hi) { #ifdef _OPENMP thread = omp_get_thread_num (); #endif // Passing zero means use the calling process sched_getaffinity (0, sizeof (coremask), &coremask); runnable (&coremask, &lo, &hi); #pragma omp critical { printf ("MPI rank %d thread %d on %s. (Runnable range: lo=%d hi=%d)\n", *rank, thread, hnbuf, lo, hi); fflush (stdout); } } }
openMPHelloworld.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// If openMP is not supported, DO NOT try to use any openMP thing and try to keep it runable as well.

/* Greet from the calling thread: reports the thread's rank and the team
 * size.  Falls back to a single-thread answer (0 of 1) when the program
 * is built without OpenMP support. */
void Hello() {
#ifdef _OPENMP
  int myRank = omp_get_thread_num();
  int threadCount = omp_get_num_threads();
#else
  int myRank = 0;
  int threadCount = 1; // if openMP is not available, try to code for single-thread programm.
#endif
  printf("Hello world from thread %d of %d\n", myRank, threadCount);
}

/* Usage: program [num_threads]
 *
 * Fix: the original dereferenced argv[1] unconditionally, crashing when no
 * argument was supplied, and forwarded the unvalidated result of strtol()
 * to num_threads(), whose argument must evaluate to a positive integer per
 * the OpenMP specification.  A missing, unparsable, or non-positive count
 * now falls back to a single thread. */
int main(int argc, char* argv[]) {
  int threadCount = 1;                              // default when no argument is given
  if (argc > 1) {
    threadCount = (int) strtol(argv[1], NULL, 10);  // "10" is the base of the counted number
  }
  if (threadCount < 1) {
    fprintf(stderr, "Invalid thread count; defaulting to 1.\n");
    threadCount = 1;
  }

#pragma omp parallel num_threads(threadCount)
  Hello();

  return 0;
}
beta_projectors_base.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file beta_projectors_base.h * * \brief Contains declaration and implementation of sirius::Beta_projectors_base class. */ #ifndef __BETA_PROJECTORS_BASE_H__ #define __BETA_PROJECTORS_BASE_H__ namespace sirius { #ifdef __GPU extern "C" void create_beta_gk_gpu(int num_atoms, int num_gkvec, int const* beta_desc, double_complex const* beta_gk_t, double const* gkvec, double const* atom_pos, double_complex* beta_gk); #endif enum beta_desc_idx { nbf = 0, offset = 1, offset_t = 2, ia = 3 }; struct beta_chunk_t { /// Number of beta-projectors in the current chunk. int num_beta_; /// Number of atoms in the current chunk. 
int num_atoms_; /// Offset in the global index of beta projectors. int offset_; /// Descriptor of block of beta-projectors for an atom. mdarray<int, 2> desc_; /// Positions of atoms. mdarray<double, 2> atom_pos_; }; /// Base class for beta-projectors, gradient of beta-projectors and strain derivatives of beta-projectors. template <int N> class Beta_projectors_base { protected: Simulation_context& ctx_; /// List of G+k vectors. Gvec const& gkvec_; /// Mapping between local and global G+k vector index. std::vector<int> const& igk_; /// Coordinates of G+k vectors used by GPU kernel. mdarray<double, 2> gkvec_coord_; /// Phase-factor independent coefficients of |beta> functions for atom types. std::array<matrix<double_complex>, N> pw_coeffs_t_; bool reallocate_pw_coeffs_t_on_gpu_{true}; matrix<double_complex> pw_coeffs_a_; std::vector<beta_chunk_t> beta_chunks_; int max_num_beta_; /// Total number of beta-projectors among atom types. int num_beta_t_; /// Split beta-projectors into chunks. void split_in_chunks() { auto& uc = ctx_.unit_cell(); /* initial chunk size */ int chunk_size = std::min(uc.num_atoms(), 256); /* maximum number of chunks */ int num_chunks = uc.num_atoms() / chunk_size + std::min(1, uc.num_atoms() % chunk_size); /* final maximum chunk size */ chunk_size = uc.num_atoms() / num_chunks + std::min(1, uc.num_atoms() % num_chunks); int offset_in_beta_gk{0}; beta_chunks_ = std::vector<beta_chunk_t>(num_chunks); for (int ib = 0; ib < num_chunks; ib++) { /* number of atoms in this chunk */ int na = std::min(uc.num_atoms(), (ib + 1) * chunk_size) - ib * chunk_size; beta_chunks_[ib].num_atoms_ = na; beta_chunks_[ib].desc_ = mdarray<int, 2>(4, na); beta_chunks_[ib].atom_pos_ = mdarray<double, 2>(3, na); int num_beta{0}; for (int i = 0; i < na; i++) { /* global index of atom by local index and chunk */ int ia = ib * chunk_size + i; auto pos = uc.atom(ia).position(); auto& type = uc.atom(ia).type(); /* atom fractional coordinates */ for (int x: {0, 1, 2}) { 
beta_chunks_[ib].atom_pos_(x, i) = pos[x]; } /* number of beta functions for atom */ beta_chunks_[ib].desc_(beta_desc_idx::nbf, i) = type.mt_basis_size(); /* offset in beta_gk*/ beta_chunks_[ib].desc_(beta_desc_idx::offset, i) = num_beta; /* offset in beta_gk_t */ beta_chunks_[ib].desc_(beta_desc_idx::offset_t, i) = type.offset_lo(); /* global index of atom */ beta_chunks_[ib].desc_(beta_desc_idx::ia, i) = ia; num_beta += type.mt_basis_size(); } /* number of beta-projectors in this chunk */ beta_chunks_[ib].num_beta_ = num_beta; beta_chunks_[ib].offset_ = offset_in_beta_gk; offset_in_beta_gk += num_beta; if (ctx_.processing_unit() == GPU) { beta_chunks_[ib].desc_.allocate(memory_t::device); beta_chunks_[ib].desc_.copy<memory_t::host, memory_t::device>(); beta_chunks_[ib].atom_pos_.allocate(memory_t::device); beta_chunks_[ib].atom_pos_.copy<memory_t::host, memory_t::device>(); } } max_num_beta_ = 0; for (auto& e: beta_chunks_) { max_num_beta_ = std::max(max_num_beta_, e.num_beta_); } num_beta_t_ = 0; for (int iat = 0; iat < uc.num_atom_types(); iat++) { num_beta_t_ += uc.atom_type(iat).mt_lo_basis_size(); } } /// A buffer for <beta|phi> product, shared between instances of Beta_projectors_base class. /** Stored as double to handle both gamma- and general k-point cases */ static mdarray<double, 1>& beta_phi_shared(size_t size__, memory_t mem_type__) { static mdarray<double, 1> a; /* reallocate buffer */ if (a.size() < size__) { a = mdarray<double, 1>(size__, mem_type__, "beta_phi_shared"); } return a; } /// A buffer for beta projectors for a chunk of atoms. 
static mdarray<double_complex, 1>& pw_coeffs_a_shared(size_t size__, memory_t mem_type__) { static mdarray<double_complex, 1> a; /* reallocate buffer */ if (a.size() < size__) { a = mdarray<double_complex, 1>(size__, mem_type__, "pw_coeffs_a_shared"); } return a; } public: Beta_projectors_base(Simulation_context& ctx__, Gvec const& gkvec__, std::vector<int> const& igk__) : ctx_(ctx__) , gkvec_(gkvec__) , igk_(igk__) { split_in_chunks(); if (!num_beta_t()) { return; } /* allocate memory */ for (int i = 0; i < N; i++) { pw_coeffs_t_[i] = matrix<double_complex>(num_gkvec_loc(), num_beta_t(), memory_t::host, "pw_coeffs_t_"); } if (ctx_.processing_unit() == GPU) { gkvec_coord_ = mdarray<double, 2>(3, num_gkvec_loc(), ctx__.dual_memory_t()); /* copy G+k vectors */ for (int igk_loc = 0; igk_loc < num_gkvec_loc(); igk_loc++) { auto vgk = gkvec_.gkvec(igk_[igk_loc]); for (auto x: {0, 1, 2}) { gkvec_coord_(x, igk_loc) = vgk[x]; } } gkvec_coord_.copy<memory_t::host, memory_t::device>(); } } ~Beta_projectors_base() { beta_phi_shared(0, memory_t::none) = mdarray<double, 1>(); } inline int num_gkvec_loc() const { return static_cast<int>(igk_.size()); } inline Unit_cell const& unit_cell() const { return ctx_.unit_cell(); } matrix<double_complex>& pw_coeffs_t(int i__) { return pw_coeffs_t_[i__]; } /// Plane wave coefficients of |beta> projectors for a chunk of atoms. matrix<double_complex>& pw_coeffs_a() { return pw_coeffs_a_; } /// Calculate inner product between beta-projectors and wave-functions. /** The following is computed: <beta|phi> */ template <typename T> inline matrix<T> inner(int chunk__, Wave_functions& phi__, int ispn__, int idx0__, int n__) { PROFILE("sirius::Beta_projectors_base::inner"); assert(num_gkvec_loc() == phi__.pw_coeffs(ispn__).num_rows_loc()); int nbeta = chunk(chunk__).num_beta_; static_assert(std::is_same<T, double_complex>::value || std::is_same<T, double>::value, "wrong type"); int tsz = std::is_same<T, double_complex>::value ? 
2 : 1; auto& buf = beta_phi_shared(tsz * nbeta * n__, ctx_.dual_memory_t()); matrix<T> beta_phi; switch (ctx_.processing_unit()) { case CPU: { beta_phi = matrix<T>(reinterpret_cast<T*>(buf.template at<CPU>()), nbeta, n__); break; } case GPU: { beta_phi = matrix<T>(reinterpret_cast<T*>(buf.template at<CPU>()), reinterpret_cast<T*>(buf.template at<GPU>()), nbeta, n__); break; } } if (std::is_same<T, double_complex>::value) { switch (ctx_.processing_unit()) { case CPU: { /* compute <beta|phi> */ linalg<CPU>::gemm(2, 0, nbeta, n__, num_gkvec_loc(), pw_coeffs_a().template at<CPU>(), num_gkvec_loc(), phi__.pw_coeffs(ispn__).prime().at<CPU>(0, idx0__), phi__.pw_coeffs(ispn__).prime().ld(), reinterpret_cast<double_complex*>(beta_phi.template at<CPU>()), nbeta); break; } case GPU: { #ifdef __GPU linalg<GPU>::gemm(2, 0, nbeta, n__, num_gkvec_loc(), pw_coeffs_a().template at<GPU>(), num_gkvec_loc(), phi__.pw_coeffs(ispn__).prime().at<GPU>(0, idx0__), phi__.pw_coeffs(ispn__).prime().ld(), reinterpret_cast<double_complex*>(beta_phi.template at<GPU>()), nbeta); beta_phi.template copy<memory_t::device, memory_t::host>(); #else TERMINATE_NO_GPU #endif break; } } } if (std::is_same<T, double>::value) { double a{2}; double a1{-1}; double b{0}; switch (ctx_.processing_unit()) { case CPU: { /* compute <beta|phi> */ linalg<CPU>::gemm(2, 0, nbeta, n__, 2 * num_gkvec_loc(), a, reinterpret_cast<double*>(pw_coeffs_a().template at<CPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().at<CPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), b, reinterpret_cast<double*>(beta_phi.template at<CPU>()), nbeta); if (gkvec_.comm().rank() == 0) { /* subtract one extra G=0 contribution */ linalg<CPU>::ger(nbeta, n__, a1, reinterpret_cast<double*>(pw_coeffs_a().template at<CPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().at<CPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), 
reinterpret_cast<double*>(beta_phi.template at<CPU>()), nbeta); } break; } case GPU: { #ifdef __GPU linalg<GPU>::gemm(2, 0, nbeta, n__, 2 * num_gkvec_loc(), &a, reinterpret_cast<double*>(pw_coeffs_a().template at<GPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().at<GPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), &b, reinterpret_cast<double*>(beta_phi.template at<GPU>()), nbeta); if (gkvec_.comm().rank() == 0) { /* subtract one extra G=0 contribution */ linalg<GPU>::ger(nbeta, n__, &a1, reinterpret_cast<double*>(pw_coeffs_a().template at<GPU>()), 2 * num_gkvec_loc(), reinterpret_cast<double*>(phi__.pw_coeffs(ispn__).prime().template at<GPU>(0, idx0__)), 2 * phi__.pw_coeffs(ispn__).prime().ld(), reinterpret_cast<double*>(beta_phi.template at<GPU>()), nbeta); } beta_phi.template copy<memory_t::device, memory_t::host>(); #else TERMINATE_NO_GPU #endif break; } } } gkvec_.comm().allreduce(beta_phi.template at<CPU>(), static_cast<int>(beta_phi.size())); if (ctx_.processing_unit() == GPU) { beta_phi.template copy<memory_t::host, memory_t::device>(); } return std::move(beta_phi); } /// Generate beta-projectors for a chunk of atoms. 
void generate(int ichunk__, int j__) { PROFILE("sirius::Beta_projectors_base::generate"); auto& pw_coeffs = pw_coeffs_a(); switch (ctx_.processing_unit()) { case CPU: { #pragma omp for for (int i = 0; i < chunk(ichunk__).num_atoms_; i++) { int ia = chunk(ichunk__).desc_(beta_desc_idx::ia, i); double phase = twopi * dot(gkvec_.vk(), ctx_.unit_cell().atom(ia).position()); double_complex phase_k = std::exp(double_complex(0.0, phase)); std::vector<double_complex> phase_gk(num_gkvec_loc()); for (int igk_loc = 0; igk_loc < num_gkvec_loc(); igk_loc++) { auto G = gkvec_.gvec(igk_[igk_loc]); /* total phase e^{i(G+k)r_{\alpha}} */ phase_gk[igk_loc] = std::conj(ctx_.gvec_phase_factor(G, ia) * phase_k); } for (int xi = 0; xi < chunk(ichunk__).desc_(beta_desc_idx::nbf, i); xi++) { for (int igk_loc = 0; igk_loc < num_gkvec_loc(); igk_loc++) { pw_coeffs(igk_loc, chunk(ichunk__).desc_(beta_desc_idx::offset, i) + xi) = pw_coeffs_t_[j__](igk_loc, chunk(ichunk__).desc_(beta_desc_idx::offset_t, i) + xi) * phase_gk[igk_loc]; } } } break; } case GPU: { #ifdef __GPU auto& desc = chunk(ichunk__).desc_; create_beta_gk_gpu(chunk(ichunk__).num_atoms_, num_gkvec_loc(), desc.template at<GPU>(), pw_coeffs_t_[j__].template at<GPU>(), gkvec_coord_.template at<GPU>(), chunk(ichunk__).atom_pos_.template at<GPU>(), pw_coeffs.template at<GPU>()); #endif break; } } } void prepare() { PROFILE("sirius::Beta_projectors_base::prepare"); auto& buf = pw_coeffs_a_shared(num_gkvec_loc() * max_num_beta(), ctx_.dual_memory_t()); switch (ctx_.processing_unit()) { case CPU: { pw_coeffs_a_ = matrix<double_complex>(buf.template at<CPU>(), num_gkvec_loc(), max_num_beta()); break; } case GPU: { pw_coeffs_a_ = matrix<double_complex>(buf.template at<CPU>(), buf.template at<GPU>(), num_gkvec_loc(), max_num_beta()); break; } } if (ctx_.processing_unit() == GPU && reallocate_pw_coeffs_t_on_gpu_) { for (int i = 0; i < N; i++) { pw_coeffs_t_[i].allocate(memory_t::device); pw_coeffs_t_[i].template copy<memory_t::host, 
memory_t::device>(); } } } void dismiss() { PROFILE("sirius::Beta_projectors_base::dismiss"); if (ctx_.processing_unit() == GPU && reallocate_pw_coeffs_t_on_gpu_) { for (int i = 0; i < N; i++) { pw_coeffs_t_[i].deallocate(memory_t::device); } } } static void cleanup() { beta_phi_shared(0, memory_t::host | memory_t::device) = mdarray<double, 1>(); pw_coeffs_a_shared(0, memory_t::host|memory_t::device) = mdarray<double_complex, 1>(); } inline int num_beta_t() const { return num_beta_t_; } inline int num_chunks() const { return static_cast<int>(beta_chunks_.size()); } inline beta_chunk_t const& chunk(int idx__) const { return beta_chunks_[idx__]; } inline int max_num_beta() const { return max_num_beta_; } }; } // namespace #endif
chadphys.h
#ifndef CHAD_PHYS_H
#define CHAD_PHYS_H
#include "3dMath.h"

/* A rigid body: a sphere (c.d[3] > 0 is the radius) or an AABB (c.d[3] <= 0). */
typedef struct {
    aabb shape;     //c.d[3] is sphere radius. If it's zero or less, it's not a sphere, it's a box.
    mat4 localt;    //Local Transform (applied on top of the shape's center for rendering).
    vec3 v;         //velocity
    vec3 a;         //Body specific acceleration, combined with gravity each step.
    void* d;        //User defined pointer.
    f_ mass;        //0 means kinematic, or static. Defaults to zero.
    f_ bounciness;  //default 0, puts a portion of collision displacement back into velocity.
    f_ airfriction; //default 1, multiplied by velocity every timestep.
    f_ friction;    //NOTE(review): struct comment said "default 0.1" but initPhysBody sets 0.99 — confirm intended default.
} phys_body;

/* A simulation world: gravity plus an array of body pointers (NULLs allowed). */
typedef struct{
    vec3 g;              //gravity
    phys_body** bodies;
    f_ ms;               //max speed
    int nbodies;         //number of bodies
} phys_world;

/* Reset a body to its documented defaults: massless (static), unit transform,
 * zero velocity/acceleration, degenerate (point) shape. */
static inline void initPhysBody(phys_body* body){
    body->shape = (aabb){
        .c=(vec4){.d[0] = 0,.d[1] = 0,.d[2] = 0,.d[3] = 0},
        .e=(vec3){.d[0] = 0,.d[1] = 0,.d[2] = 0}
    };
    body->mass = 0;
    body->bounciness = 0;
    body->friction = 0.99; //The amount of coplanar velocity preserved in collisions.
    body->airfriction = 1.0;
    body->a = (vec3){.d[0] = 0,.d[1] = 0,.d[2] = 0};
    body->localt = identitymat4();
    body->d = NULL;
}

/* Model matrix for rendering: translate to the shape's center, then apply
 * the body's local transform. */
static inline mat4 getPhysBodyRenderTransform(phys_body* body){
    return multm4(
        translate(downv4(body->shape.c)),
        body->localt
    );
}

//Check for and, if necessary, resolve colliding bodies.
//Displaces the bodies out of penetration (split by inverse mass ratio) and
//rebuilds their velocities from the center-of-mass velocity, preserved
//coplanar velocity (friction) and a bounce term (bounciness).
static inline void resolveBodies(phys_body* a, phys_body* b){
    if(a->mass > 0 || b->mass > 0){
        //At least one body is dynamic, so a collision response may be needed.
    } else {return;} //Both static/kinematic: nothing to do. Ordered for branch prediction.
    vec4 penvec = (vec4){
        .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=0
    };
    //Check if the two bodies are colliding; d[3] of the result is the
    //penetration depth (<= 0 means no contact).
    if(a->shape.c.d[3] > 0 && b->shape.c.d[3] > 0) //Both Spheres!
    {
        penvec = spherevsphere(a->shape.c, b->shape.c);
    } else if(a->shape.c.d[3] <= 0 && b->shape.c.d[3] <= 0) //Both boxes!
    {
        penvec = boxvbox(a->shape,b->shape);
    } else if (a->shape.c.d[3] > 0 && b->shape.c.d[3] <= 0) //a is a sphere, b is a box
    {
        penvec = spherevaabb(a->shape.c,b->shape);
    } else if (a->shape.c.d[3] <= 0 && b->shape.c.d[3] > 0){ //a is a box, b is a sphere
        //Flip the xyz of the result so the sign convention matches the a-then-b order.
        penvec = spherevaabb(b->shape.c,a->shape);
        penvec.d[0] *= -1;
        penvec.d[1] *= -1;
        penvec.d[2] *= -1;
    }
#ifdef CHADPHYS_DEBUG
    else { puts("\nInvalid configuration. Error.\n"); }
#endif
    if(penvec.d[3] <= 0) return; //No penetration detected, or invalid configuration.
    vec3 penvecnormalized = scalev3(1.0/penvec.d[3], downv4(penvec));
    //NOTE(review): original comments disagree on the sign convention ("points
    //into B" vs "points INTO A"). The usage below (A moves along -penvec,
    //B along +penvec) implies penvec points from A toward B — confirm against
    //spherevsphere/boxvbox in 3dMath.h.
    f_ friction = a->friction * b->friction;
    //We now have the penetration vector, of length penvec.d[3].
    //Determine how much each body should be displaced: each body moves in
    //proportion to the OTHER body's mass fraction.
    f_ bdisplacefactor = a->mass / (a->mass + b->mass);
    f_ adisplacefactor = b->mass / (a->mass + b->mass);
    vec3 comvel;
    if(!(a->mass > 0)) {
        //a is static: b absorbs the full displacement.
        adisplacefactor = 0; bdisplacefactor = 1;comvel = (vec3){{0,0,0}};
    }else if(!(b->mass > 0)) {
        //b is static: a absorbs the full displacement.
        bdisplacefactor = 0; adisplacefactor = 1;comvel = (vec3){{0,0,0}};
    }else{
        //Both dynamic: mass-weighted center-of-mass velocity.
        comvel = addv3(
            scalev3(bdisplacefactor, a->v),
            scalev3(adisplacefactor, b->v));
    }
    if(a->mass > 0){
        vec4 displacea = scalev4(-adisplacefactor, penvec);
        vec3 a_relvel = subv3(a->v, comvel);
        //Velocity component of a that lies in the contact plane.
        vec3 a_planarvel = subv3(a_relvel,
            scalev3(
                dotv3(a_relvel, penvecnormalized),
                penvecnormalized
            )
        );
        a->shape.c.d[0] += displacea.d[0];
        a->shape.c.d[1] += displacea.d[1];
        a->shape.c.d[2] += displacea.d[2];
        a->v = addv3(
            comvel,
            scalev3(1-friction, a_planarvel)
        ); //The center of mass velocity, plus a portion of coplanar velocity.
        a->v = addv3(a->v,
            scalev3(
                a->bounciness,
                downv4(displacea)
            )
        ); //Bounce: feed a portion of the displacement back into velocity.
    }
    if(b->mass > 0){
        vec4 displaceb = scalev4(bdisplacefactor, penvec);
        vec3 b_relvel = subv3(b->v, comvel);
        vec3 b_planarvel = subv3(b_relvel, //brelvel - portion of brelvel in the direction of penvecnormalized
            scalev3(
                dotv3(b_relvel, penvecnormalized), //the component in that direction
                penvecnormalized //that direction
            )
        );
        #pragma omp simd
        for(int i = 0; i < 3; i++)
            b->shape.c.d[i] += displaceb.d[i];
        b->v = addv3(comvel,
            scalev3(1-friction, b_planarvel)
        ); //The center of mass velocity, plus a portion of coplanar velocity.
        b->v = addv3(b->v,
            scalev3(
                b->bounciness,
                downv4(displaceb)
            )
        ); //Bounce term, mirroring the handling of a above.
    }
}

/* Advance the world one timestep: integrate velocity and acceleration for
 * every dynamic body, then run collisioniter rounds of pairwise resolution.
 * NOTE(review): world->g is added every step with no dt scaling and
 * airfriction/ms are not applied here — confirm whether that is intended. */
static inline void stepPhysWorld(phys_world* world, const int collisioniter){
    for(int i = 0; i < world->nbodies; i++)
        if(world->bodies[i] && world->bodies[i]->mass > 0){
            phys_body* body = world->bodies[i];
            vec3 bodypos = addv3(downv4(body->shape.c),body->v);
            body->shape.c.d[0] = bodypos.d[0];
            body->shape.c.d[1] = bodypos.d[1];
            body->shape.c.d[2] = bodypos.d[2];
            body->v = addv3(body->v, body->a);
            body->v = addv3(body->v, world->g);
        }
    //Resolve collisions (if any), every unordered pair, collisioniter times.
    for(int iter = 0; iter < collisioniter; iter++)
        for(int i = 0; i < (int)(world->nbodies-1); i++)
            if(world->bodies[i])
                for(int j = i+1; j < (int)world->nbodies; j++)
                    if(world->bodies[j])
                        resolveBodies(world->bodies[i], world->bodies[j]);
}
#endif
problem0.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdint.h>
#include "../timer.h"
//------------------------------------------------------------------------------------------------------------------------------
// Populate the alpha, beta, u_exact and f grids of every subdomain on this
// rank at the given level for the operator  f = a*A*u - b*(grad(B).grad(u) + B*lap(u)).
//   hLevel : grid spacing at this level (cell centers at (i+0.5)*hLevel).
//   a, b   : scalar weights of the identity and divergence terms.
// After filling, f (and u_exact, scaled by 1/a) are shifted to zero mean so
// the problem is compatible with periodic boundaries when a != 0.
void initialize_problem(domain_type *domain, int level, double hLevel, double a, double b){
  double NPi = 2.0*M_PI; // used only by the trigonometric test problem in the disabled #else branch
  // Parameters for a variable-coefficient (B)eta variant; unused by the
  // constant-coefficient path compiled in below (#if 1). Kept for reference.
  double Bmin = 1.0;
  double Bmax = 10.0;
  double c2 = (Bmax-Bmin)/2;
  double c1 = (Bmax+Bmin)/2;
  double c3=10.0; // how sharply (B)eta transitions
  double c4 = -5.0/0.25;
  int box;
  for(box=0;box<domain->subdomains_per_rank;box++){
    // Start from zeroed grids (including ghost zones).
    memset(domain->subdomains[box].levels[level].grids[__u_exact],0,domain->subdomains[box].levels[level].volume*sizeof(double));
    memset(domain->subdomains[box].levels[level].grids[__f      ],0,domain->subdomains[box].levels[level].volume*sizeof(double));
    int i,j,k;
    #pragma omp parallel for private(k,j,i) collapse(2)
    for(k=0;k<domain->subdomains[box].levels[level].dim.k;k++){
     for(j=0;j<domain->subdomains[box].levels[level].dim.j;j++){
      for(i=0;i<domain->subdomains[box].levels[level].dim.i;i++){
        // Cell-centered physical coordinates of this grid point.
        double x = hLevel*((double)(i+domain->subdomains[box].levels[level].low.i)+0.5);
        double y = hLevel*((double)(j+domain->subdomains[box].levels[level].low.j)+0.5);
        double z = hLevel*((double)(k+domain->subdomains[box].levels[level].low.k)+0.5);
        // Linear index into the ghosted grid (pencil = row stride, plane = slab stride).
        int ijk = (i+domain->subdomains[box].levels[level].ghosts)+
                  domain->subdomains[box].levels[level].pencil*(j+domain->subdomains[box].levels[level].ghosts)+
                  domain->subdomains[box].levels[level].plane *(k+domain->subdomains[box].levels[level].ghosts);
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        // constant coefficient: A = alpha, B = beta, (Bx,By,Bz) = grad(beta)
        double A  = 1.0;
        double B  = 1.0;
        double Bx = 0.0;
        double By = 0.0;
        double Bz = 0.0;
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        #if 1
        // should be continuous in u, u', u'', u''', and u'''' to guarantee high order and periodic boundaries
        // u   = ax^6 + bx^5 + cx^4 + dx^3 + ex^2 + fx + g
        // ux  = 6ax^5 + 5bx^4 + 4cx^3 + 3dx^2 + 2ex + f
        // uxx = 30ax^4 + 20bx^3 + 12cx^2 + 6dx + 2e
        // a= 42.0, b=-126.0, c= 105.0, d=   0.0, e= -21.0, f=   0.0, g=   1.0
        // Separable solution u(x,y,z) = X(x)*Y(y)*Z(z) built from that sextic.
        double X   =  42.0*pow(x,6) - 126.0*pow(x,5) + 105.0*pow(x,4) - 21.0*pow(x,2) + 1.0;
        double Y   =  42.0*pow(y,6) - 126.0*pow(y,5) + 105.0*pow(y,4) - 21.0*pow(y,2) + 1.0;
        double Z   =  42.0*pow(z,6) - 126.0*pow(z,5) + 105.0*pow(z,4) - 21.0*pow(z,2) + 1.0;
        double Xx  = 252.0*pow(x,5) - 630.0*pow(x,4) + 420.0*pow(x,3) - 42.0*x;
        double Yy  = 252.0*pow(y,5) - 630.0*pow(y,4) + 420.0*pow(y,3) - 42.0*y;
        double Zz  = 252.0*pow(z,5) - 630.0*pow(z,4) + 420.0*pow(z,3) - 42.0*z;
        double Xxx = 1260.0*pow(x,4) - 2520.0*pow(x,3) + 1260.0*pow(x,2) - 42.0;
        double Yyy = 1260.0*pow(y,4) - 2520.0*pow(y,3) + 1260.0*pow(y,2) - 42.0;
        double Zzz = 1260.0*pow(z,4) - 2520.0*pow(z,3) + 1260.0*pow(z,2) - 42.0;
        double u   = X  *Y  *Z  ;
        double ux  = Xx *Y  *Z  ;
        double uy  = X  *Yy *Z  ;
        double uz  = X  *Y  *Zz ;
        double uxx = Xxx*Y  *Z  ;
        double uyy = X  *Yyy*Z  ;
        double uzz = X  *Y  *Zzz;
        #else
        #if 0
        // should be continuous in u, u', and u''
        // v(w)     = w^4 - 2w^3 + w^2
        // u(x,y,z) = v(x)v(y)v(z)
        double X   =  1.0*pow(x,4) -  2.0*pow(x,3) + 1.0*pow(x,2) - 1.0/30.0;
        double Y   =  1.0*pow(y,4) -  2.0*pow(y,3) + 1.0*pow(y,2) - 1.0/30.0;
        double Z   =  1.0*pow(z,4) -  2.0*pow(z,3) + 1.0*pow(z,2) - 1.0/30.0;
        double Xx  =  4.0*pow(x,3) -  6.0*pow(x,2) + 2.0*x;
        double Yy  =  4.0*pow(y,3) -  6.0*pow(y,2) + 2.0*y;
        double Zz  =  4.0*pow(z,3) -  6.0*pow(z,2) + 2.0*z;
        double Xxx = 12.0*pow(x,2) - 12.0*x + 2.0;
        double Yyy = 12.0*pow(y,2) - 12.0*y + 2.0;
        double Zzz = 12.0*pow(z,2) - 12.0*z + 2.0;
        double u   = X*Y*Z;
        double ux  = Xx*Y*Z;
        double uy  = X*Yy*Z;
        double uz  = X*Y*Zz;
        double uxx = Xxx*Y*Z;
        double uyy = X*Yyy*Z;
        double uzz = X*Y*Zzz;
        #else
        // Trigonometric separable solution; second derivatives are all
        // -NPi^2 * u, hence the identical expressions below.
        double u   =      sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
        double ux  =  NPi*cos(NPi*x)*sin(NPi*y)*sin(NPi*z);
        double uy  =  NPi*sin(NPi*x)*cos(NPi*y)*sin(NPi*z);
        double uz  =  NPi*sin(NPi*x)*sin(NPi*y)*cos(NPi*z);
        double uxx = -NPi*NPi*sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
        double uyy = -NPi*NPi*sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
        double uzz = -NPi*NPi*sin(NPi*x)*sin(NPi*y)*sin(NPi*z);
        #endif
        #endif
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        // Right-hand side consistent with the chosen exact solution.
        double f = a*A*u - b*( (Bx*ux + By*uy + Bz*uz)  +  B*(uxx + uyy + uzz) );
        //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        domain->subdomains[box].levels[level].grids[__alpha  ][ijk] = A;
        domain->subdomains[box].levels[level].grids[__beta   ][ijk] = B;
        domain->subdomains[box].levels[level].grids[__u_exact][ijk] = u;
        domain->subdomains[box].levels[level].grids[__f      ][ijk] = f;
    }}}
  }
  // Remove the mean of f (and, consistently, of u_exact) so the discrete
  // problem is solvable with periodic boundaries.
  double average_value_of_f = mean(domain,level,__f);
  if(domain->rank==0){printf("\n  average value of f = %20.12e\n",average_value_of_f);fflush(stdout);}
  if(a!=0){
    shift_grid(domain,level,__f      ,__f      ,-average_value_of_f  );
    shift_grid(domain,level,__u_exact,__u_exact,-average_value_of_f/a);
  }
  // what if a==0 and average_value_of_f != 0 ???
}
//------------------------------------------------------------------------------------------------------------------------------
test.c
#include <stdlib.h> #include <check.h> #include <omp.h> START_TEST(omp_atomic) {/*{{{*/ int a = 42; int a_copy = a; #pragma omp parallel shared(a) { #pragma omp single { #pragma omp task shared(a) { #pragma omp atomic a++; } #pragma omp task shared(a) { #pragma omp atomic a--; } } } /* Since the operations are symmetric and mirrored, the variable should have its original value*/ ck_assert_int_eq(a, a_copy); }/*}}}*/ END_TEST Suite* test_suite(void) {/*{{{*/ Suite* s = suite_create("Test"); TCase* tc = tcase_create("omp_atomic"); tcase_add_test(tc, omp_atomic); suite_add_tcase(s, tc); return s; }/*}}}*/ int main(void) {/*{{{*/ int number_failed; Suite* s; SRunner* sr; s = test_suite(); sr = srunner_create(s); srunner_run_all(sr, CK_VERBOSE); number_failed = srunner_ntests_failed(sr); srunner_free(sr); return (number_failed == 0) ? EXIT_SUCCESS : EXIT_FAILURE; }/*}}}*/
kernel.h
// Pre-alignment filter: for each of NumReads read/reference pairs, decide
// whether the pair could align within F_ErrorThreshold edits.
// Each sequence is read as 8 packed 32-bit words per pair (indexing uses
// tid*8 + i; assumes NBytes == 8 — confirm against NBytes' definition) and
// a total budget of 200 bits is examined (globalCounter < 200). Diagonals
// are shifted in 2-bit steps (2*e), i.e. apparently 2 bits per symbol.
// The kernel XORs read against reference along the main diagonal plus
// F_ErrorThreshold upper and lower diagonals and uses __clz on the XOR to
// measure the longest matching prefix; each time no diagonal matches a
// long-enough run, one error is accumulated. Ftest_Results[tid] is set to
// 1 (pass) and cleared to 0 when AccumulatedErrs exceeds the threshold.
// NOTE(review): lsl/lsr/set_bit/__clz/NBytes are defined elsewhere; the
// comments here assume lsl/lsr are logical shifts and __clz counts leading
// zeros of a 32-bit value — confirm.
void sneaky_snake(
    const int nTeams,
    const int nThreads,
    const uint*__restrict F_ReadSeq,
    const uint*__restrict F_RefSeq,
    int*__restrict Ftest_Results,
    const int NumReads,
    const int F_ErrorThreshold)
{
  // One iteration per read pair, offloaded to the target device.
  #pragma omp target teams distribute parallel for num_teams(nTeams) thread_limit(nThreads)
  for (int tid = 0; tid < NumReads; tid++)
  {
    // Private copies of the packed read and reference words.
    uint ReadsPerThread[NBytes];
    uint RefsPerThread[NBytes];

    #pragma unroll
    for (int i = 0; i < NBytes; i++) {
      ReadsPerThread[i] = F_ReadSeq[tid*8 + i];
      RefsPerThread[i]  = F_RefSeq[tid*8 + i];
    }

    /////////////////////////////////////////////////////////////////////////////
    Ftest_Results[tid] = 1; // optimistic: assume the pair passes the filter

    uint ReadCompTmp = 0;
    uint RefCompTmp = 0;
    uint DiagonalResult = 0;

    uint ReadTmp1 = 0;
    uint ReadTmp2 = 0;
    uint RefTmp1 = 0;
    uint RefTmp2 = 0;

    uint CornerCase = 0;

    int localCounter = 0;       // matching-prefix length of the current diagonal
    int localCounterMax = 0;    // best matching-prefix length over all diagonals
    int globalCounter = 0;      // bits of the sequence consumed so far (max 200)
    int Max_leading_zeros = 0;
    int AccumulatedErrs = 0;
    int Diagonal = 0;
    int ShiftValue = 0;         // current bit offset inside word j

    int j = 0; //specifying the j-th int that we are reading in each read-ref comparison (can be from 0 to 7)

    while ( (j < 7) && (globalCounter < 200)) {
      Diagonal = 0;

      // Main diagonal: compare 32 bits of read vs reference starting at
      // bit offset ShiftValue of word j (spliced from words j and j+1).
      RefTmp1 = lsl(RefsPerThread[j], ShiftValue);
      RefTmp2 = lsr(RefsPerThread[j + 1], 32 - ShiftValue);

      ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue);
      ReadTmp2 = lsr(ReadsPerThread[j + 1], 32 - ShiftValue);

      ReadCompTmp = ReadTmp1 | ReadTmp2;
      RefCompTmp = RefTmp1 | RefTmp2;

      DiagonalResult = ReadCompTmp ^ RefCompTmp;

      localCounterMax = __clz(DiagonalResult);

      //////////////////// Upper diagonals /////////////////////
      // Read shifted 2*e bits backwards relative to the reference.
      for(int e = 1; e <= F_ErrorThreshold; e++)
      {
        Diagonal += 1;
        CornerCase = 0;
        if ( (j == 0) && ( (ShiftValue - (2*e)) < 0 ) ) {
          // Shift would underrun the first word: pad the missing high bits
          // and force them to mismatch via CornerCase.
          ReadTmp1 = lsr(ReadsPerThread[j], 2*e - ShiftValue);
          ReadTmp2 = 0;

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;

          CornerCase = 0;
          for(int Ci = 0; Ci < (2*e) - ShiftValue; Ci++) {
            set_bit(CornerCase, 31 - Ci);
          }
          DiagonalResult = DiagonalResult | CornerCase;
          localCounter = __clz(DiagonalResult);
        }
        else if ( (ShiftValue - (2*e) ) < 0 ) {
          // Borrow the missing high bits from the previous word.
          ReadTmp1 = lsl(ReadsPerThread[j-1], 32 - (2*e - ShiftValue));
          ReadTmp2 = lsr(ReadsPerThread[j], 2*e - ShiftValue);

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;
          localCounter = __clz(DiagonalResult);
        }
        else {
          // Whole window fits in words j and j+1.
          ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue - 2*e);
          ReadTmp2 = lsr(ReadsPerThread[j+1], 32 - (ShiftValue - 2*e)) ;

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;
          localCounter = __clz(DiagonalResult);
        }
        if (localCounter>localCounterMax)
          localCounterMax=localCounter;
      }
      /* sh = shift up = upper diagonal RC = ReadCompTmp FC = RefCompTmp D = DiagonalResult DN = diagonal LC = localCounter */

      //////////////////// Lower diagonals /////////////////////
      // Read shifted 2*e bits forward relative to the reference.
      for(int e = 1; e <= F_ErrorThreshold; e++)
      {
        Diagonal += 1;
        CornerCase = 0;
        if (j<5) {
          if ((ShiftValue + 2*e) < 32) {
            ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue + 2*e);
            ReadTmp2 = lsr(ReadsPerThread[j+1], 32 - (ShiftValue + 2*e));

            ReadCompTmp = ReadTmp1 | ReadTmp2;
            RefCompTmp = RefTmp1 | RefTmp2;

            DiagonalResult = ReadCompTmp ^ RefCompTmp;
            localCounter = __clz(DiagonalResult);
          }
          else {
            // Window rolled over into words j+1 and j+2.
            ReadTmp1 = lsl(ReadsPerThread[j+1], (ShiftValue + 2*e) % 32);
            ReadTmp2 = lsr(ReadsPerThread[j+2], 32 - (ShiftValue + 2*e) % 32);

            ReadCompTmp = ReadTmp1 | ReadTmp2;
            RefCompTmp = RefTmp1 | RefTmp2;

            DiagonalResult = 0xffffffff;//ReadCompTmp ^ RefCompTmp;
            DiagonalResult = ReadCompTmp ^ RefCompTmp;
            localCounter = __clz(DiagonalResult);
          }
        }
        else {
          // Near the end of the sequence: mask the bits that fall outside
          // the 200-bit budget so they cannot count as matches.
          ReadTmp1 = lsl(ReadsPerThread[j], ShiftValue + 2*e);
          ReadTmp2 = lsr(ReadsPerThread[j+1], 32 - (ShiftValue + 2*e));

          ReadCompTmp = ReadTmp1 | ReadTmp2;
          RefCompTmp = RefTmp1 | RefTmp2;

          DiagonalResult = ReadCompTmp ^ RefCompTmp;

          CornerCase = 0;
          if ((globalCounter+32)>200) {
            for(int Ci = globalCounter+32-200; Ci < globalCounter+32-200+2*e; Ci++) {
              set_bit(CornerCase, Ci);
            }
          }
          else if ((globalCounter+32)>=(200-(2*e))){
            for(int Ci = 0; Ci < (2*e); Ci++) {
              set_bit(CornerCase, Ci);
            }
          }
          DiagonalResult = DiagonalResult | CornerCase;
          localCounter = __clz(DiagonalResult);
        }
        if (localCounter>localCounterMax)
          localCounterMax=localCounter;
      }
      /* CC = CornerCase sh = shift up = upper diagonal RC = ReadCompTmp FC = RefCompTmp D = DiagonalResult DN = diagonal LC = localCounter */

      // Round the best prefix down to a symbol boundary (multiple of 2).
      Max_leading_zeros = 0;
      if ( (j == 6) && ( ((localCounterMax/2)*2) >= 8) ) {
        Max_leading_zeros = 8;
        break;
      }
      else if((localCounterMax/2*2) > Max_leading_zeros) {
        Max_leading_zeros = ((localCounterMax/2)*2);
      }

      // A short best match means at least one edit at this position.
      if (((Max_leading_zeros/2) < 16) && (j < 5)) {
        AccumulatedErrs += 1;
      }
      else if ((j == 6) && ((Max_leading_zeros/2) < 4)) {
        AccumulatedErrs += 1;
      }

      if(AccumulatedErrs > F_ErrorThreshold) {
        Ftest_Results[tid] = 0; // reject the pair
        break;
      }

      // Advance past the matched run plus the mismatching symbol.
      if(ShiftValue + Max_leading_zeros + 2 >= 32) {
        j += 1;
      }
      // ShiftValue_2Ref = (ShiftValue_2Ref + Max_leading_zeros + 2) %32;
      if (Max_leading_zeros == 32) {
        globalCounter += Max_leading_zeros;
      }
      else {
        ShiftValue = ((ShiftValue + Max_leading_zeros + 2) % 32);
        globalCounter += (Max_leading_zeros + 2);
      }
    }
  }
}
MotifFinder.h
#ifndef __MOTIF_FINDER_H__ #define __MOTIF_FINDER_H__ // discover motifs and store #times the motif cross for each edge #include "CPUFilter.h" #include "CPUGraph.h" #include "CPUIntersection.h" #include "TraversalPlan.h" #include "SpinLock.h" #include "TimeMeasurer.h" #include <algorithm> #if defined(OPENMP) #include <omp.h> #endif class MotifFinder { public: MotifFinder(TraversalPlan *plan, Graph *rel, size_t thread_num) : plan_(plan), graph_(rel), thread_num_(thread_num) {} ~MotifFinder() {} virtual void Execute() { // Assume the original vertex ids is the same as the search order AllConnType intersect_levels; AllCondType conditions; plan_->GetOrderedConnectivity(intersect_levels); plan_->GetOrderedOrdering(conditions); edge_cross_times_ = new size_t[graph_->GetEdgeCount()]; memset(edge_cross_times_, 0, sizeof(size_t) * graph_->GetEdgeCount()); locks_ = new SpinLock[graph_->GetEdgeCount()]; for (size_t i = 0; i < graph_->GetEdgeCount(); ++i) { locks_[i].Init(); } #if defined(OPENMP) omp_set_num_threads(thread_num_); TimeMeasurer timer; timer.StartTimer(); size_t pattern_vertex_count = plan_->GetVertexCount(); total_match_count_ = 0; auto paths = new uintV *[thread_num_]; for (size_t i = 0; i < thread_num_; ++i) { paths[i] = new uintV[pattern_vertex_count]; } long long total_match_count = 0; #pragma omp parallel for schedule(dynamic) reduction(+ : total_match_count) for (uintV u = 0; u < graph_->GetVertexCount(); ++u) { long long ans = 0; size_t thread_id = omp_get_thread_num(); paths[thread_id][0] = u; DFS(thread_id, 1, paths[thread_id], ans, intersect_levels, conditions); // std::cout << "thread_id=" << thread_id << ",u=" << u << ",ans=" << ans // << std::endl; total_match_count_ += ans; } for (size_t i = 0; i < thread_num_; ++i) { delete[] paths[i]; paths[i] = NULL; } delete[] paths; paths = NULL; timer.EndTimer(); std::cout << "total_match_count_=" << total_match_count_ << ", elapsed_time=" << timer.GetElapsedMicroSeconds() / 1000.0 << "ms" << std::endl; 
total_match_count_ = total_match_count; #endif // end of !OPENMP } void DFS(size_t thread_id, size_t cur_level, uintV *path, long long &ans, AllConnType &intersect_levels, AllCondType &conditions) { auto row_ptrs = graph_->GetRowPtrs(); auto cols = graph_->GetCols(); size_t pattern_vertex_count = plan_->GetVertexCount(); if (cur_level == pattern_vertex_count) { ans++; // for this instance, update the corresponding edge weights. auto &connectivity = plan_->GetConnectivity(); for (size_t l = 0; l < pattern_vertex_count; ++l) { for (size_t pred_id = 0; pred_id < connectivity[l].size(); ++pred_id) { size_t l2 = connectivity[l][pred_id]; auto u = path[l]; auto v = path[l2]; // edge u-v auto vindex = std::lower_bound(cols + row_ptrs[u], cols + row_ptrs[u + 1], v) - (cols + row_ptrs[u]) + row_ptrs[u]; assert(vindex < row_ptrs[u + 1] && vindex >= row_ptrs[u]); locks_[vindex].Lock(); edge_cross_times_[vindex]++; locks_[vindex].Unlock(); } } return; } if (intersect_levels[cur_level].size() == 0) { for (uintV i = 0; i < graph_->GetVertexCount(); ++i) { if (CheckCondition(path, i, conditions[cur_level]) == false || CheckEquality(path, cur_level, i)) continue; path[cur_level] = i; DFS(thread_id, cur_level + 1, path, ans, intersect_levels, conditions); } } else { std::vector<uintV> res[2]; for (size_t j = 0; j < intersect_levels[cur_level].size(); ++j) { size_t p2 = intersect_levels[cur_level][j]; auto first = path[p2]; auto first_begin = &cols[row_ptrs[first]]; auto first_end = &cols[row_ptrs[first + 1]]; if (j == 0) { res[j % 2].assign(first_begin, first_end); } else { size_t max_size = std::min((size_t)(first_end - first_begin), res[(j + 1) % 2].size()); res[j % 2].resize(max_size); size_t res_size = SortedIntersection( first_begin, first_end, res[(j + 1) % 2].begin(), res[(j + 1) % 2].end(), res[j % 2].begin()); assert(res_size <= max_size); res[j % 2].resize(res_size); } } // based on the candidates set std::vector<uintV> &candidates = 
res[(intersect_levels[cur_level].size() + 1) % 2]; for (size_t i = 0; i < candidates.size(); ++i) { if (CheckCondition(path, candidates[i], conditions[cur_level]) == false || CheckEquality(path, cur_level, candidates[i])) continue; path[cur_level] = candidates[i]; DFS(thread_id, cur_level + 1, path, ans, intersect_levels, conditions); } } } size_t *GetEdgeCrossTimes() const { return edge_cross_times_; } size_t GetTotalMatchCount() const { return total_match_count_; } public: TraversalPlan *plan_; Graph *graph_; size_t thread_num_; size_t total_match_count_; size_t *edge_cross_times_; SpinLock *locks_; }; #endif
multisort-omp-task-rama.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"
#include <sys/time.h>

/* Wall-clock time in microseconds (gettimeofday based). */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

/* Timing macros; both expect a local "double stamp" in scope. */
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6f\n",(_m), stamp);

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;   // below 4x this, multisort falls back to basicsort
long MIN_MERGE_SIZE;  // below 2x this, merge falls back to basicmerge
int CUTOFF;           // NOTE(review): parsed from -c and printed, but never consulted below — the task cut-off is apparently unimplemented.

#define BLOCK_SIZE 1024L

#define T int

/* Leaf routines provided elsewhere. */
void basicsort(long n, T data[n]);
void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

/* Merge left and right (each of n elements, both sorted) into
 * result[start .. start+length); recursion halves "length" and spawns the
 * two halves as OpenMP tasks. */
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length) {
        if (length < MIN_MERGE_SIZE*2L) {
                // Base case
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                #pragma omp task
                merge(n, left, right, result, start, length/2);
                #pragma omp task
                merge(n, left, right, result, start + length/2, length/2);
                // Wait for both halves before returning to the caller.
                #pragma omp taskwait
        }
}

/* Task-parallel mergesort: sort the four quarters of data recursively,
 * merge quarters pairwise into tmp, then merge the two halves of tmp back
 * into data. tmp must be at least as large as data. */
void multisort(long n, T data[n], T tmp[n]) {
        if (n >= MIN_SORT_SIZE*4L) {
                // Recursive decomposition: one task per quarter.
                #pragma omp task
                multisort(n/4L, &data[0], &tmp[0]);
                #pragma omp task
                multisort(n/4L, &data[n/4L], &tmp[n/4L]);
                #pragma omp task
                multisort(n/4L, &data[n/2L], &tmp[n/2L]);
                #pragma omp task
                multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L]);
                #pragma omp taskwait

                // Merge quarters pairwise into tmp.
                #pragma omp task
                merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L);
                #pragma omp task
                merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L);
                #pragma omp taskwait

                // Final merge of the two halves back into data.
                #pragma omp task
                merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n);
                #pragma omp taskwait
        } else {
                // Base case
                basicsort(n, data);
        }
}

/* Deterministic-after-first-element pseudo-random fill (first element from rand()). */
static void initialize(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

/* Zero-fill the scratch buffer. */
static void clear(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                data[i] = 0;
        }
}

/* Verify the result; prints an error with the number of inversions found. */
void check_sorted(long n, T data[n]) {
        int unsorted=0;
        for (int i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
        else {
                // printf ("data IS ordered; ");
        }
}

int main(int argc, char **argv) {
        /* Defaults for command line arguments */
        N = 32768 * BLOCK_SIZE;
        MIN_SORT_SIZE = 32 * BLOCK_SIZE;
        MIN_MERGE_SIZE = 32 * BLOCK_SIZE;; // NOTE(review): stray second ';' (harmless empty statement)
        CUTOFF = 4;

        /* Process command-line arguments */
        for (int i=1; i<argc; i++) {
                if (strcmp(argv[i], "-n")==0) {
                        N = atol(argv[++i]) * BLOCK_SIZE;
                }
                else if (strcmp(argv[i], "-s")==0) {
                        MIN_SORT_SIZE = atol(argv[++i]) * BLOCK_SIZE;
                }
                else if (strcmp(argv[i], "-m")==0) {
                        MIN_MERGE_SIZE = atol(argv[++i]) * BLOCK_SIZE;
                }
                else if (strcmp(argv[i], "-c")==0) {
                        CUTOFF = atoi(argv[++i]);
                }
                else {
                        fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE]\n", argv[0]);
                        fprintf(stderr, " -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
                        fprintf(stderr, " -s to specify the size of the vector (in Kelements) that breaks recursion in the sort phase (default 32)\n");
                        fprintf(stderr, " -m to specify the size of the vector (in Kelements) that breaks recursion in the merge phase (default 32)\n");
                        fprintf(stderr, " -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
                        return EXIT_FAILURE;
                }
        }
        fprintf(stdout, "Arguments (Kelements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/BLOCK_SIZE, MIN_SORT_SIZE/BLOCK_SIZE, MIN_MERGE_SIZE/BLOCK_SIZE);
        fprintf(stdout, " CUTOFF=%d\n", CUTOFF);

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));

        double stamp;
        START_COUNT_TIME;
        initialize(N, data);
        clear(N, tmp);
        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;
        /* Single thread enters multisort; the nested tasks fan out across
         * the team created by "parallel". */
        #pragma omp parallel
        #pragma omp single
        multisort(N, data, tmp);
        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;
        check_sorted (N, data);
        STOP_COUNT_TIME("Check sorted data execution time");

        fprintf(stdout, "Multisort program finished\n");
        return 0;
}
FunctorsOpenMP.h
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. // // Copyright 2018 National Technology & Engineering Solutions of Sandia, LLC (NTESS). // Copyright 2018 UT-Battelle, LLC. // Copyright 2018 Los Alamos National Security. // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National // Laboratory (LANL), the U.S. Government retains certain rights in // this software. //============================================================================ #ifndef vtk_m_cont_openmp_internal_FunctorsOpenMP_h #define vtk_m_cont_openmp_internal_FunctorsOpenMP_h #include <vtkm/cont/openmp/internal/DeviceAdapterTagOpenMP.h> #include <vtkm/cont/internal/FunctorsGeneral.h> #include <vtkm/BinaryOperators.h> #include <vtkm/BinaryPredicates.h> #include <vtkm/Pair.h> #include <vtkm/Types.h> #include <vtkm/cont/ArrayHandle.h> #include <vtkm/cont/ErrorExecution.h> #include <omp.h> #include <algorithm> #include <type_traits> #include <vector> // Wrap all '#pragma omp ...' calls in this macro so we can disable them in // non-omp builds and avoid a multitude of 'ignoring pragma..." warnings. #ifdef _OPENMP #define _VTKM_OPENMP_DIRECTIVE_IMPL(fullDir) _Pragma(#fullDir) #define VTKM_OPENMP_DIRECTIVE(dir) _VTKM_OPENMP_DIRECTIVE_IMPL(omp dir) #else // _OPENMP #define VTKM_OPENMP_DIRECTIVE(directive) #endif // _OPENMP // When defined, supported type / operator combinations will use the OpenMP // reduction(...) clause. Otherwise, all reductions use the general // implementation with a manual reduction once the threads complete. 
// I don't know how, but the benchmarks currently perform better without the
// specializations.
//#define VTKM_OPENMP_USE_NATIVE_REDUCTION

namespace vtkm
{
namespace cont
{
namespace openmp
{

// Assumed hardware geometry used for padding / chunking decisions.
// NOTE(review): these are typical x86 values, not queried at runtime — confirm
// they match the deployment targets.
constexpr static vtkm::Id CACHE_LINE_SIZE = 64;
constexpr static vtkm::Id PAGE_SIZE = 4096;

// Returns ceil(num/den) for integral types
template <typename T>
static constexpr T CeilDivide(const T& numerator, const T& denominator)
{
  return (numerator + denominator - 1) / denominator;
}

// Computes the number of values per chunk. Note that numChunks * valuesPerChunk
// may exceed numVals, so be sure to check upper limits.
static void ComputeChunkSize(const vtkm::Id numVals,
                             const vtkm::Id numThreads,
                             const vtkm::Id chunksPerThread,
                             const vtkm::Id bytesPerValue,
                             vtkm::Id& numChunks,
                             vtkm::Id& valuesPerChunk)
{
  // try to evenly distribute pages across chunks:
  const vtkm::Id bytesIn = numVals * bytesPerValue;
  const vtkm::Id pagesIn = CeilDivide(bytesIn, PAGE_SIZE);
  // If we don't have enough pages to honor chunksPerThread, ignore it:
  numChunks = (pagesIn > numThreads * chunksPerThread) ? numThreads * chunksPerThread : numThreads;
  const vtkm::Id pagesPerChunk = CeilDivide(pagesIn, numChunks);
  valuesPerChunk = CeilDivide(pagesPerChunk * PAGE_SIZE, bytesPerValue);
}

// DoCopy, same-type case: defer to std::copy (which may lower to memmove).
template <typename T, typename U>
static void DoCopy(T src, U dst, vtkm::Id numVals, std::true_type)
{
  if (numVals)
  {
    std::copy(src, src + numVals, dst);
  }
}

// Don't use std::copy when type conversion is required because MSVC.
template <typename InIterT, typename OutIterT>
static void DoCopy(InIterT inIter, OutIterT outIter, vtkm::Id numVals, std::false_type)
{
  using ValueType = typename std::iterator_traits<OutIterT>::value_type;

  for (vtkm::Id i = 0; i < numVals; ++i)
  {
    *(outIter++) = static_cast<ValueType>(*(inIter++));
  }
}

// Dispatcher: picks the same-type or converting copy at compile time.
template <typename InIterT, typename OutIterT>
static void DoCopy(InIterT inIter, OutIterT outIter, vtkm::Id numVals)
{
  using InValueType = typename std::iterator_traits<InIterT>::value_type;
  using OutValueType = typename std::iterator_traits<OutIterT>::value_type;

  DoCopy(inIter, outIter, numVals, std::is_same<InValueType, OutValueType>());
}

// Parallel copy of numVals values from inPortal (starting at inStart) to
// outPortal (starting at outStart), chunked on page boundaries.
template <typename InPortalT, typename OutPortalT>
static void CopyHelper(InPortalT inPortal,
                       OutPortalT outPortal,
                       vtkm::Id inStart,
                       vtkm::Id outStart,
                       vtkm::Id numVals)
{
  using InValueT = typename InPortalT::ValueType;
  using OutValueT = typename OutPortalT::ValueType;
  constexpr auto isSame = std::is_same<InValueT, OutValueT>();

  auto inIter = vtkm::cont::ArrayPortalToIteratorBegin(inPortal) + inStart;
  auto outIter = vtkm::cont::ArrayPortalToIteratorBegin(outPortal) + outStart;
  vtkm::Id valuesPerChunk;

  VTKM_OPENMP_DIRECTIVE(parallel default(none) shared(inIter, outIter, valuesPerChunk, numVals))
  {
    // 'single' has an implicit barrier on exit, so valuesPerChunk is visible
    // to all threads before the worksharing loop below starts.
    VTKM_OPENMP_DIRECTIVE(single)
    {
      // Evenly distribute full pages to all threads. We manually chunk the
      // data here so that we can exploit std::copy's memmove optimizations.
      vtkm::Id numChunks;
      ComputeChunkSize(numVals, omp_get_num_threads(), 8, sizeof(InValueT), numChunks, valuesPerChunk);
    }

    VTKM_OPENMP_DIRECTIVE(for schedule(static))
    for (vtkm::Id i = 0; i < numVals; i += valuesPerChunk)
    {
      vtkm::Id chunkSize = std::min(numVals - i, valuesPerChunk);
      DoCopy(inIter + i, outIter + i, chunkSize, isSame);
    }
  }
}

// Two-pass stream compaction: each thread compacts one chunk in-place via
// CopyIf(), then a serial Reduce() packs the per-chunk results together.
struct CopyIfHelper
{
  vtkm::Id NumValues;
  vtkm::Id NumThreads;
  vtkm::Id ValueSize;

  vtkm::Id NumChunks;
  vtkm::Id ChunkSize;
  std::vector<vtkm::Id> EndIds; // one-past-the-end output index per chunk

  CopyIfHelper() = default;

  // Must be called from inside a parallel region (uses omp_get_num_threads()).
  void Initialize(vtkm::Id numValues, vtkm::Id valueSize)
  {
    this->NumValues = numValues;
    this->NumThreads = omp_get_num_threads();
    this->ValueSize = valueSize;

    // Evenly distribute pages across the threads. We manually chunk the
    // data here so that we can exploit std::copy's memmove optimizations.
    ComputeChunkSize(
      this->NumValues, this->NumThreads, 8, valueSize, this->NumChunks, this->ChunkSize);

    this->EndIds.resize(this->NumChunks);
  }

  // Compact chunk 'chunk': copy values whose stencil passes 'pred' toward the
  // front of the chunk's own output range, recording where the output ends.
  template <typename InIterT, typename StencilIterT, typename OutIterT, typename PredicateT>
  void CopyIf(InIterT inIter, StencilIterT stencilIter, OutIterT outIter, PredicateT pred, vtkm::Id chunk)
  {
    vtkm::Id startPos = std::min(chunk * this->ChunkSize, this->NumValues);
    vtkm::Id endPos = std::min((chunk + 1) * this->ChunkSize, this->NumValues);

    vtkm::Id outPos = startPos;
    for (vtkm::Id inPos = startPos; inPos < endPos; ++inPos)
    {
      if (pred(stencilIter[inPos]))
      {
        outIter[outPos++] = inIter[inPos];
      }
    }

    this->EndIds[chunk] = outPos;
  }

  // Serial pass: slide each chunk's compacted run down so the output is
  // contiguous. Returns the total number of values kept.
  template <typename OutIterT>
  vtkm::Id Reduce(OutIterT data)
  {
    vtkm::Id endPos = this->EndIds.front();
    for (vtkm::Id i = 1; i < this->NumChunks; ++i)
    {
      vtkm::Id chunkStart = std::min(i * this->ChunkSize, this->NumValues);
      vtkm::Id chunkEnd = this->EndIds[i];
      vtkm::Id numValuesToCopy = chunkEnd - chunkStart;
      if (numValuesToCopy > 0 && chunkStart != endPos)
      {
        std::copy(data + chunkStart, data + chunkEnd, data + endPos);
      }
      endPos += numValuesToCopy;
    }
    return endPos;
  }
};

#ifdef VTKM_OPENMP_USE_NATIVE_REDUCTION
// OpenMP only declares reduction operations for primitive types. This utility
// detects if a type T is supported.
template <typename T>
struct OpenMPReductionSupported : std::false_type
{
};
template <>
struct OpenMPReductionSupported<Int8> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt8> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Int16> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt16> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Int32> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt32> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Int64> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<UInt64> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Float32> : std::true_type
{
};
template <>
struct OpenMPReductionSupported<Float64> : std::true_type
{
};
#else
template <typename T>
using OpenMPReductionSupported = std::false_type;
#endif // VTKM_OPENMP_USE_NATIVE_REDUCTION

struct ReduceHelper
{
  // Generic implementation:
  // NOTE(review): the parallel path reorders the reduction (per-thread partial
  // accumulators combined at the end), so for non-associative functors —
  // notably floating-point addition — results can differ from a serial
  // reduction; presumably acceptable here, but confirm.
  template <typename PortalT, typename ReturnType, typename Functor>
  static ReturnType Execute(PortalT portal, ReturnType init, Functor functorIn, std::false_type)
  {
    internal::WrappedBinaryOperator<ReturnType, Functor> f(functorIn);

    const vtkm::Id numVals = portal.GetNumberOfValues();
    auto data = vtkm::cont::ArrayPortalToIteratorBegin(portal);

    bool doParallel = false;
    int numThreads = 0;
    std::unique_ptr<ReturnType[]> threadData;

    VTKM_OPENMP_DIRECTIVE(parallel default(none) firstprivate(f)
                            shared(data, doParallel, numThreads, threadData))
    {
      int tid = omp_get_thread_num();

      // 'single' (with its implicit exit barrier) decides whether there is
      // enough work to go parallel and allocates per-thread slots.
      VTKM_OPENMP_DIRECTIVE(single)
      {
        numThreads = omp_get_num_threads();
        if (numVals >= numThreads * 2)
        {
          doParallel = true;
          threadData.reset(new ReturnType[numThreads]);
        }
      }

      if (doParallel)
      {
        // Use the first (numThreads*2) values for initializing:
        // thread 'tid' seeds its accumulator from the pair data[2*tid],
        // data[2*tid+1], which avoids needing an identity value for f.
        ReturnType accum;
        accum = f(data[2 * tid], data[2 * tid + 1]);

        // Assign each thread chunks of the remaining values for local reduction
        VTKM_OPENMP_DIRECTIVE(for schedule(static))
        for (vtkm::Id i = numThreads * 2; i < numVals; i++)
        {
          accum = f(accum, data[i]);
        }

        threadData[tid] = accum;
      }
    } // end parallel

    if (doParallel)
    {
      // do the final reduction serially:
      for (size_t i = 0; i < static_cast<size_t>(numThreads); ++i)
      {
        init = f(init, threadData[i]);
      }
    }
    else
    {
      // Not enough threads. Do the entire reduction in serial:
      for (vtkm::Id i = 0; i < numVals; ++i)
      {
        init = f(init, data[i]);
      }
    }

    return init;
  }

#ifdef VTKM_OPENMP_USE_NATIVE_REDUCTION

// Specialize for vtkm functors with OpenMP special cases:
#define VTKM_OPENMP_SPECIALIZE_REDUCE1(FunctorType, PragmaString)                                  \
  template <typename PortalT, typename ReturnType>                                                 \
  static ReturnType Execute(                                                                       \
    PortalT portal, ReturnType value, FunctorType functorIn, std::true_type)                       \
  {                                                                                                \
    const vtkm::Id numValues = portal.GetNumberOfValues();                                         \
    internal::WrappedBinaryOperator<ReturnType, FunctorType> f(functorIn);                         \
    _Pragma(#PragmaString) for (vtkm::Id i = 0; i < numValues; ++i)                                \
    {                                                                                              \
      value = f(value, portal.Get(i));                                                             \
    }                                                                                              \
    return value;                                                                                  \
  }

// Constructing the pragma string inside the _Pragma call doesn't work so
// we jump through a hoop:
#define VTKM_OPENMP_SPECIALIZE_REDUCE(FunctorType, Operator)                                       \
  VTKM_OPENMP_SPECIALIZE_REDUCE1(FunctorType, "omp parallel for reduction(" #Operator ":value)")

// + (Add, Sum)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Add, +)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Sum, +)
// * (Multiply, Product)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Multiply, *)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Product, *)
// - (Subtract)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Subtract, -)
// & (BitwiseAnd)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseAnd, &)
// | (BitwiseOr)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseOr, |)
// ^ (BitwiseXor)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::BitwiseXor, ^)
// && (LogicalAnd)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::LogicalAnd, &&)
// || (LogicalOr)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::LogicalOr, ||)
// min (Minimum)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Minimum, min)
// max (Maximum)
VTKM_OPENMP_SPECIALIZE_REDUCE(vtkm::Maximum, max)

#undef VTKM_OPENMP_SPECIALIZE_REDUCE
#undef VTKM_OPENMP_SPECIALIZE_REDUCE1

#endif // VTKM_OPENMP_USE_NATIVE_REDUCTION
};

// Segmented reduce-by-key: each thread reduces runs of equal keys within its
// own contiguous partition, then the partitions are stitched together in
// thread order (merging boundary runs that share a key). The output arrays
// are shrunk to the number of unique-key runs produced.
template <typename KeysInArray,
          typename ValuesInArray,
          typename KeysOutArray,
          typename ValuesOutArray,
          typename BinaryFunctor>
void ReduceByKeyHelper(KeysInArray keysInArray,
                       ValuesInArray valuesInArray,
                       KeysOutArray keysOutArray,
                       ValuesOutArray valuesOutArray,
                       BinaryFunctor functor)
{
  using KeyType = typename KeysInArray::ValueType;
  using ValueType = typename ValuesInArray::ValueType;

  const vtkm::Id numValues = keysInArray.GetNumberOfValues();
  auto keysInPortal = keysInArray.PrepareForInput(DeviceAdapterTagOpenMP());
  auto valuesInPortal = valuesInArray.PrepareForInput(DeviceAdapterTagOpenMP());
  auto keysIn = vtkm::cont::ArrayPortalToIteratorBegin(keysInPortal);
  auto valuesIn = vtkm::cont::ArrayPortalToIteratorBegin(valuesInPortal);

  auto keysOutPortal = keysOutArray.PrepareForOutput(numValues, DeviceAdapterTagOpenMP());
  auto valuesOutPortal = valuesOutArray.PrepareForOutput(numValues, DeviceAdapterTagOpenMP());
  auto keysOut = vtkm::cont::ArrayPortalToIteratorBegin(keysOutPortal);
  auto valuesOut = vtkm::cont::ArrayPortalToIteratorBegin(valuesOutPortal);

  internal::WrappedBinaryOperator<ValueType, BinaryFunctor> f(functor);
  vtkm::Id outIdx = 0;

  VTKM_OPENMP_DIRECTIVE(parallel default(none) firstprivate(keysIn, valuesIn, keysOut, valuesOut, f)
                          shared(outIdx))
  {
    int tid = omp_get_thread_num();
    int numThreads = omp_get_num_threads();

    // Determine bounds for this thread's scan operation:
    vtkm::Id chunkSize = (numValues + numThreads - 1) / numThreads;
    vtkm::Id scanIdx = std::min(tid * chunkSize, numValues);
    vtkm::Id scanEnd = std::min(scanIdx + chunkSize, numValues);

    // Each thread writes its reduced runs into its own slice of the output,
    // so no synchronization is needed during the per-partition pass.
    auto threadKeysBegin = keysOut + scanIdx;
    auto threadValuesBegin = valuesOut + scanIdx;
    auto threadKey = threadKeysBegin;
    auto threadValue = threadValuesBegin;

    // Reduce each thread's partition:
    KeyType rangeKey;
    ValueType rangeValue;
    for (;;)
    {
      if (scanIdx < scanEnd)
      {
        rangeKey = keysIn[scanIdx];
        rangeValue = valuesIn[scanIdx];
        ++scanIdx;

        // Locate end of current range:
        while (scanIdx < scanEnd && static_cast<KeyType>(keysIn[scanIdx]) == rangeKey)
        {
          rangeValue = f(rangeValue, valuesIn[scanIdx]);
          ++scanIdx;
        }

        *threadKey = rangeKey;
        *threadValue = rangeValue;
        ++threadKey;
        ++threadValue;
      }
      else
      {
        break;
      }
    }

    if (tid == 0)
    {
      outIdx = static_cast<vtkm::Id>(threadKey - threadKeysBegin);
    }

    // Combine the reduction results. Skip tid == 0, since it's already in
    // the correct location:
    for (int i = 1; i < numThreads; ++i)
    {
      // This barrier ensures that:
      // 1) Threads remain synchronized through this final reduction loop.
      // 2) The outIdx variable is initialized by thread 0.
      // 3) All threads have reduced their partitions.
      VTKM_OPENMP_DIRECTIVE(barrier)

      if (tid == i)
      {
        // Check if the previous thread's last key matches our first:
        if (outIdx > 0 && threadKeysBegin < threadKey && keysOut[outIdx - 1] == *threadKeysBegin)
        {
          valuesOut[outIdx - 1] = f(valuesOut[outIdx - 1], *threadValuesBegin);

          ++threadKeysBegin;
          ++threadValuesBegin;
        }

        // Copy reduced partition to final location (if needed)
        if (threadKeysBegin < threadKey && threadKeysBegin != keysOut + outIdx)
        {
          std::copy(threadKeysBegin, threadKey, keysOut + outIdx);
          std::copy(threadValuesBegin, threadValue, valuesOut + outIdx);
        }

        outIdx += static_cast<vtkm::Id>(threadKey - threadKeysBegin);

      } // end tid == i
    }   // end combine reduction
  }     // end parallel

  keysOutArray.Shrink(outIdx);
  valuesOutArray.Shrink(outIdx);
}

// Task-based divide-and-conquer std::unique over [Data, Data + NumValues):
// leaves run std::unique serially, internal nodes merge adjacent unique runs.
template <typename IterT, typename RawPredicateT>
struct UniqueHelper
{
  using ValueType = typename std::iterator_traits<IterT>::value_type;
  using PredicateT = internal::WrappedBinaryOperator<bool, RawPredicateT>;

  struct Node
  {
    vtkm::Id2 InputRange{ -1, -1 };
    vtkm::Id2 OutputRange{ -1, -1 };

    // Pad the node out to the size of a cache line to prevent false sharing:
    static constexpr size_t DataSize = 2 * sizeof(vtkm::Id2);
    static constexpr size_t NumCacheLines = CeilDivide<size_t>(DataSize, CACHE_LINE_SIZE);
    static constexpr size_t PaddingSize = NumCacheLines * CACHE_LINE_SIZE - DataSize;
    unsigned char Padding[PaddingSize];
  };

  IterT Data;
  vtkm::Id NumValues;
  PredicateT Predicate;
  vtkm::Id LeafSize;         // max values handled serially by one leaf task
  std::vector<Node> Nodes;   // preallocated node pool (sized in Prepare())
  size_t NextNode;           // bump-pointer index into Nodes

  UniqueHelper(IterT iter, vtkm::Id numValues, RawPredicateT pred)
    : Data(iter)
    , NumValues(numValues)
    , Predicate(pred)
    , LeafSize(0)
    , NextNode(0)
  {
  }

  // Runs the uniquification; returns the number of values kept.
  vtkm::Id Execute()
  {
    vtkm::Id outSize = 0;

    VTKM_OPENMP_DIRECTIVE(parallel default(shared))
    {
      VTKM_OPENMP_DIRECTIVE(single)
      {
        this->Prepare();

        // Kick off task-based divide-and-conquer uniquification:
        Node* rootNode = this->AllocNode();
        rootNode->InputRange = vtkm::Id2(0, this->NumValues);
        this->Uniquify(rootNode);
        outSize = rootNode->OutputRange[1] - rootNode->OutputRange[0];
      }
    }

    return outSize;
  }

private:
  void Prepare()
  {
    // Figure out how many values each thread should handle:
    int numThreads = omp_get_num_threads();
    vtkm::Id chunksPerThread = 8;
    vtkm::Id numChunks;
    ComputeChunkSize(
      this->NumValues, numThreads, chunksPerThread, sizeof(ValueType), numChunks, this->LeafSize);

    // Compute an upper-bound of the number of nodes in the tree:
    size_t numNodes = numChunks;
    while (numChunks > 1)
    {
      numChunks = (numChunks + 1) / 2;
      numNodes += numChunks;
    }
    this->Nodes.resize(numNodes);
    this->NextNode = 0;
  }

  // Thread-safe bump allocation out of the preallocated node pool.
  Node* AllocNode()
  {
    size_t nodeIdx;

// GCC emits a false positive "value computed but not used" for this block:
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-value"

    VTKM_OPENMP_DIRECTIVE(atomic capture)
    {
      nodeIdx = this->NextNode;
      ++this->NextNode;
    }

#pragma GCC diagnostic pop

    VTKM_ASSERT(nodeIdx < this->Nodes.size());

    return &this->Nodes[nodeIdx];
  }

  bool IsLeaf(const vtkm::Id2& range) { return (range[1] - range[0]) <= this->LeafSize; }

  // Not a strict midpoint, but ensures that the first range will always be
  // a multiple of the leaf size.
  vtkm::Id ComputeMidpoint(const vtkm::Id2& range)
  {
    const vtkm::Id n = range[1] - range[0];
    const vtkm::Id np = this->LeafSize;

    return CeilDivide(n / 2, np) * np + range[0];
  }

  void Uniquify(Node* node)
  {
    if (!this->IsLeaf(node->InputRange))
    {
      vtkm::Id midpoint = this->ComputeMidpoint(node->InputRange);

      Node* right = this->AllocNode();
      Node* left = this->AllocNode();

      right->InputRange = vtkm::Id2(midpoint, node->InputRange[1]);

      // Intel compilers seem to have trouble following the 'this' pointer
      // when launching tasks, resulting in a corrupt task environment.
      // Explicitly copying the pointer into a local variable seems to fix this.
      auto explicitThis = this;

      VTKM_OPENMP_DIRECTIVE(taskgroup)
      {
        VTKM_OPENMP_DIRECTIVE(task) { explicitThis->Uniquify(right); }

        left->InputRange = vtkm::Id2(node->InputRange[0], midpoint);
        this->Uniquify(left);

      } // end taskgroup. Both sides of the tree will be completed here.

      // Combine the ranges in the left side:
      // drop the right run's first value if it matches the left run's last.
      if (this->Predicate(this->Data[left->OutputRange[1] - 1], this->Data[right->OutputRange[0]]))
      {
        ++right->OutputRange[0];
      }

      vtkm::Id numVals = right->OutputRange[1] - right->OutputRange[0];
      DoCopy(this->Data + right->OutputRange[0], this->Data + left->OutputRange[1], numVals);

      node->OutputRange[0] = left->OutputRange[0];
      node->OutputRange[1] = left->OutputRange[1] + numVals;
    }
    else
    {
      auto start = this->Data + node->InputRange[0];
      auto end = this->Data + node->InputRange[1];
      end = std::unique(start, end, this->Predicate);
      node->OutputRange[0] = node->InputRange[0];
      node->OutputRange[1] = node->InputRange[0] + static_cast<vtkm::Id>(end - start);
    }
  }
};
}
}
} // end namespace vtkm::cont::openmp

#endif // vtk_m_cont_openmp_internal_FunctorsOpenMP_h
ast-dump-openmp-single.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(void) { #pragma omp single ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-single.c:3:1, line:6:1> line:3:6 test 'void (void)' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:6:1> // CHECK-NEXT: `-OMPSingleDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-NullStmt {{.*}} <col:3> // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-single.c:4:1) *const restrict'
compute.h
// Row/column index and matrix value types used throughout the SpMV kernels.
typedef int index_t;
typedef double value_t;

// Vector widths of a 512-bit register: 16 x 32-bit ints, 8 x 64-bit doubles.
#define LEN16 16
#define LEN8 8

// 64-byte-aligned views of a 512-bit register as integers or as raw lanes.
typedef __attribute__((aligned(64))) union zmmi {
  __m512i reg;
  unsigned int elems[LEN16];
} zmmi_t;

// 64-byte-aligned views of a 512-bit register as doubles, as a 32-bit-integer
// register (for permutes), or as scalar lanes.
typedef __attribute__((aligned(64))) union zmmd {
  __m512d reg;
  __m512i regi32;
  double elems[LEN8];
} zmmd_t;

// Returns the index of the first set bit in 'x' strictly after position 'a'
// (software replacement for the _mm_tzcnti_32 intrinsic; see the commented-out
// call sites below).
// NOTE(review): 'idx' is never bounds-checked against 8 — if no bit of x above
// position 'a' is set, mask[idx] reads past the array. Callers apparently
// guarantee at least 'bcnt' set bits remain; confirm.
int count_trailing_zero(int a, __mmask8 x)
{
  int idx = a+1;
  __mmask8 mask[8] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80};
  while((x & mask[idx]) == 0) {
    idx ++;
  }
  return idx;
}

// Sparse matrix-vector multiply (result = A * input) in a vectorized,
// mask-driven segmented-scan format for Knights Corner (KNC):
//  - values/columns are stored in 8-wide vector groups (vals_arr/col_arr);
//  - veceor_ptr marks the groups where one or more rows end;
//  - scan_mask supplies, per end-of-row group, three swizzle/add masks for an
//    in-register segmented sum plus a write mask of which lanes hold
//    completed row results;
//  - each thread accumulates into a per-panel buffer (tmp_result), partial
//    rows are fixed up with an atomic update, and panels are merged into
//    'result' block-by-block at the end.
// Uses Cilk Plus array-section notation (x[a:n]) and KNC-only intrinsics, so
// this only builds with the Intel compiler targeting MIC.
void compute_spmv(int n_threads, int num_vectors, int threads_per_core,
                  int num_panels, panel_info_t *restrict panel_info,
                  thr_info_t *restrict thr_info,
                  index_t *restrict veceor_ptr, uint8_t *restrict scan_mask,
                  index_t *restrict row_arr, index_t *restrict col_arr,
                  value_t *restrict vals_arr, value_t *restrict input,
                  value_t *restrict result)
{
#pragma omp parallel default(shared) num_threads(n_threads)
  {
    int id = omp_get_thread_num();
    int core_id = id / threads_per_core;
    int local_thr_id = id % threads_per_core;
    int panel_id = thr_info[id].panel_id;
    // Each panel gets a private accumulation buffer; panels are merged below.
    value_t *tmp_result = panel_info[panel_id].tmp_result;

    index_t start_vec = thr_info[id].start_vec;
    index_t end_vec = thr_info[id].end_vec;

    zmmi_t row, col, wrmask;
    zmmd_t res, tmp;
    __mmask8 mask1, mask2, mask3, maskwr;

    index_t veceor_idx = thr_info[id].vbase;
    index_t scan_idx = thr_info[id].sbase;
    index_t ridx = thr_info[id].rbase;
    index_t vec_idx = start_vec * LEN8;
    value_t nrval = 0;   // partial sum carried across vector groups
    index_t eor_vec = veceor_ptr[veceor_idx++];

    res.elems[:] = 0;
    for (index_t v = start_vec; v < end_vec; ++v) {
      // Gather 8 matrix values * input entries and accumulate per lane.
      col.elems[0:LEN8] = col_arr[vec_idx:LEN8];
      __assume_aligned(&vals_arr[vec_idx], 64);
      res.elems[0:LEN8] += vals_arr[vec_idx:LEN8] * input[col.elems[0:LEN8]];
      vec_idx += LEN8;

      nrval = 0;
      if (v == eor_vec) {
        // This group ends at least one row: run the masked segmented sum.
        mask1 = (__mmask8)scan_mask[scan_idx++];
        mask2 = (__mmask8)scan_mask[scan_idx++];
        mask3 = (__mmask8)scan_mask[scan_idx++];
        maskwr = (__mmask8)scan_mask[scan_idx++];

        // Three masked swizzle-adds: pairwise, within 128-bit lanes, then
        // across the register halves.
        res.reg = _mm512_mask_add_pd(res.reg, mask1, res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_CDAB));
        res.reg = _mm512_mask_add_pd(res.reg, mask2, res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_BBBB));
        tmp.regi32 = _mm512_permute4f128_epi32(res.regi32, _MM_PERM_BBBA);
        res.reg = _mm512_mask_add_pd(res.reg, mask3, res.reg, _mm512_swizzle_pd(tmp.reg, _MM_SWIZ_REG_BBBB));

        // If the top lane does not complete a row, carry it to the next group.
        if ((maskwr & 0x80) == 0)
          nrval = res.elems[LEN8-1];

        // Scatter each completed row sum (one per set bit in maskwr).
        int bcnt = _mm_countbits_32(maskwr);
        // int a = -1;
        int a = -1;
        int x = maskwr;
        for (int i = 0; i < bcnt; ++i) {
          // int y = _mm_tzcnti_32(a, x);
          int y = count_trailing_zero(a,maskwr);
          index_t r = row_arr[ridx+i];
          tmp_result[r] += res.elems[y];
          a = y;
        }
        ridx += bcnt;
        eor_vec = veceor_ptr[veceor_idx++];
      } else {
        // No row ends here: collapse all 8 lanes into one carry value.
        res.reg = _mm512_add_pd(res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_CDAB));
        res.reg = _mm512_add_pd(res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_BBBB));
        nrval = res.elems[LEN8-1] + res.elems[3];
      }

      // Reset the accumulator with the carried partial sum in lane 0.
      res.elems[:] = 0;
      res.elems[0] = nrval;
    }

#pragma omp barrier

    // Fold each thread's overflow (a row split across thread boundaries)
    // into its final row; atomic because several threads may target one row.
    index_t nridx = thr_info[id].last_row;
    nrval = tmp_result[thr_info[id].overflow_row];
#pragma omp atomic update
    tmp_result[nridx] += nrval;

#pragma omp barrier

    // Merge all panel buffers into the global result, blocked for locality.
    index_t merge_start = thr_info[id].merge_start;
    index_t merge_end = thr_info[id].merge_end;
    index_t blk_size = 512;
    for (index_t i = merge_start; i < merge_end; i += blk_size) {
      index_t blk_end = i + blk_size > merge_end ? merge_end : i + blk_size;
      for (int c = 0; c < num_panels; ++c) {
        for (index_t b = i; b < blk_end; b += LEN8) {
          result[b:LEN8] += panel_info[c].tmp_result[b:LEN8];
        }
      }
    }
  }
}

// Single-panel variant of compute_spmv: writes row sums directly into
// 'result' (no per-panel buffers, no merge phase). The mask/veceor layout
// matches compute_spmv except that scan_idx is derived as vbase * 4 (four
// masks per end-of-row group).
void compute_spmv1(int n_threads, int num_vectors,
                   thr_info_t *restrict thr_info,
                   index_t *restrict veceor_ptr, uint8_t *restrict scan_mask,
                   index_t *restrict row_arr, index_t *restrict col_arr,
                   value_t *restrict vals_arr, value_t *restrict input,
                   value_t *restrict result)
{
#pragma omp parallel default(shared) num_threads(n_threads)
  {
    int id = omp_get_thread_num();

    index_t start_vec = thr_info[id].start_vec;
    index_t end_vec = thr_info[id].end_vec;

    zmmi_t row, col, wrmask;
    zmmd_t res, tmp;
    __mmask8 mask1, mask2, mask3, maskwr;

    index_t cidx = thr_info[id].vbase;
    index_t veceor_idx = thr_info[id].vbase;
    index_t scan_idx = thr_info[id].vbase * 4;
    index_t ridx = thr_info[id].rbase;
    index_t vec_idx = start_vec * LEN8;
    value_t nrval = 0;   // partial sum carried across vector groups
    index_t eor_vec = veceor_ptr[veceor_idx++];

    res.elems[:] = 0;
    // std::cout<<" start = "<< start_vec <<"; end = "<< end_vec<<endl;
    for (index_t v = start_vec; v < end_vec; ++v) {
      col.elems[0:LEN8] = col_arr[vec_idx:LEN8];
      __assume_aligned(&vals_arr[vec_idx], 64);
      res.elems[0:LEN8] += vals_arr[vec_idx:LEN8] * input[col.elems[0:LEN8]];
      vec_idx += LEN8;

      nrval = 0;
      if (v == eor_vec) {
        // Masked in-register segmented sum; see compute_spmv for the scheme.
        mask1 = (__mmask8)scan_mask[scan_idx++];
        mask2 = (__mmask8)scan_mask[scan_idx++];
        mask3 = (__mmask8)scan_mask[scan_idx++];
        maskwr = (__mmask8)scan_mask[scan_idx++];

        res.reg = _mm512_mask_add_pd(res.reg, mask1, res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_CDAB));
        res.reg = _mm512_mask_add_pd(res.reg, mask2, res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_BBBB));
        tmp.regi32 = _mm512_permute4f128_epi32(res.regi32, _MM_PERM_BBBA);
        res.reg = _mm512_mask_add_pd(res.reg, mask3, res.reg, _mm512_swizzle_pd(tmp.reg, _MM_SWIZ_REG_BBBB));

        if ((maskwr & 0x80) == 0)
          nrval = res.elems[LEN8-1];

        int bcnt = _mm_countbits_32(maskwr);
        // int a = -1;
        int a = -1;
        int x = maskwr;
        for (int i = 0; i < bcnt; ++i)
        {
          // int y = _mm_tzcnti_32(a, x);
          int y = count_trailing_zero(a,maskwr);
          // std::cout<<"bcnt = "<< bcnt<<"; y = "<< y<<"; v= "<< v<<"; start = "<< start_vec<<"; end = "<< end_vec<<endl;
          index_t r = row_arr[ridx+i];
          result[r] += res.elems[y];
          a = y;
        }
        ridx += bcnt;
        eor_vec = veceor_ptr[veceor_idx++];
      } else {
        // res.reg = _mm512_add_pd(res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_CDAB));
        // res.reg = _mm512_add_pd(res.reg, _mm512_swizzle_pd(res.reg, _MM_SWIZ_REG_BBBB));
        // nrval = res.elems[LEN8-1] + res.elems[3];
        nrval = _mm512_reduce_add_pd(res.reg);
      }

      res.elems[:] = 0;
      res.elems[0] = nrval;
    }

#pragma omp barrier

    // Fold the thread-boundary overflow row into its final location.
    index_t nridx = thr_info[id].last_row;
    nrval = result[thr_info[id].overflow_row];
#pragma omp atomic update
    result[nridx] += nrval;
  }
}

// Benchmark driver: repeats the single-panel SpMV kernel 'iters' times.
void run_spmv_vhcc1(int n_threads, int num_vectors,
                    thr_info_t *restrict thr_info,
                    index_t *restrict veceor_ptr, uint8_t *restrict scan_mask,
                    index_t *restrict row_arr, index_t *restrict col_arr,
                    value_t *restrict vals_arr, value_t *restrict input,
                    value_t *restrict result, int iters)
{
  for (int i = 0; i < iters; ++i) {
    compute_spmv1(n_threads, num_vectors, thr_info, veceor_ptr, scan_mask,
                  row_arr, col_arr, vals_arr, input, result);
  }
}

// Benchmark driver: repeats the paneled SpMV kernel 'iters' times.
void run_spmv_vhcc(int n_threads, int num_vectors, int threads_per_core,
                   int num_panels, panel_info_t *restrict panel_info,
                   thr_info_t *restrict thr_info,
                   index_t *restrict veceor_ptr, uint8_t *restrict scan_mask,
                   index_t *restrict row_arr, index_t *restrict col_arr,
                   value_t *restrict vals_arr, value_t *restrict input,
                   value_t *restrict result, int iters)
{
  for (int i = 0; i < iters; ++i) {
    compute_spmv(n_threads, num_vectors, threads_per_core, num_panels,
                 panel_info, thr_info, veceor_ptr, scan_mask, row_arr,
                 col_arr, vals_arr, input, result);
  }
}
shared-clause.c
/*
 * OpenMP demonstration of the 'shared' clause: initializes a[i] = i + 1,
 * adds the loop index to each element inside a worksharing parallel loop,
 * then prints the final array (each a[i] ends up as 2*i + 1).
 *
 * 'default(none)' forces every variable used in the region to be listed
 * explicitly; the loop control variable of the worksharing 'for' is
 * predetermined private, so only the array and its bound appear in 'shared'.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Fixed: the original 'main()' relied on an implicit 'int' return type,
 * which is invalid in C99/C11 (and in C++); also added the missing return. */
int main(void) {
  int i, n = 7;
  int a[n]; /* variable-length array sized by n */

  /* Initialize: a = {1, 2, 3, 4, 5, 6, 7}. */
  for (i = 0; i < n; i++)
    a[i] = i + 1;

  /* Each iteration adds its own index: a[i] becomes 2*i + 1. */
  #pragma omp parallel for default (none) shared (a , n)
  for (i = 0; i < n; i++)
    a[i] += i;

  printf("Después de parallel for:\n");
  for (i = 0; i < n; i++)
    printf("a[%d]= %d\n", i, a[i]);

  return 0;
}
gbdt.h
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_BOOSTING_GBDT_H_ #define LIGHTGBM_BOOSTING_GBDT_H_ #include <LightGBM/boosting.h> #include <LightGBM/objective_function.h> #include <LightGBM/prediction_early_stop.h> #include <string> #include <algorithm> #include <cstdio> #include <fstream> #include <map> #include <memory> #include <mutex> #include <unordered_map> #include <utility> #include <vector> #include <LightGBM/json11.hpp> #include "score_updater.hpp" using namespace json11; namespace LightGBM { /*! * \brief GBDT algorithm implementation. including Training, prediction, bagging. */ class GBDT : public GBDTBase { public: /*! * \brief Constructor */ GBDT(); /*! * \brief Destructor */ ~GBDT(); /*! * \brief Initialization logic * \param gbdt_config Config for boosting * \param train_data Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void Init(const Config* gbdt_config, const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Merge model from other boosting object. 
Will insert to the front of current boosting object * \param other */ void MergeFrom(const Boosting* other) override { auto other_gbdt = reinterpret_cast<const GBDT*>(other); // tmp move to other vector auto original_models = std::move(models_); models_ = std::vector<std::unique_ptr<Tree>>(); // push model from other first for (const auto& tree : other_gbdt->models_) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; // push model in current object for (const auto& tree : original_models) { auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get()))); models_.push_back(std::move(new_tree)); } num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_; } void ShuffleModels(int start_iter, int end_iter) override { int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_; start_iter = std::max(0, start_iter); if (end_iter <= 0) { end_iter = total_iter; } end_iter = std::min(total_iter, end_iter); auto original_models = std::move(models_); std::vector<int> indices(total_iter); for (int i = 0; i < total_iter; ++i) { indices[i] = i; } Random tmp_rand(17); for (int i = start_iter; i < end_iter - 1; ++i) { int j = tmp_rand.NextShort(i + 1, end_iter); std::swap(indices[i], indices[j]); } models_ = std::vector<std::unique_ptr<Tree>>(); for (int i = 0; i < total_iter; ++i) { for (int j = 0; j < num_tree_per_iteration_; ++j) { int tree_idx = indices[i] * num_tree_per_iteration_ + j; auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get()))); models_.push_back(std::move(new_tree)); } } } /*! 
* \brief Reset the training data * \param train_data New Training data * \param objective_function Training objective function * \param training_metrics Training metrics */ void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function, const std::vector<const Metric*>& training_metrics) override; /*! * \brief Reset Boosting Config * \param gbdt_config Config for boosting */ void ResetConfig(const Config* gbdt_config) override; /*! * \brief Adding a validation dataset * \param valid_data Validation dataset * \param valid_metrics Metrics for validation dataset */ void AddValidDataset(const Dataset* valid_data, const std::vector<const Metric*>& valid_metrics) override; /*! * \brief Perform a full training procedure * \param snapshot_freq frequence of snapshot * \param model_output_path path of model file */ void Train(int snapshot_freq, const std::string& model_output_path) override; void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override; /*! * \brief Training logic * \param gradients nullptr for using default objective, otherwise use self-defined boosting * \param hessians nullptr for using default objective, otherwise use self-defined boosting * \return True if cannot train any more */ bool TrainOneIter(const score_t* gradients, const score_t* hessians) override; /*! * \brief Rollback one iteration */ void RollbackOneIter() override; /*! * \brief Get current iteration */ int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; } /*! * \brief Can use early stopping for prediction or not * \return True if cannot use early stopping for prediction */ bool NeedAccuratePrediction() const override { if (objective_function_ == nullptr) { return true; } else { return objective_function_->NeedAccuratePrediction(); } } /*! 
* \brief Get evaluation result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return evaluation result */ std::vector<double> GetEvalAt(int data_idx) const override; /*! * \brief Get current training score * \param out_len length of returned score * \return training score */ const double* GetTrainingScore(int64_t* out_len) override; /*! * \brief Get size of prediction at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \return The size of prediction */ int64_t GetNumPredictAt(int data_idx) const override { CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size())); data_size_t num_data = train_data_->num_data(); if (data_idx > 0) { num_data = valid_score_updater_[data_idx - 1]->num_data(); } return num_data * num_class_; } /*! * \brief Get prediction result at data_idx data * \param data_idx 0: training data, 1: 1st validation data * \param result used to store prediction result, should allocate memory before call this function * \param out_len length of returned score */ void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override; /*! 
* \brief Get number of prediction for one data * \param num_iteration number of used iterations * \param is_pred_leaf True if predicting leaf index * \param is_pred_contrib True if predicting feature contribution * \return number of prediction */ inline int NumPredictOneRow(int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override { int num_preb_in_one_row = num_class_; if (is_pred_leaf) { int max_iteration = GetCurrentIteration(); if (num_iteration > 0) { num_preb_in_one_row *= static_cast<int>(std::min(max_iteration, num_iteration)); } else { num_preb_in_one_row *= max_iteration; } } else if (is_pred_contrib) { num_preb_in_one_row = num_tree_per_iteration_ * (max_feature_idx_ + 2); // +1 for 0-based indexing, +1 for baseline } return num_preb_in_one_row; } void PredictRaw(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void Predict(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; void PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const override; void PredictLeafIndex(const double* features, double* output) const override; void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override; void PredictContrib(const double* features, double* output, const PredictionEarlyStopInstance* earlyStop) const override; /*! * \brief Dump model to json format string * \param start_iteration The model will be saved start from * \param num_iteration Number of iterations that want to dump, -1 means dump all * \return Json format string of model */ std::string DumpModel(int start_iteration, int num_iteration) const override; /*! 
* \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \return if-else format codes of model */ std::string ModelToIfElse(int num_iteration) const override; /*! * \brief Translate model to if-else statement * \param num_iteration Number of iterations that want to translate, -1 means translate all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToIfElse(int num_iteration, const char* filename) const override; /*! * \brief Save model to file * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \param filename Filename that want to save to * \return is_finish Is training finished or not */ bool SaveModelToFile(int start_iteration, int num_iterations, const char* filename) const override; /*! * \brief Save model to string * \param start_iteration The model will be saved start from * \param num_iterations Number of model that want to save, -1 means save all * \return Non-empty string if succeeded */ std::string SaveModelToString(int start_iteration, int num_iterations) const override; /*! * \brief Restore from a serialized buffer */ bool LoadModelFromString(const char* buffer, size_t len) override; /*! * \brief Calculate feature importances * \param num_iteration Number of model that want to use for feature importance, -1 means use all * \param importance_type: 0 for split, 1 for gain * \return vector of feature_importance */ std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override; /*! * \brief Get max feature index of this model * \return Max feature index of this model */ inline int MaxFeatureIdx() const override { return max_feature_idx_; } /*! 
 * \brief Get feature names of this model
 * \return Feature names of this model
 */
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }

/*!
 * \brief Get index of label column
 * \return index of label column
 */
inline int LabelIdx() const override { return label_idx_; }

/*!
 * \brief Get number of weak sub-models
 * \return Number of weak sub-models
 */
// Total tree count across all iterations (num_iterations * num_tree_per_iteration_).
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }

/*!
 * \brief Get number of tree per iteration
 * \return number of tree per iteration
 */
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }

/*!
 * \brief Get number of classes
 * \return Number of classes
 */
inline int NumberOfClasses() const override { return num_class_; }

// Prepare the model for prediction.
// Clamps the number of iterations used for prediction to the number of
// trained iterations; num_iteration <= 0 means "use all iterations".
// When predicting feature contributions (SHAP-style), each tree's max depth
// is recomputed up front because the contribution code depends on it.
inline void InitPredict(int num_iteration, bool is_pred_contrib) override {
  num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  if (num_iteration > 0) {
    num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_);
  }
  if (is_pred_contrib) {
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
      models_[i]->RecomputeMaxDepth();
    }
  }
}

// Read the output value of one leaf of one tree.
// Bounds are enforced with CHECK; out-of-range indices abort.
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
  CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
  CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
  return models_[tree_idx]->LeafOutput(leaf_idx);
}

// Overwrite the output value of one leaf of one tree (used e.g. for model
// refitting). Bounds are enforced with CHECK; out-of-range indices abort.
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
  CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
  CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
  models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}

/*!
 * \brief Get Type name of this boosting object
 */
const char* SubModelName() const override { return "tree"; }

protected:
/*!
 * \brief Print eval result and check early stopping
 */
virtual bool EvalAndCheckEarlyStopping();

/*!
* \brief reset config for bagging */ void ResetBaggingConfig(const Config* config, bool is_change_dataset); /*! * \brief Implement bagging logic * \param iter Current interation */ virtual void Bagging(int iter); /*! * \brief Helper function for bagging, used for multi-threading optimization * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief Helper function for bagging, used for multi-threading optimization, balanced sampling * \param start start indice of bagging * \param cnt count * \param buffer output buffer * \return count of left size */ data_size_t BalancedBaggingHelper(Random* cur_rand, data_size_t start, data_size_t cnt, data_size_t* buffer); /*! * \brief calculate the object function */ virtual void Boosting(); /*! * \brief updating score after tree was trained * \param tree Trained tree of this iteration * \param cur_tree_id Current tree for multiclass training */ virtual void UpdateScore(const Tree* tree, const int cur_tree_id); /*! * \brief eval results for one metric */ virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const; /*! * \brief Print metric result of current iteration * \param iter Current interation * \return best_msg if met early_stopping */ std::string OutputMetric(int iter); double BoostFromAverage(int class_id, bool update_scorer); /*! \brief current iteration */ int iter_; /*! \brief Pointer to training data */ const Dataset* train_data_; /*! \brief Config of gbdt */ std::unique_ptr<Config> config_; /*! \brief Tree learner, will use this class to learn trees */ std::unique_ptr<TreeLearner> tree_learner_; /*! \brief Objective function */ const ObjectiveFunction* objective_function_; /*! \brief Store and update training data's score */ std::unique_ptr<ScoreUpdater> train_score_updater_; /*! 
\brief Metrics for training data */ std::vector<const Metric*> training_metrics_; /*! \brief Store and update validation data's scores */ std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_; /*! \brief Metric for validation data */ std::vector<std::vector<const Metric*>> valid_metrics_; /*! \brief Number of rounds for early stopping */ int early_stopping_round_; /*! \brief Only use first metric for early stopping */ bool es_first_metric_only_; /*! \brief Best iteration(s) for early stopping */ std::vector<std::vector<int>> best_iter_; /*! \brief Best score(s) for early stopping */ std::vector<std::vector<double>> best_score_; /*! \brief output message of best iteration */ std::vector<std::vector<std::string>> best_msg_; /*! \brief Trained models(trees) */ std::vector<std::unique_ptr<Tree>> models_; /*! \brief Max feature index of training data*/ int max_feature_idx_; /*! \brief First order derivative of training data */ std::vector<score_t> gradients_; /*! \brief Secend order derivative of training data */ std::vector<score_t> hessians_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> bag_data_indices_; /*! \brief Number of in-bag data */ data_size_t bag_data_cnt_; /*! \brief Store the indices of in-bag data */ std::vector<data_size_t> tmp_indices_; /*! \brief Number of training data */ data_size_t num_data_; /*! \brief Number of trees per iterations */ int num_tree_per_iteration_; /*! \brief Number of class */ int num_class_; /*! \brief Index of label column */ data_size_t label_idx_; /*! \brief number of used model */ int num_iteration_for_pred_; /*! \brief Shrinkage rate for one iteration */ double shrinkage_rate_; /*! \brief Number of loaded initial models */ int num_init_iteration_; /*! \brief Feature names */ std::vector<std::string> feature_names_; std::vector<std::string> feature_infos_; /*! \brief number of threads */ int num_threads_; /*! 
\brief Buffer for multi-threading bagging */ std::vector<data_size_t> offsets_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_cnts_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> left_write_pos_buf_; /*! \brief Buffer for multi-threading bagging */ std::vector<data_size_t> right_write_pos_buf_; std::unique_ptr<Dataset> tmp_subset_; bool is_use_subset_; std::vector<bool> class_need_train_; bool is_constant_hessian_; std::unique_ptr<ObjectiveFunction> loaded_objective_; bool average_output_; bool need_re_bagging_; bool balanced_bagging_; std::string loaded_parameter_; std::vector<int8_t> monotone_constraints_; Json forced_splits_json_; }; } // namespace LightGBM #endif // LightGBM_BOOSTING_GBDT_H_
LAGraph_pagerank3b.c
//------------------------------------------------------------------------------ // LAGraph_pagerank3b: pagerank using a real semiring //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2019 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ // LAGraph_pagerank3b: Alternative PageRank implementation using a real // semiring. // // This algorithm follows the specification given in the GAP Benchmark Suite: // https://arxiv.org/abs/1508.03619 // For fastest results, the input matrix should be GrB_FP32, stored in // GxB_BY_COL format. #include "LAGraph.h" #define LAGRAPH_FREE_ALL { \ GrB_free(&transpose_desc); \ GrB_free(&invmask_desc); \ GrB_free(&A); \ GrB_free(&G); \ GrB_free(&grb_d_out); \ GrB_free(&importance_vec); \ GrB_free(&grb_pr); \ }; // uncomment this to see the intermidiate resluts; lots of prints!! 
//#undef NDEBUG // uncomment this to see the timing info #define PRINT_TIMING_INFO GrB_Info LAGraph_pagerank3b // PageRank definition ( GrB_Vector *result, // output: array of LAGraph_PageRank structs GrB_Matrix A_input, // binary input graph, not modified float damping_factor, // damping factor unsigned long itermax, // maximum number of iterations int* iters // output: number of iterations taken ) { GrB_Info info; GrB_Index n; GrB_Descriptor invmask_desc = NULL ; GrB_Descriptor transpose_desc = NULL ; GrB_Vector grb_d_out = NULL ; GrB_Matrix A = NULL ; #ifdef PRINT_TIMING_INFO // start the timer double tic [2] ; LAGraph_tic (tic) ; #endif GrB_Vector importance_vec = NULL ; GrB_Vector grb_pr = NULL; GrB_Matrix G = NULL ; // a dense row of zeros zeroes(1,n) GrB_Index ncols ; //number of columnns LAGRAPH_OK(GrB_Matrix_ncols(&ncols , A_input)); LAGRAPH_OK(GrB_Matrix_nrows(&n, A_input)); GrB_Index nvals; LAGRAPH_OK(GrB_Matrix_nvals(&nvals, A_input)); if (ncols != n) { return (GrB_DIMENSION_MISMATCH) ; } LAGRAPH_OK(GrB_Matrix_new (&G, GrB_FP32, n, n)); LAGRAPH_OK(GrB_Matrix_new (&A, GrB_FP32, n, n)); LAGRAPH_OK(GxB_set (A, GxB_FORMAT, GxB_BY_COL)); // G is zeros in last row for (GrB_Index c = 0; c < n; c++){ LAGRAPH_OK(GrB_Matrix_setElement (G, 0.0, n-1, c)); } #ifndef NDEBUG int print_size = 5; //number of entries get printed print_size = (print_size > n)? 
n : print_size; // GxB_print (G, 3) ; #endif // A = A_input + G; LAGRAPH_OK(GrB_eWiseAdd (A, NULL, NULL, GrB_PLUS_FP32, A_input, G, NULL)); GrB_free (&G) ; #ifndef NDEBUG // GxB_print (A, 3) ; #endif // Create complement descriptor LAGRAPH_OK(GrB_Descriptor_new(&invmask_desc)); LAGRAPH_OK(GrB_Descriptor_set(invmask_desc, GrB_MASK, GrB_SCMP)); // Create transpose descriptor LAGRAPH_OK(GrB_Descriptor_new(&transpose_desc)); LAGRAPH_OK(GrB_Descriptor_set(transpose_desc, GrB_INP0, GrB_TRAN)); LAGRAPH_OK(GrB_Descriptor_set(transpose_desc, GrB_OUTP, GrB_REPLACE)); // Matrix A row sum // Stores the outbound degrees of all vertices LAGRAPH_OK(GrB_Vector_new(&grb_d_out, GrB_FP32, n)); LAGRAPH_OK(GrB_reduce( grb_d_out, NULL, NULL, GxB_PLUS_FP32_MONOID, A, NULL )); #ifndef NDEBUG GxB_print (grb_d_out, 1) ; // GxB_print (A, 3) ; #endif // Iteration // Initialize PR vector LAGRAPH_OK(GrB_Vector_new(&grb_pr, GrB_FP32, n)); LAGRAPH_OK(GrB_Vector_new(&importance_vec, GrB_FP32, n)); // Teleport value const float teleport = (1 - damping_factor) / n; float tol = 1e-4; float rdiff = 1 ; // first iteration is always done GrB_Type type = GrB_FP32 ; GrB_Index *dI = NULL ; float *d_sp= NULL ; GrB_Index d_nvals; GrB_Index d_n; // d_sp <----- grb_d_out || export LAGRAPH_OK (GxB_Vector_export (&grb_d_out, &type, &d_n, &d_nvals, &dI, (void **) (&d_sp), NULL)) ; // dens d_out float *d_out = (float *) LAGraph_calloc (n, sizeof(float)); int nthreads = LAGraph_get_nthreads ( ) ; nthreads = LAGRAPH_MIN (n , nthreads) ; nthreads = LAGRAPH_MAX (nthreads, 1) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t i = 0 ; i < d_nvals; i++){ GrB_Index ind = (GrB_Index) dI[i]; d_out [ind] = d_sp [i]; } free (d_sp); free (dI); #ifndef NDEBUG for (int i = 0 ; i < print_size; i++){ printf("d_out [%d]=%g\n", i, d_out [i]); } #endif // initializing pr float *pr = (float *) malloc (n*sizeof(float)); #pragma omp parallel for num_threads(nthreads) schedule(static) for (int i = 0; i < n ; 
i++){ pr [i] = 1.0/n; } #ifndef NDEBUG for (int i = 0 ; i < print_size ; i++){ printf("pr[%d]=%f\n", i, pr [i]); } #endif float *oldpr = (float *) malloc (n*sizeof(float)); //initailze the dense indices GrB_Index *I = LAGraph_malloc(n, sizeof(GrB_Index)); #pragma omp parallel for num_threads(nthreads) schedule(static) for (GrB_Index j = 0; j < n; j++){ I[j] = j; } #ifdef PRINT_TIMING_INFO // stop the timer double t1 = LAGraph_toc (tic); printf ("\ninitialization time: %12.6e (sec)\n",t1); LAGraph_tic (tic); #endif for ((*iters) = 0 ; (*iters) < itermax && rdiff > tol ; (*iters)++) { // oldpr = pr; deep copy //GrB_Vector_dup(&oldpr, pr); #pragma omp parallel for num_threads(nthreads) schedule(static) for (int i = 0; i < n ; i++){ oldpr [i] = pr [i]; } // Importance calculation #pragma omp parallel for num_threads(nthreads) schedule(static) for (int i = 0 ; i < n; i++){ if (d_out [i] != 0){ pr [i] = damping_factor * pr [i] / d_out [i]; } else{ pr [i] = 0; } } #ifndef NDEBUG for (int i = 0 ; i < print_size; i++){ printf (" pr [%d] = %f\n", i, pr [i]); } #endif // importance_vec <----- pr LAGRAPH_OK (GxB_Vector_import (&importance_vec, GrB_FP32, n, n, &I, (void **) (&pr), NULL)) ; #ifndef NDEBUG printf ("after importance_vec import\n"); GxB_print (importance_vec, 2) ; #endif // Calculate total PR of all inbound vertices // importance_vec = A' * importance_vec LAGRAPH_OK(GrB_mxv( importance_vec, NULL, NULL, GxB_PLUS_TIMES_FP32, A, importance_vec, transpose_desc )); #ifndef NDEBUG printf ("==============2\n"); printf ("after mxv\n"); GxB_print (importance_vec, 1) ; #endif GrB_Index nvals_exp; // pr <----- importance_vec GrB_Type ivtype; LAGRAPH_OK (GxB_Vector_export (&importance_vec, &ivtype, &n, &nvals_exp, &I, (void **) (&pr), NULL)) ; // assert (nvals_exp == n ); // PageRank summarization // Add teleport, importance_vec, and dangling_vec components together // pr = (1-df)/n + pr #pragma omp parallel for num_threads(nthreads) schedule(static) for (int i = 0 ; i < n; 
i++){ pr [i] += teleport; } #ifndef NDEBUG for (int i = 0 ; i < print_size; i++){ printf (" pr [%d] = %f\n", i, pr [i]); } #endif //---------------------------------------------------------------------- // rdiff = sum ((oldpr-pr).^2) //---------------------------------------------------------------------- rdiff = 0; // norm (oldpr pr, 1) #pragma omp parallel for num_threads(nthreads) reduction(+:rdiff) for (int i = 0 ; i < n; i++){ float d = (oldpr [i] - pr [i]); d = (d > 0 ? d : -d); //abs(d) rdiff += d; } #ifndef NDEBUG printf("---------------------------iters %d rdiff=%f\n",*iters, rdiff); #endif } #ifdef PRINT_TIMING_INFO // stop the timer double t2 = LAGraph_toc (tic); printf ("compuatatin time: %12.6e (sec) ratio (comp/init): %f\n\n", t2, t2/t1); #endif GrB_Index *prI = LAGraph_malloc(n, sizeof(GrB_Index)); // grb_pr<----- pr || import back LAGRAPH_OK (GxB_Vector_import (&grb_pr, GrB_FP32, n, n, &I, (void **) (&pr), NULL)) ; (*result) = grb_pr; free(I); free (oldpr); return (GrB_SUCCESS); }
task_types.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> #include <math.h> __attribute__ ((noinline)) // workaround for bug in icc void print_task_type(int id) { #pragma omp critical { int task_type; char buffer[2048]; ompt_get_task_info(0, &task_type, NULL, NULL, NULL, NULL); format_task_type(task_type, buffer); printf("%" PRIu64 ": id=%d task_type=%s=%d\n", ompt_get_thread_data()->value, id, buffer, task_type); } }; int main() { //initial task print_task_type(0); int x; //implicit task #pragma omp parallel num_threads(1) { print_task_type(1); x++; } #pragma omp parallel num_threads(2) #pragma omp master { //explicit task #pragma omp task { print_task_type(2); x++; } //explicit task with undeferred #pragma omp task if(0) { print_task_type(3); x++; } //explicit task with untied #pragma omp task untied { print_task_type(4); x++; } //explicit task with final #pragma omp task final(1) { print_task_type(5); x++; //nested explicit task with final and undeferred #pragma omp task { print_task_type(6); x++; } } //Mergeable task test deactivated for now //explicit task with mergeable /* #pragma omp task mergeable if((int)sin(0)) { print_task_type(7); x++; } */ //TODO: merged task } // Check if libomp supports the callbacks for this test. 
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create' // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=0, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]], task_type=ompt_task_initial=1, has_dependences=no // CHECK-NOT: 0: parallel_data initially not null // CHECK: {{^}}[[MASTER_ID]]: id=0 task_type=ompt_task_initial=1 // CHECK: {{^}}[[MASTER_ID]]: id=1 task_type=ompt_task_implicit|ompt_task_undeferred=134217730 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit=4, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=2 task_type=ompt_task_explicit=4 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred=134217732, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=3 task_type=ompt_task_explicit|ompt_task_undeferred=134217732 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_untied=268435460, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=4 task_type=ompt_task_explicit|ompt_task_untied=268435460 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_final=536870916, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=5 
task_type=ompt_task_explicit|ompt_task_final=536870916 // CHECK-DAG: {{^[0-9]+}}: ompt_event_task_create: parent_task_id={{[0-9]+}}, parent_task_frame.exit={{0x[0-f]+}}, parent_task_frame.reenter={{0x[0-f]+}}, new_task_id={{[0-9]+}}, codeptr_ra={{0x[0-f]+}}, task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644, has_dependences=no // CHECK-DAG: {{^[0-9]+}}: id=6 task_type=ompt_task_explicit|ompt_task_undeferred|ompt_task_final=671088644 return 0; }
GB_unaryop__minv_uint64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint64_bool
// op(A') function:  GB_tran__minv_uint64_bool

// C type:   uint64_t
// A type:   bool
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, x)   \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise kernel: Cx [p] = minv ((uint64_t) Ax [p]) for p = 0..anz-1,
// parallelized over nthreads.  Returns GrB_NO_VALUE when this hard-coded
// kernel is compiled out by GB_DISABLE (the generic kernel is used instead).
GrB_Info GB_unop__minv_uint64_bool
(
    uint64_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose kernel: the actual loop body lives in the shared template
// GB_unaryop_transpose.c, specialized here via the macros defined above.
GrB_Info GB_tran__minv_uint64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gemv_c_csr_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>

// y := alpha * conj(A)^T * x + beta * y, serial reference implementation.
// A is m x n in CSR; the result y has length n (= A->cols).
static alphasparse_status_t gemv_csr_trans_serial(const ALPHA_Number alpha,
                                                  const ALPHA_SPMAT_CSR *A,
                                                  const ALPHA_Number *x,
                                                  const ALPHA_Number beta,
                                                  ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;

    // FIX: scale y by beta first.  The previous code never applied beta,
    // so this path computed y += alpha*conj(A)^T*x, i.e. behaved as if
    // beta == 1 — inconsistent with the OpenMP path below and with the
    // gemv contract implied by the beta parameter.
    for (ALPHA_INT i = 0; i < n; ++i)
    {
        alpha_mule(y[i], beta);
    }

    // Scatter each row's conjugated entries into y (column-index targets).
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        for (ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ai++)
        {
            ALPHA_Number val;
            cmp_conj(val, A->values[ai]);
            alpha_mul(val, alpha, val);
            alpha_madde(y[A->col_indx[ai]], val, x[i]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

// y := alpha * conj(A)^T * x + beta * y, multi-threaded implementation.
// Each thread accumulates into a private length-n buffer (tmp[tid]) to avoid
// write conflicts on y; the buffers are reduced into y afterwards.
static alphasparse_status_t gemv_csr_trans_omp(const ALPHA_Number alpha,
                                               const ALPHA_SPMAT_CSR *A,
                                               const ALPHA_Number *x,
                                               const ALPHA_Number beta,
                                               ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    const ALPHA_INT thread_num = alpha_get_thread_num();

    // Rows are partitioned by nonzero count for load balance.
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, m, thread_num, partition);

    // NOTE(review): malloc results are unchecked here (as in the rest of the
    // kernels); confirm that is acceptable project-wide.
    ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * n);
        memset(tmp[tid], '\0', sizeof(ALPHA_Number) * n);
        for (ALPHA_INT i = local_m_s; i < local_m_e; ++i)
        {
            const ALPHA_Number x_r = x[i];
            // FIX: use ALPHA_INT (not int) for nonzero positions, so large
            // matrices whose nnz exceeds INT_MAX are handled correctly.
            ALPHA_INT pkl = A->rows_start[i];
            ALPHA_INT pke = A->rows_end[i];
            // 4-way unrolled scatter of conjugated entries.
            for (; pkl < pke - 3; pkl += 4)
            {
                ALPHA_Number conj0, conj1, conj2, conj3;
                cmp_conj(conj0, A->values[pkl]);
                cmp_conj(conj1, A->values[pkl + 1]);
                cmp_conj(conj2, A->values[pkl + 2]);
                cmp_conj(conj3, A->values[pkl + 3]);
                alpha_madde(tmp[tid][A->col_indx[pkl]], conj0, x_r);
                alpha_madde(tmp[tid][A->col_indx[pkl + 1]], conj1, x_r);
                alpha_madde(tmp[tid][A->col_indx[pkl + 2]], conj2, x_r);
                alpha_madde(tmp[tid][A->col_indx[pkl + 3]], conj3, x_r);
            }
            // Remainder loop.
            // NOTE(review): alpha_conj here vs cmp_conj above — both appear
            // to compute a conjugate; confirm they are equivalent macros.
            for (; pkl < pke; ++pkl)
            {
                ALPHA_Number conj0;
                alpha_conj(conj0, A->values[pkl]);
                alpha_madde(tmp[tid][A->col_indx[pkl]], conj0, x_r);
            }
        }
    }

    // Reduce the per-thread buffers: y[i] = beta*y[i] + alpha*sum_j tmp[j][i]
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < n; ++i)
    {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_adde(tmp_y, tmp[j][i]);
        }
        alpha_mule(y[i], beta);
        alpha_madde(y[i], alpha, tmp_y);
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        alpha_free(tmp[i]);
    }
    alpha_free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

// Public entry point: dispatches to the multi-threaded kernel.
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSR *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return gemv_csr_trans_omp(alpha, A, x, beta, y);
}
levelset_fluid_solver.h
/* ============================================================================== KratosPFEMApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: antonia $ // Date: $Date: 2009-01-14 16:24:38 $ // Revision: $Revision: 1.11 $ // // #if !defined(KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED) #define KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED #define SPLIT_OSS // #define SYMM_PRESS // System includes #include <string> #include <iostream> #include <algorithm> // #include <omp.h> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "incompressible_fluid_application.h" namespace Kratos { template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver> class LevelSetFluidSolver { public: //name for the self defined structure typedef EdgesStructureType<TDim> CSR_Tuple; typedef std::vector<CSR_Tuple> EdgesVectorType; //name for row start and column index vectors typedef std::vector<unsigned int> IndicesVectorType; //defining matrix type for test calculations typedef std::vector< array_1d<double, TDim> > CalcVectorType; //defining type for local storage of nodal values typedef std::vector<double> ValuesVectorType; //defining types for matrix operations typedef typename TSparseSpace::MatrixType TSystemMatrixType; typedef typename TSparseSpace::VectorType TSystemVectorType; //constructor and destructor LevelSetFluidSolver(MatrixContainer& mr_matrix_container, ModelPart& mr_model_part, bool include_shock_capturing, bool smooth_convective_velocity ) : mr_matrix_container(mr_matrix_container),mr_model_part(mr_model_part) { //options minclude_shock_capturing = include_shock_capturing; msmooth_convective_velocity = smooth_convective_velocity; }; ~LevelSetFluidSolver() {}; //*********************************** //function to initialize fluid solver void Initialize( ) { KRATOS_TRY //get number of nodes unsigned 
int n_nodes = mr_model_part.Nodes().size(); unsigned int n_edges = mr_matrix_container.GetNumberEdges(); //size data vectors mWork.resize(n_nodes); mvel_n.resize(n_nodes); mvel_n1.resize(n_nodes); mInitMom.resize(n_nodes); mCurrMom.resize(n_nodes); mPn.resize(n_nodes); mPn1.resize(n_nodes); mViscosity.resize(n_nodes); mRho.resize(n_nodes); mRhoOld.resize(n_nodes); mC2inv.resize(n_nodes); mA.resize(n_nodes); mHmin.resize(n_nodes); mHavg.resize(n_nodes); mNodalFlag.resize(n_nodes); mdistances.resize(n_nodes); mEps.resize(n_nodes); mEpsOld.resize(n_nodes); mD.resize(n_nodes); mTauPressure.resize(n_nodes); mTauConvection.resize(n_nodes); mPi.resize(n_nodes); mXi.resize(n_nodes); mBodyForce.resize(n_nodes); mDrag.resize(n_nodes); mx.resize(n_nodes); mCp.resize(n_nodes); mMach.resize(n_nodes); mEdgeDimensions.resize(n_edges); mBeta.resize(n_edges); for (unsigned int csr_index = 0; csr_index < n_edges; csr_index++) mBeta[csr_index] = 1.0; ValuesVectorType external_pressure; external_pressure.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(EXTERNAL_PRESSURE, external_pressure, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(IS_BOUNDARY, mNodalFlag, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, mr_model_part.Nodes()); mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, mr_model_part.Nodes()); mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, mr_model_part.Nodes()); mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes()); //mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes()); //set flag for first time step mFirstStep = true; //loop to categorize boundary nodes for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { //differentiate between types of boundary condition switch (static_cast<unsigned 
int>(mNodalFlag[i_node]))
{
case 1: //velocity inlet
    mVelocityInletList.push_back(i_node);
    mVelocityInlet.push_back(mvel_n[i_node]);
    mDensityInlet.push_back(mRho[i_node]);
    mDissipationList.push_back(i_node);
    break;
case 2: //no-slip condition
    mNoSlipBoundaryList.push_back(i_node);
    break;
case 3: //slip condition
    mSlipBoundaryList.push_back(i_node);
    break;
case 4: //mixed condition (slip and pressure node)
    mPressureOutletList.push_back(i_node);
    mPressureOutlet.push_back(external_pressure[i_node]);
    mSlipBoundaryList.push_back(i_node);
    mDissipationList.push_back(i_node);
    break;
case 5: //pressure outlet
    mPressureOutletList.push_back(i_node);
    mPressureOutlet.push_back(external_pressure[i_node]);
    mDissipationList.push_back(i_node);
    break;
}
}

//print number of nodes corresponding to the different types of boundary conditions
KRATOS_WATCH(mVelocityInletList.size())
KRATOS_WATCH(mDensityInlet.size())
KRATOS_WATCH(mPressureOutletList.size())
KRATOS_WATCH(mSlipBoundaryList.size())
KRATOS_WATCH(mNoSlipBoundaryList.size())
KRATOS_WATCH(mDissipationList.size())

//determine number of edges and entries
//(each edge contributes an ij and a ji entry; one diagonal entry per node)
unsigned int n_nonzero_entries = 2 * n_edges + n_nodes;
//allocate memory for the pressure Laplacian mL
mL.resize(n_nodes,n_nodes,n_nonzero_entries);

//define the sparsity pattern of mL row by row
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
    //flag for considering diagonal matrix elements
    //(becomes 1 once the diagonal entry of this row has been inserted)
    bool flag = 0;

    //loop over all neighbours
    for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
    {
        //get global index of neighbouring node j
        unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];

        //define matrix structure row by row (the order does matter!)
        if ((j_neighbour > i_node) && (flag == 0))
        {
            //add diagonal/nodal contribution
            mL.push_back(i_node, i_node, 0.0);
            flag = 1;
        }
        //add non-diagonal/edge contribution
        mL.push_back(i_node, j_neighbour, 0.0);
    }
    //if diagonal element is the last non-zero element of the row
    if (flag == 0)
        mL.push_back(i_node, i_node, 0.0);
}

//compute area normals
CalculateNormals(mr_model_part.Conditions());
// WriteVectorToDatabase(NORMAL, mPressureNormal, mr_model_part.Nodes());
mr_matrix_container.WriteVectorToDatabase(NORMAL, mSlipNormal, mr_model_part.Nodes());

//compute minimum length of the surrounding edges
CalculateEdgeLengths(mr_model_part.Nodes());

//prepare initial momentum for first time step
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
    double& rho_i = mRho[i_node];
    array_1d<double, TDim>& u_i = mvel_n1[i_node];
    array_1d<double, TDim>& U_i = mInitMom[i_node];

    //compute initial momentum (rho*u) for iteration of step 1
    for (unsigned int component = 0; component < TDim; component++)
        U_i[component] = rho_i * u_i[component];
}

KRATOS_CATCH("")
}

//***************************************
//function to set adequate time step size
//chooses the minimum over all nodes of a CFL-based dt that accounts for the
//convective limit (|v|/h_min) and the viscous limit (2*nu/h_min^2), then
//writes the result into ProcessInfo[DELTA_TIME]
void ComputeTimeStep(double CFLNumber)
{
    KRATOS_TRY

    //local variable for time step size
    double delta_t = 1e10;

    //getting value of current velocity and of viscosity
    mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, mr_model_part.Nodes());
    mr_matrix_container.FillCoordinatesFromDatabase(mx, mr_model_part.Nodes());

    //*******************
    //loop over all nodes
    // NOTE(review): loop bound declared as 'double' and compared against an
    // unsigned counter — works but should be an integer type
    double n_nodes = mvel_n1.size();
    for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& v_i = mvel_n1[i_node];
        // KRATOS_WATCH(v_i);
        array_1d<double, TDim>& x_i = mx[i_node];
        // KRATOS_WATCH(x_i);

        //use CFL condition to compute time step size
        double delta_t_i = CFLNumber * 1.0 / (norm_2(v_i)/mHmin[i_node] + 2.0 * mViscosity[i_node]/(mHmin[i_node]*mHmin[i_node]) );

        //considering the most restrictive case of neighbor's velocities with similar direction but opposite sense.
        //loop over all neighbours
        for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
        {
            //get global index of neighbouring node j
            unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];

            array_1d<double, TDim>& v_j = mvel_n1[j_neighbour];
            array_1d<double, TDim>& x_j = mx[j_neighbour];
            array_1d<double, TDim> edge_dir = ZeroVector(TDim);
            // KRATOS_WATCH(x_j);
            // KRATOS_WATCH(v_j);

            //Calculate edge direction
            // NOTE(review): component [2] is written unconditionally — this
            // assumes TDim == 3; for TDim == 2 it writes past the array. Confirm.
            edge_dir[0] = x_j[0]-x_i[0];
            edge_dir[1] = x_j[1]-x_i[1];
            edge_dir[2] = x_j[2]-x_i[2];
            // KRATOS_WATCH(edge_dir);
            double aux = norm_2(edge_dir);
            // KRATOS_WATCH(aux);
            if (aux == 0.0)
            {
                edge_dir = ZeroVector(TDim);
            }
            else
            {
                //normalized edge direction
                edge_dir /= norm_2(edge_dir);
                // KRATOS_WATCH(edge_dir);
            }

            //int aux = inner_prod(v_i,v_j);
            //velocity components of i and j along the edge
            double v_i_par = inner_prod(v_i, edge_dir);
            double v_j_par = inner_prod(v_j, edge_dir);
            // KRATOS_WATCH(v_i_par);
            // KRATOS_WATCH(v_j_par);

            //nodes approaching each other (opposite senses along the edge):
            //use the sum of the two speeds as the effective convective velocity
            if ((v_i_par >= 0.0 && v_j_par <= 0.0) || (v_i_par <= 0.0 && v_j_par >= 0.0))
            {
                double delta_t_j = CFLNumber / ((fabs(v_i_par) + fabs(v_j_par))/mHmin[i_node] + 2.0 * mViscosity[i_node]/(mHmin[i_node]*mHmin[i_node]));
                // KRATOS_WATCH(delta_t_j);
                // KRATOS_WATCH(delta_t_i);
                if (delta_t_j < delta_t_i)
                    delta_t_i = delta_t_j;
            }
        }

        //choose the overall minimum of delta_t_i
        if (delta_t_i < delta_t)
            delta_t = delta_t_i;
    }

    //*******************
    //perform MPI syncronization of the dt (minimum should be kept)

    //write time step size to Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    CurrentProcessInfo[DELTA_TIME] = delta_t;

    KRATOS_CATCH("")
}

//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
Vector SolveStep1()
{
    KRATOS_TRY

    //PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();

//storage of nodal values in local variables
CalcVectorType rhs;
rhs.resize(n_nodes);

//read velocity and pressure data from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes);
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes);
mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes);
mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(DENSITY, mRhoOld, rNodes);
mr_matrix_container.FillVectorFromDatabase(BODY_FORCE, mBodyForce, rNodes);
mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, rNodes);
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(POROSITY, mEps, rNodes);
mr_matrix_container.FillOldScalarFromDatabase(POROSITY, mEpsOld, rNodes);

//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];

//compute nodal momentum (rho*u) at the current and old step and the
//volumetric body force rho*f
#pragma omp parallel for
for ( int i_node = 0; i_node < n_nodes; i_node++)
{
    // -> mCurrMom
    //compute the momentum at the current step -> mCurrMom
    double& rho_i = mRho[i_node];
    const array_1d<double, TDim>& u_i = mvel_n1[i_node];
    array_1d<double, TDim>& U_i = mCurrMom[i_node];
    for (unsigned int comp = 0; comp < TDim; comp++)
        U_i[comp] = rho_i * u_i[comp];

    // -> mInitMom
    double& rho_i_old = mRhoOld[i_node];
    //compute the momentum at the beginning of the step
    const array_1d<double, TDim>& u_i_old = mvel_n[i_node];
    array_1d<double, TDim>& U_i_old = mInitMom[i_node];
    for (unsigned int comp = 0; comp < TDim; comp++)
        U_i_old[comp] = rho_i_old * u_i_old[comp];

    //compute volumetric body force
    array_1d<double, TDim>& f_i = mBodyForce[i_node];
    for (unsigned int comp = 0; comp < TDim; comp++)
        f_i[comp] *= rho_i;
}

//rescale momenta and velocities by the porosity (Darcy-type formulation)
DivideByPorosity(mCurrMom, mCurrMom, mEps);
DivideByPorosity(mInitMom, mInitMom, mEpsOld);
DivideByPorosity(mvel_n, mvel_n, mEpsOld);
DivideByPorosity(mvel_n1, mvel_n1, mEps);

//compute advective velocity - area average of the current velocity
CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);

//compute intrinsic time (stabilization parameters tau)
double time_inv = 1.0/delta_t;
// time_inv = 0.0;
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
    // double& h_i = mHavg[i_node];
    double& h_i = mHmin[i_node];
    array_1d<double, TDim>& a_i = mA[i_node];
    const double nu_i = mViscosity[i_node];
    // mTau[i_node] = 1.0 / (0.5 * norm_2(a_i)/h_i + time_inv);
    double vel_norm = norm_2(a_i);
    // mTauPressure[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
    mTauPressure[i_node] = delta_t;
    mTauConvection[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );

    //clamp the pressure tau to the range [delta_t, 100*delta_t]
    if (mTauPressure[i_node] < delta_t)
        mTauPressure[i_node] = delta_t;
    else if(mTauPressure[i_node] > 100.0*delta_t)
        mTauPressure[i_node] = 100.0*delta_t;
}

//compute pressure switch (shock-capturing term, skipped on the first step)
if (mFirstStep == false)
    if(minclude_shock_capturing == true)
        ComputeMonotonicityPreserving();

mr_matrix_container.AssignVectorToVector(mInitMom,mWork); //mWork = mvel_n NO!!!-> mWork = mU_iold

//first step of Runge Kutta (classic 4-stage RK: weights dt/6, dt/3, dt/3, dt/6)
mr_matrix_container.AssignVectorToVector(mvel_n,mvel_n1); //mvel_n1 = mvel_n
mr_matrix_container.AssignVectorToVector(mInitMom,mCurrMom);

// double start_prod = omp_get_wtime();
CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce, mViscosity, rhs);
/*double norma=0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
    for (int kkk = 0; kkk < TDim; kkk++)
        norma += rhs[i_node][kkk]*rhs[i_node][kkk];
KRATOS_WATCH(norma);*/
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, 0.5*delta_t , mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mCurrMom);
/*mr_matrix_container.WriteVectorToDatabase(CONV_PROJ, mA, rNodes);
mr_matrix_container.WriteScalarToDatabase(TEMPERATURE, mTauConvection, rNodes);*/

//second step
CalculateVelocity(mvel_n1,mCurrMom,mRho);
CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs );
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, 0.5*delta_t , mr_matrix_container.GetInvertedMass(),rhs);
ApplyVelocityBC(mCurrMom);

//third step
CalculateVelocity(mvel_n1,mCurrMom,mRho);
CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mCurrMom,mInitMom, delta_t , mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mCurrMom);

//fourth step
CalculateVelocity(mvel_n1,mCurrMom,mRho);
CalculateAdvectiveVelocity( mvel_n1, mA, msmooth_convective_velocity);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mCurrMom, mPn1, mA, mBodyForce,mViscosity, rhs );
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
ApplyVelocityBC(mCurrMom);

//compute right-hand side: accumulated RK result becomes the fractional momentum
mr_matrix_container.AssignVectorToVector(mWork,mCurrMom);
ApplyVelocityBC(mCurrMom);

// //compute ratio for iteration
Vector stop_criteria(TDim);
noalias(stop_criteria) = ZeroVector(TDim);
// stop_criteria[0] = 0.0;
// stop_criteria[1] = 0.0;
return stop_criteria;

KRATOS_CATCH("")
}

//*********************************************************************
//function to calculate
right-hand side of fractional momentum equation void CalculateRHS( const CalcVectorType& momentum, const ValuesVectorType& pressure, const CalcVectorType& convective_velocity, const CalcVectorType& body_force, const ValuesVectorType& viscosity, CalcVectorType& rhs) { KRATOS_TRY int n_nodes = momentum.size(); //calculating the convective projection #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { array_1d<double, TDim>& pi_i = mPi[i_node]; //****************** //setting to zero for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] = 0.0; const array_1d<double, TDim>& a_i = convective_velocity[i_node]; const array_1d<double, TDim>& U_i = momentum[i_node]; //const double& p_i = pressure[i_node]; for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour]; const array_1d<double, TDim>& U_j = momentum[j_neighbour]; //const double& p_j = pressure[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; edge_ij.Add_ConvectiveContribution(pi_i,a_i,U_i,a_j,U_j); // edge_ij.Add_grad_p(pi_i,p_i,p_j); // edge_ij.Sub_grad_p(pi_i,p_i,p_j); } const double m_inv = mr_matrix_container.GetInvertedMass()[i_node]; for (unsigned int l_comp = 0; l_comp < TDim; l_comp++) pi_i[l_comp] *= m_inv; } //perform MPI syncronization //calculating the RHS array_1d<double,TDim> stab_low; array_1d<double,TDim> stab_high; #pragma omp parallel for private(stab_low,stab_high) for ( int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist < 0.0) //node is inside domain ---- if outside do nothing { array_1d<double, TDim>& rhs_i = rhs[i_node]; const array_1d<double, TDim>& f_i = body_force[i_node]; const array_1d<double, TDim>& a_i = convective_velocity[i_node]; const 
array_1d<double, TDim>& U_i = momentum[i_node]; const array_1d<double, TDim>& pi_i = mPi[i_node]; const double& p_i = pressure[i_node]; const double& nu_i = viscosity[i_node]; //double& h_i = mHmin[i_node]; //initializing with the external forces (e.g. gravity) double& m_i = mr_matrix_container.GetLumpedMass()[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) rhs_i[comp] = m_i * f_i[comp]; //porous contribution double eps = mEps[i_node]; double d = mD[i_node]; //diameter of the particle double kinv = 150.0*(1.0-eps)*(1.0-eps)/(eps*eps*eps*d*d); double norm_u_2 = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) norm_u_2 = a_i[comp]*a_i[comp]; // norm_u_2 = U_i[comp]*U_i[comp]; //CORRECTED Term double nonlin_term = kinv * nu_i * eps + 1.75 * sqrt(norm_u_2 * kinv / (eps * 150.0)); //ERROR IN WRITING THE NON LINEAR TERM// // double nonlin_term = kinv * nu_i * eps + 1.75 * norm_u_2 * sqrt(kinv / ( eps * 150.0)); for (unsigned int comp = 0; comp < TDim; comp++) rhs_i[comp] -= m_i * nonlin_term * U_i[comp]; //convective term for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++) { unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index]; const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour]; const array_1d<double, TDim>& U_j = momentum[j_neighbour]; const array_1d<double, TDim>& pi_j = mPi[j_neighbour]; const double& p_j = pressure[j_neighbour]; const double& nu_j = viscosity[j_neighbour]; CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index]; edge_ij.Sub_ConvectiveContribution(rhs_i,a_i,U_i,a_j,U_j); //take care! we miss including a B.C. 
for the external pressure edge_ij.Add_Gp(rhs_i,p_i,p_j); edge_ij.Sub_ViscousContribution(rhs_i,U_i,nu_i,U_j,nu_j); //add stabilization // edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,U_i,p_i,a_j,U_j,p_j); edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,U_i,a_j,U_j); double edge_tau = mTauConvection[i_node]; edge_ij.CalculateConvectionStabilization_HIGH( stab_high,a_i,pi_i,a_j,pi_j); double beta = mBeta[csr_index]; edge_ij.Sub_StabContribution( rhs_i, edge_tau, beta, stab_low, stab_high); } } } //boundary integrals --> finishing the calculation of the pressure gradient int loop_size1 = mPressureOutletList.size(); #pragma omp parallel for for (int i_pressure = 0; i_pressure < loop_size1; i_pressure++) { unsigned int i_node = mPressureOutletList[i_pressure]; array_1d<double, TDim>& rhs_i = rhs[i_node]; const double& p_ext_i = mPressureOutlet[i_pressure]; const array_1d<double, TDim>& an_i = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) rhs_i[comp] -= an_i[comp] * p_ext_i; // const array_1d<double, TDim>& U_i = momentum[i_node]; // const array_1d<double, TDim>& a_i = convective_velocity[i_node]; // double temp = 0.0; // double scalar_prod = 0.0; // for (unsigned int comp = 0; comp < TDim; comp++) // { // scalar_prod += an_i[comp] * U_i[comp]; // temp += an_i[comp] * an_i[comp]; // } // temp = sqrt(temp); // for (unsigned int comp = 0; comp < TDim; comp++) // // rhs_i[comp] -= U_i[comp] * temp; // // rhs_i[comp] -= an_i[comp] * scalar_prod / temp; // rhs_i[comp] -= a_i[comp] * scalar_prod / temp; } KRATOS_CATCH("") } //************************************************************************* //function to solve fluid equations - fractional step 2: calculate pressure void SolveStep2(typename TLinearSolver::Pointer pLinearSolver) { KRATOS_TRY //PREREQUISITES //allocate memory for variables ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //unknown and right-hand side vector 
TSystemVectorType dp, rhs;
dp.resize(n_nodes);
rhs.resize(n_nodes);
// NOTE(review): dU_i, dU_j and work_array appear unused in the visible body
array_1d<double, TDim> dU_i, dU_j, work_array;

//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];

#ifdef _OPENMP
double time_inv = 0.0; //1.0/delta_t;
#endif

#ifdef SPLIT_OSS
//projection of the pressure gradient: xi = M^-1 * G p
// #pragma omp parallel for firstprivate(time_inv), private(work_array)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
    array_1d<double, TDim>& xi_i = mXi[i_node];
    for (unsigned int comp = 0; comp < TDim; comp++)
        xi_i[comp] = 0.0;

    const double& p_i = mPn1[i_node];

    for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
    {
        //get global index of neighbouring node j
        unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
        const double& p_j = mPn1[j_neighbour];

        //projection of pressure gradients
        CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
        edge_ij.Add_grad_p(xi_i,p_i,p_j);
        // // // edge_ij.Sub_grad_p(xi_i,p_i,p_j);
    }

    const double& m_inv = mr_matrix_container.GetInvertedMass()[i_node];
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        xi_i[l_comp] *= m_inv;
}
#endif

//loop over all nodes: assemble the pressure Laplacian mL and the rhs
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
    double& rhs_i = rhs[i_node];
    rhs_i = 0.0;
    double& p_i = mPn1[i_node];
    double& eps_i = mEps[i_node];
    array_1d<double, TDim>& U_i_curr = mCurrMom[i_node];
    //array_1d<double, TDim>& a_i = mA[i_node];
    double& rho_i = mRho[i_node];
#ifdef SPLIT_OSS
    array_1d<double, TDim>& xi_i = mXi[i_node];
#else
    array_1d<double, TDim>& pi_i = mPi[i_node];
#endif
    //const double& h_i = mHavg[i_node];

    //running sum for the diagonal entry of row i
    double l_ii = 0.0;

    //loop over all neighbours
    for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
    {
        unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
        double& p_j = mPn1[j_neighbour];
        double& eps_j = mEps[j_neighbour];
        array_1d<double, TDim>& U_j_curr = mCurrMom[j_neighbour];
        //array_1d<double, TDim>& a_j = mA[j_neighbour];
#ifdef SPLIT_OSS
        array_1d<double, TDim>& xi_j = mXi[j_neighbour];
#else
        array_1d<double, TDim>& pi_j = mPi[j_neighbour];
#endif
        //const double& h_j = mHavg[j_neighbour];

        CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];

#ifdef SYMM_PRESS
        double edge_tau = 0.5*( mTauPressure[i_node] + mTauPressure[j_neighbour]);
#else
        double edge_tau = mTauPressure[i_node];
#endif
        // double edge_tau = CalculateEdgeTau(time_inv,h_i,a_i,h_j,a_j);
        //
        //compute laplacian operator
        double sum_l_ikjk;
        edge_ij.CalculateScalarLaplacian(sum_l_ikjk);
        double sum_l_ikjk_onlystab = sum_l_ikjk * (edge_tau);
        sum_l_ikjk *= (delta_t + edge_tau);

        //assemble right-hand side
        //pressure contribution
        rhs_i -= sum_l_ikjk_onlystab * (p_j - p_i);

        //other part of the residual
#if !defined(SPLIT_OSS)
        // NOTE(review): this branch reads a_i, whose declaration above is
        // commented out — the non-SPLIT_OSS configuration likely does not
        // compile as-is; confirm the intended build flags
        array_1d<double, TDim>& a_j = mA[j_neighbour];
        boost::numeric::ublas::bounded_matrix<double,TDim,TDim>& L = edge_ij.LaplacianIJ;
        for(unsigned int i = 0; i<TDim; i++)
            for(unsigned int j = 0; j<TDim; j++)
                rhs_i -= edge_tau * a_i[j] * L(i,j) * (U_j_curr[j] - U_i_curr[j]);
#endif

        //calculating the divergence of the fract vel
        edge_ij.Sub_D_v(rhs_i,U_i_curr * eps_i,U_j_curr * eps_j);
        // edge_ij.Sub_D_v(rhs_i,a_i*rho_i,a_j*rho_i);

        //high order stabilizing term
        double temp = 0.0;
#ifdef SPLIT_OSS
        // edge_ij.Add_div_v(temp,mTauPressure[i_node]*xi_i,mTauPressure[j_neighbour]*xi_j);
        edge_ij.Add_div_v(temp,xi_i,xi_j);
#else
        edge_ij.Add_div_v(temp,pi_i,pi_j);
#endif
        temp *= mBeta[csr_index];
        rhs_i += edge_tau * temp;
        // rhs_i += temp;

        //assemble laplacian matrix
        mL(i_node, j_neighbour) = sum_l_ikjk;
        l_ii -= sum_l_ikjk;
    }

    mL(i_node, i_node) = l_ii;

    //add density variation contribution
    const double& rho_i_old = mRhoOld[i_node];
    const double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
    rhs_i -= m_i * (rho_i - rho_i_old)/delta_t;

    //add mass contribution for compressible flows
    /* double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
    mL(i_node, i_node) += mC2inv[i_node] * m_i / delta_t;*/
}

//find the max diagonal term (used to scale the penalty below)
double max_diag = 0.0;
for (int i_node = 0; i_node < n_nodes; i_node++)
{
    double L_diag = mL(i_node, i_node);
    if(fabs(L_diag) > fabs(max_diag))
        max_diag = L_diag;
}

//respect pressure boundary conditions by penalization
double huge = max_diag * 1e30;
for (unsigned int i_pressure = 0; i_pressure < mPressureOutletList.size(); i_pressure++)
{
    unsigned int i_node = mPressureOutletList[i_pressure];
    mL(i_node, i_node) = huge;
    rhs[i_node] = 0.0;
}

//modification for level_set: fix dp = 0 on nodes outside the fluid
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());

//selecting nodes for fixing pressure
// std::vector< unsigned int > aux(mdistances.size());
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
//     aux[i_dist] = 0;
// for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
// {
//     if(mdistances[i_dist] > 0)
//     {
//         aux[i_dist] = 1;
//         /* for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_dist]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_dist+1]; csr_index++)
//         {
//             unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//             aux[j_neighbour] = 1;
//         }*/
//     }
// }
for (unsigned int i_dist = 0; i_dist < mdistances.size(); i_dist++)
{
    // if(aux[i_dist] != 0)
    if(mdistances[i_dist] > 0)
    {
        // mPn1[i_dist] = 0.0;
        mL(i_dist, i_dist) = huge;
        rhs[i_dist] = 0.0;
    }
}

//set starting vector for iterative solvers
for (int i_node = 0; i_node < n_nodes; i_node++)
    dp[i_node] = 0.0;

//solve linear equation system L dp = rhs
pLinearSolver->Solve(mL,dp,rhs);
KRATOS_WATCH(*pLinearSolver)

//update pressure
for (int i_node = 0; i_node < n_nodes; i_node++)
    mPn1[i_node] += dp[i_node];
for (unsigned int i_pressure = 0;
i_pressure < mPressureOutletList.size(); i_pressure++)
{
    //re-impose the prescribed pressure on outlet nodes
    unsigned int i_node = mPressureOutletList[i_pressure];
    mPn1[i_node] = mPressureOutlet[i_pressure];
}

//calculate density variation from pressure variation
// for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
//     mRho[i_node] = mRhoOld[i_node] + dp[i_node] * mC2inv[i_node];
// for (unsigned int i_density = 0; i_density < mDensityInlet.size(); i_density++)
// {
//     unsigned int i_node = mVelocityInletList[i_density];
//     mRho[i_node] = mDensityInlet[i_density];
// }

//write pressure and density to Kratos
mr_matrix_container.WriteScalarToDatabase(PRESSURE, mPn1, rNodes);
// mr_matrix_container.WriteScalarToDatabase(DENSITY, mRho, rNodes);

KRATOS_CATCH("")
}

//**********************************************************************************
//function to solve fluid equations - fractional step 3: correct fractional momentum
void SolveStep3()
{
    KRATOS_TRY

    //get number of nodes
    ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
    int n_nodes = rNodes.size();

    // DELETE?? is this necessary?! I am not filling it with anything...
    // and it is new at every time step....
    mr_matrix_container.FillVectorFromDatabase(SEEPAGE_DRAG, mDrag, rNodes);

    //CORRECT FRACTIONAL MOMENTUM

    //define work array
    array_1d<double, TDim> correction;

    //read time step size from Kratos
    ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
    double delta_t = CurrentProcessInfo[DELTA_TIME];

    //compute end of step momentum: U += dt * M^-1 * G(dp)
    #pragma omp parallel for private(correction) firstprivate(delta_t)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        double dist = mdistances[i_node];
        if (dist < 0.0) //node is inside domain ---- if outside do nothing
        {
            array_1d<double, TDim>& U_i_curr = mCurrMom[i_node];
            double delta_p_i = mPn1[i_node] - mPn[i_node];
            const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];

            //setting to zero
            for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                correction[l_comp] = 0.0;

            //compute edge contributions dt*M^(-1)Gp
            for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                double delta_p_j = mPn1[j_neighbour] - mPn[j_neighbour];

                CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
                edge_ij.Add_Gp(correction,delta_p_i,delta_p_j);
                // edge_ij.Sub_Gp(correction,delta_p_i,delta_p_j);
            }

            //compute prefactor
            double coefficient = delta_t * m_inv;

            //correct fractional momentum
            for (unsigned int comp = 0; comp < TDim; comp++)
                U_i_curr[comp] += coefficient * correction[comp];
        }
    }

    ApplyVelocityBC(mCurrMom);
    CalculateVelocity(mvel_n1,mCurrMom,mRho);
    MultiplyByPorosity(mvel_n1, mvel_n1, mEps);

    //write velocity of time step n+1 to Kratos
    mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, rNodes);

    //CALCULATE THE DRAG MATRIX TO PASS TO THE SOLID PART
    CalculateDrag(mA, mCurrMom, mDrag, mViscosity);
    mr_matrix_container.WriteVectorToDatabase(SEEPAGE_DRAG, mDrag, rNodes);

    KRATOS_CATCH("")
}

//************************************
//function to calculate
speed of sound void SolveStep4(ModelPart::NodesContainerType& rNodes) { KRATOS_TRY //get number of nodes int n_nodes = mC2inv.size(); //compute speed of sound using equation of state #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; i_node++) { double& rho_i = mRho[i_node]; double p_i_abs = mPn1[i_node]; mC2inv[i_node] = rho_i / (mGamma * p_i_abs); } KRATOS_CATCH("") } //************************************ void ApplyVelocityBC(CalcVectorType& MomentumArray) { KRATOS_TRY //velocity inlet int inlet_size = mVelocityInletList.size(); #pragma omp parallel for schedule(static) for (int i_velocity = 0; i_velocity < inlet_size; i_velocity++) { unsigned int i_node = mVelocityInletList[i_velocity]; array_1d<double, TDim>& u_i = mVelocityInlet[i_velocity]; double& rho_i = mDensityInlet[i_velocity]; array_1d<double, TDim>& U_i = MomentumArray[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) U_i[comp] = rho_i * u_i[comp]; } //slip condition int slip_size = mSlipBoundaryList.size(); #pragma omp parallel for for (int i_slip = 0; i_slip < slip_size; i_slip++) { unsigned int i_node = mSlipBoundaryList[i_slip]; array_1d<double, TDim>& U_i = MomentumArray[i_node]; array_1d<double, TDim>& an_i = mSlipNormal[i_node]; double projection_length = 0.0; double normalization = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) { projection_length += U_i[comp] * an_i[comp]; normalization += an_i[comp] * an_i[comp]; } projection_length /= normalization; //tangential momentum as difference between original and normal momentum for (unsigned int comp = 0; comp < TDim; comp++) U_i[comp] -= projection_length * an_i[comp]; } //no-slip condition int no_slip_size = mNoSlipBoundaryList.size(); #pragma omp parallel for for (int i_noslip = 0; i_noslip < no_slip_size; i_noslip++) { unsigned int i_node = mNoSlipBoundaryList[i_noslip]; array_1d<double, TDim>& U_i = MomentumArray[i_node]; noalias(U_i) = ZeroVector(TDim); } KRATOS_CATCH("") } //******************************** 
//extrapolates the velocity from the fluid domain (DISTANCE <= 0) onto the
//given number of layers of outside nodes, layer by layer, by averaging the
//velocities of already-visited neighbours
void ExtrapolateVelocities(unsigned int extrapolation_layers)
{
    KRATOS_TRY

    typedef Node<3> PointType;
    typedef PointerVector<PointType > PointVector;
    typedef PointVector::iterator PointIterator;

    //reset is visited flag
    for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        inode->GetValue(IS_VISITED) = 0;
    }

    //generate a container with the layers to be extrapolated
    std::vector< PointVector > layers(extrapolation_layers);

    //detect the nodes inside the fluid surface: a fluid node with at least one
    //outside neighbour belongs to layer 0 (the free surface)
    for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        if( inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain
        {
            GlobalPointersVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES);
            for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++)
            {
                if(i->FastGetSolutionStepValue(DISTANCE) > 0) //add the node as free surface if one of its neighb is outside
                {
                    if( inode->GetValue(IS_VISITED) == 0)
                    {
                        layers[0].push_back( *(inode.base() ) );
                        inode->GetValue(IS_VISITED) = 1;
                    }
                }
            }
        }
    }

    // //reset is visited flag
    // for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
    //         inode != mr_model_part.NodesEnd();
    //         inode++)
    // {
    //     inode->GetValue(IS_VISITED) = 0;
    // }

    //fill the following layers by neighbour relationships
    //each layer fills the following
    for(unsigned int il = 0; il<extrapolation_layers-1; il++)
    {
        for( PointIterator iii=(layers[il]).begin(); iii!=(layers[il]).end(); iii++)
        {
            GlobalPointersVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
            for(GlobalPointersVector< Node<3> >::iterator jjj=neighb_nodes.begin(); jjj !=neighb_nodes.end(); jjj++) //destination = origin1 + value * Minv*origin
            {
                if( jjj->FastGetSolutionStepValue(DISTANCE) > 0 &&
                        jjj->GetValue(IS_VISITED) == 0.0 )
                {
                    //IS_VISITED stores the (1-based) layer index + 1
                    layers[il+1].push_back( Node<3>::Pointer( *(jjj.base() ) ) );
                    jjj->GetValue(IS_VISITED) = double(il+2.0);
                }
            }
        }
    }

    //perform extrapolation layer by layer by making an average
    //of the neighbours of lower order
    array_1d<double,3> aux;
    for(unsigned int il = 1; il<extrapolation_layers; il++)
    {
        for( PointIterator iii=layers[il].begin(); iii!=layers[il].end(); iii++)
        {
            // noalias(aux) = ZeroVector(3);
            // double dist_min = 10000000000.0;
            //
            // array_1d<double,3>& coords_I = iii->Coordinates();
            //
            // GlobalPointersVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
            // for(GlobalPointersVector< Node<3> >::iterator j=neighb_nodes.begin(); j !=neighb_nodes.end(); j++)
            // {
            //     if(j->GetValue(IS_VISITED) < il+1) //if it is on the next layer
            //     {
            //         array_1d<double,3>& coords_J = j->Coordinates();
            //
            //         double dist = 0.0;
            //         for (unsigned int comp = 0; comp < TDim; comp++)
            //             dist += pow(coords_I[comp]-coords_J[comp],2);
            //
            //         if(dist < dist_min)
            //         {
            //             dist_min = dist;
            //             noalias( iii->FastGetSolutionStepValue(VELOCITY) ) = j->FastGetSolutionStepValue(VELOCITY);
            //         }
            //     }
            // }

            //extrapolate the average velocity of the visited lower-layer neighbours
            noalias(aux) = ZeroVector(3);
            double avg_number = 0.0;
            GlobalPointersVector< Node<3> >& neighb_nodes = iii->GetValue(NEIGHBOUR_NODES);
            for(GlobalPointersVector< Node<3> >::iterator i=neighb_nodes.begin(); i !=neighb_nodes.end(); i++)
            {
                if(i->GetValue(IS_VISITED) < il+1 && i->GetValue(IS_VISITED))
                {
                    noalias(aux) += i->FastGetSolutionStepValue(VELOCITY);
                    avg_number += 1.0;
                }
            }
            if(avg_number != 0.0)
                aux /= avg_number;
            noalias( iii->FastGetSolutionStepValue(VELOCITY) ) = aux;
            // noalias( iii->FastGetSolutionStepValue(VELOCITY,1) ) = aux;
        }
    }

    // mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());
    // mr_matrix_container.FillScalarFromDatabase(DISTANCE, mdistances, mr_model_part.Nodes());
    //
    // unsigned int n_nodes = mPn1.size();
    //
    // //pressure coefficient
    //
    // #pragma omp parallel for
    // for (int i_node = 0; i_node < n_nodes; i_node++)
    // {
    //     const double dist_i = mdistances[i_node];
    //
    //     if( dist_i > 0.0)
    //     {
    //         double nn = 0.0;
    //
    //         array_1d<double, TDim>& vel_i = mvel_n1[i_node];
    //
    //         for (unsigned int comp = 0; comp < TDim; comp++)
    //             vel_i[comp] = 0.0;
    //
    //         //compute edge contributions dt*M^(-1)Gp
    //         for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
    //         {
    //             unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
    //
    //             const double dist_j = mdistances[j_neighbour];
    //
    //             if(dist_j <= 0.0)
    //             {
    //                 const array_1d<double, TDim>& vel_j = mvel_n1[j_neighbour];
    //
    //                 for (unsigned int comp = 0; comp < TDim; comp++)
    //                     vel_i[comp] += vel_j[comp];
    //
    //                 nn += 1.0;
    //             }
    //         }
    //
    //         if(nn> 1e-6) //it should be either 0 1 .. N
    //         {
    //             // std::cout << "inode= " << i_node << "nn = " << nn << std::endl;
    //             double inv_nn = 1.0/nn;
    //             for (unsigned int comp = 0; comp < TDim; comp++)
    //                 vel_i[comp] *= inv_nn;
    //             KRATOS_WATCH(vel_i);
    //         }
    //     }
    // }
    //
    // ApplyVelocityBC(mCurrMom);
    //
    // //write velocity of time step n+1 to Kratos
    // mr_matrix_container.WriteVectorToDatabase(VELOCITY, mvel_n1, mr_model_part.Nodes());

    KRATOS_CATCH("")
}

//flips the sign of the level-set DISTANCE on every node
void ChangeSignToDistance()
{
    KRATOS_TRY

    for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        double dist = inode->FastGetSolutionStepValue(DISTANCE);
        inode->FastGetSolutionStepValue(DISTANCE) = -dist;
    }

    KRATOS_CATCH("")
}

//sets IS_VISITED = 1 on nodes whose DISTANCE lies strictly in (min, max), 0 otherwise
void MarkNodesByDistance(double min, double max )
{
    KRATOS_TRY

    for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin();
            inode != mr_model_part.NodesEnd();
            inode++)
    {
        double dist = inode->FastGetSolutionStepValue(DISTANCE);
        if(dist > min && dist < max)
            inode->GetValue(IS_VISITED) = 1;
        else
            inode->GetValue(IS_VISITED) = 0;
    }

    KRATOS_CATCH("")
}

//copies the current value of the given scalar variable into the previous time step
void SaveScalarVariableToOldStep(Variable<double>& rVar)
{
KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->FastGetSolutionStepValue(rVar,1) = inode->FastGetSolutionStepValue(rVar); } KRATOS_CATCH("") } void MarkExternalAndMixedNodes( ) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->GetValue(IS_VISITED) = 0; } //detect the nodes inside the fluid surface for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { if( inode->FastGetSolutionStepValue(DISTANCE) > 0.0) //candidates are only the ones inside the fluid domain { inode->GetValue(IS_VISITED) = 1; GlobalPointersVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES); for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { i->GetValue(IS_VISITED) = 1; } } } KRATOS_CATCH("") } void MarkInternalAndMixedNodes( ) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { inode->GetValue(IS_VISITED) = 0; } //detect the nodes inside the fluid surface for( ModelPart::NodesContainerType::iterator inode = mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { if( inode->FastGetSolutionStepValue(DISTANCE) <= 0.0) //candidates are only the ones inside the fluid domain { inode->GetValue(IS_VISITED) = 1; GlobalPointersVector< Node<3> >& neighb_nodes = inode->GetValue(NEIGHBOUR_NODES); for( GlobalPointersVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { i->GetValue(IS_VISITED) = 1; } } } KRATOS_CATCH("") } void CalculateVariablesDistribution(double rho_dense, double rho_light, double nu_dense, double nu_light, double eps, const array_1d<double,3>& body_force) { KRATOS_TRY for( ModelPart::NodesContainerType::iterator inode = 
mr_model_part.NodesBegin(); inode != mr_model_part.NodesEnd(); inode++) { double dist = inode->FastGetSolutionStepValue(DISTANCE); //calculated smoothed density and viscosity distribution double H; if(dist < -eps) H = 0.0; else if(dist > eps) H = 1.0; else H = (dist+eps)/(2.0*eps) + sin(3.141592*dist/eps)/(2.0*3.141592); double rho_node = rho_dense + (rho_light-rho_dense)*H; inode->FastGetSolutionStepValue(DENSITY) = rho_node; double nu_node = nu_dense + (nu_light-nu_dense)*H; inode->FastGetSolutionStepValue(VISCOSITY) = nu_node; //reset variables outside of the fluid domain if( dist < 0 ) noalias(inode->FastGetSolutionStepValue(BODY_FORCE)) = body_force; else { inode->FastGetSolutionStepValue(PRESSURE) = 0.0; noalias(inode->FastGetSolutionStepValue(BODY_FORCE)) = body_force; noalias(inode->FastGetSolutionStepValue(VELOCITY)) = ZeroVector(3); noalias(inode->FastGetSolutionStepValue(VELOCITY,1)) = ZeroVector(3); } } KRATOS_CATCH("") } //******************************** //function to compute coefficients void CalculateCoefficients(ModelPart::NodesContainerType& rNodes) { KRATOS_TRY unsigned int n_nodes = mPn1.size(); //pressure coefficient #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) mCp[i_node] = (mPn1[i_node] - mPinf) / mQinf; mr_matrix_container.WriteScalarToDatabase(PRESSURE_COEFFICIENT, mCp, rNodes); //Mach number #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) mMach[i_node] = norm_2(mvel_n1[i_node]) * sqrt(mC2inv[i_node]); mr_matrix_container.WriteScalarToDatabase(MACH_NUMBER, mMach, rNodes); KRATOS_CATCH("") } //************************************** //function to calculate the area normals void CalculateNormals(ModelPart::ConditionsContainerType& rConditions) //void CalculateNormals(ModelPart::NodesContainerType& rNodes, MatrixContainer& matrix_container) { KRATOS_TRY //calculate area normals face-by-face array_1d<double,3> area_normal; //2D case if(TDim == 2) { 
for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++) CalculateNormal2D(cond_it,area_normal); } //3D case else if(TDim == 3) { //help vectors for cross product array_1d<double,3> v1; array_1d<double,3> v2; for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++) CalculateNormal3D(cond_it,area_normal,v1,v2); } //(re)initialize normals unsigned int n_nodes = mNodalFlag.size(); mSlipNormal.resize(n_nodes); mPressureNormal.resize(n_nodes); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) { noalias(mSlipNormal[i_node]) = ZeroVector(TDim); noalias(mPressureNormal[i_node]) = ZeroVector(TDim); } //loop over all faces for(ModelPart::ConditionsContainerType::iterator cond_it=rConditions.begin(); cond_it!=rConditions.end(); cond_it++) { //get geometry data of the face Geometry<Node<3> >& face_geometry = cond_it->GetGeometry(); //boolean variables to characterize faces bool is_slip_condition = true; bool is_pressure_face = true; bool is_velocity_inlet = true; for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); //if the face contains at least 1 node that is not of slip or mixed //then it is not a slip face if ( static_cast<unsigned int>(mNodalFlag[i_node]) != 3 && static_cast<unsigned int>(mNodalFlag[i_node]) != 4) is_slip_condition = false; //if the face contains at least one node of pressure it is a pressure face if ( static_cast<unsigned int>(mNodalFlag[i_node]) != 5 && static_cast<unsigned int>(mNodalFlag[i_node]) != 4) is_pressure_face = false; if (static_cast<unsigned int>(mNodalFlag[i_node]) != 1) is_velocity_inlet = false; } //reference for area normal of the face array_1d<double,3>& face_normal = cond_it->GetValue(NORMAL); double node_factor = 1.0/TDim; //slip condition if (is_slip_condition == true) for (unsigned int 
if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); array_1d<double,TDim>& slip_normal = mSlipNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) slip_normal[comp] += node_factor * face_normal[comp]; } //pressure face if (is_pressure_face == true || is_velocity_inlet == true) for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); array_1d<double,TDim>& pressure_normal = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) pressure_normal[comp] += node_factor * face_normal[comp]; } //remaining case ... add pressure to pressure nodes and slip to the others if(is_pressure_face == false && is_slip_condition == false && is_velocity_inlet == false) for (unsigned int if_node = 0; if_node < TDim; if_node++) { unsigned int i_node = static_cast<unsigned int>(face_geometry[if_node].FastGetSolutionStepValue(AUX_INDEX)); if ( static_cast<unsigned int>(mNodalFlag[i_node]) == 5) //pressure node { array_1d<double,TDim>& pressure_normal = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) pressure_normal[comp] += node_factor * face_normal[comp]; } else if ( static_cast<unsigned int>(mNodalFlag[i_node]) == 3) //slip node { array_1d<double,TDim>& slip_normal = mPressureNormal[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) slip_normal[comp] += node_factor * face_normal[comp]; } } } KRATOS_CATCH("") } void SetSpeedOfSound(double c, ModelPart::NodesContainerType& rNodes) { KRATOS_TRY unsigned int n_nodes = mC2inv.size(); double temp = 1.0 / (c * c); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) mC2inv[i_node] = temp; //WriteScalarToDatabase(LIFT_COEFFICIENT, mC2inv, rNodes); KRATOS_CATCH("") } void SetFreeFlowConditions(array_1d<double, 3> velocity, double pressure, double density, double gamma) { 
KRATOS_TRY mUinf = velocity; mPinf = pressure; mRhoinf = density; mGamma = gamma; mQinf = 0.5 * mRhoinf * norm_2(mUinf) * norm_2(mUinf); mMachinf = norm_2(mUinf) / (sqrt(mGamma*mPinf/mRhoinf)); unsigned int n_nodes = mPn1.size(); for (unsigned int i_node = 0; i_node < n_nodes; i_node++) mC2inv[i_node] = mRho[i_node] / (mGamma * mPn1[i_node]); for (unsigned int i_velocity = 0; i_velocity < mVelocityInletList.size(); i_velocity++) noalias(mVelocityInlet[i_velocity]) = velocity; KRATOS_CATCH("") } //********************************************************************** void CalculateVelocity( CalcVectorType& velocity, const CalcVectorType& momentum, const ValuesVectorType& rho) { int loop_size = velocity.size(); #pragma omp parallel for for (int i_node = 0; i_node < loop_size; i_node++) { double inv_rho = 1.0/mRho[i_node]; array_1d<double,TDim>& vel = velocity[i_node]; const array_1d<double,TDim>& mom = momentum[i_node]; for (unsigned int comp = 0; comp < TDim; comp++) vel[comp] = mom[comp] * inv_rho; } } void SetDissipationLength(double h) { KRATOS_TRY mDissipationLength = h; KRATOS_CATCH("") } void CalculateDrag (CalcVectorType& convective_velocity, CalcVectorType& momentum, CalcVectorType& drag, const ValuesVectorType& viscosity) { mViscosity = viscosity; int n_nodes = mViscosity.size(); for ( int i_node = 0; i_node < n_nodes; i_node++) { double dist = mdistances[i_node]; if (dist < 0.0) //node is inside domain ---- if outside do nothing { const array_1d<double, TDim>& a_i = convective_velocity[i_node]; const array_1d<double, TDim>& U_i = momentum[i_node]; array_1d<double, TDim>& Drag_i = drag[i_node]; const double& nu_i = viscosity[i_node]; //porous contribution double eps = mEps[i_node]; double d = mD[i_node]; //diameter of the particle double kinv = 150.0*(1.0-eps)*(1.0-eps)/(eps*eps*eps*d*d); double norm_u_2 = 0.0; for (unsigned int comp = 0; comp < TDim; comp++) norm_u_2 = a_i[comp]*a_i[comp]; //CORRECTED Term double nonlin_term = kinv * nu_i * eps + 1.75 * 
norm_u_2 * sqrt(kinv / ( eps * 150.0)); for (unsigned int comp = 0; comp < TDim; comp++) Drag_i[comp] = nonlin_term * U_i[comp]; } } } //******************************* //function to free dynamic memory void Clear() { KRATOS_TRY mWork.clear(); mvel_n.clear(); mvel_n1.clear(); mA.clear(); mPn.clear(); mPn1.clear(); mHmin.clear(); mHavg.clear(); //mAreaNormal.clear(); //mvel_nitNormal.clear(); mPressureNormal.clear(); mSlipNormal.clear(); mNodalFlag.clear(); mVelocityInletList.clear(); mVelocityInlet.clear(); mPressureOutletList.clear(); mPressureOutlet.clear(); mSlipBoundaryList.clear(); mNoSlipBoundaryList.clear(); mL.clear(); mTauPressure.clear(); mTauConvection.clear(); mViscosity.clear(); mEps.clear(); mEpsOld.clear(); KRATOS_CATCH("") } //****************************************** void CalculateForces() { KRATOS_TRY //variables for node based data handling ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes(); int n_nodes = rNodes.size(); //storage of nodal values in local variables CalcVectorType rhs; rhs.resize(n_nodes); //read velocity and pressure data from Kratos mr_matrix_container.FillVectorFromDatabase(VELOCITY, mvel_n1, rNodes); mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mvel_n, rNodes); mr_matrix_container.FillScalarFromDatabase(PRESSURE, mPn1, rNodes); mr_matrix_container.FillOldScalarFromDatabase(PRESSURE, mPn, rNodes); mr_matrix_container.FillScalarFromDatabase(DENSITY, mRho, rNodes); mr_matrix_container.FillOldScalarFromDatabase(DENSITY, mRhoOld, rNodes); mr_matrix_container.FillVectorFromDatabase(BODY_FORCE, mBodyForce, rNodes); mr_matrix_container.FillScalarFromDatabase(VISCOSITY, mViscosity, rNodes); //read time step size from Kratos ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo(); double delta_t = CurrentProcessInfo[DELTA_TIME]; #pragma omp parallel for for ( int i_node = 0; i_node < n_nodes; i_node++) { // -> mCurrMom //compute the momentum at the current step -> mCurrMom double& rho_i = mRho[i_node]; 
const array_1d<double, TDim>& u_i = mvel_n1[i_node];
        array_1d<double, TDim>& U_i = mCurrMom[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            U_i[comp] = rho_i * u_i[comp];

        // -> mInitMom
        double& rho_i_old = mRhoOld[i_node];
        //compute the momentum at the beginning of the step
        const array_1d<double, TDim>& u_i_old = mvel_n[i_node];
        array_1d<double, TDim>& U_i_old = mInitMom[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            U_i_old[comp] = rho_i_old * u_i_old[comp];

        //compute volumetric body force (scale the specific body force by density)
        array_1d<double, TDim>& f_i = mBodyForce[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            f_i[comp] *= rho_i;
    }

    //compute advective velocity - area average of the current velocity
    CalculateAdvectiveVelocity(mvel_n1, mA, msmooth_convective_velocity);

    //compute intrinsic time (stabilization taus, clamped to [delta_t, 100*delta_t])
    double time_inv = 1.0/delta_t;
    #pragma omp parallel for firstprivate(time_inv)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        // double& h_i = mHavg[i_node];
        double& h_i = mHmin[i_node];
        array_1d<double, TDim>& a_i = mA[i_node];
        const double nu_i = mViscosity[i_node];
        double vel_norm = norm_2(a_i);
        mTauPressure[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
        mTauConvection[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv + nu_i /(h_i*h_i) );
        if (mTauPressure[i_node] < delta_t)
            mTauPressure[i_node] = delta_t;
        else if(mTauPressure[i_node] > 100.0*delta_t)
            mTauPressure[i_node] = 100.0*delta_t;
    }

    //compute pressure switch (shock-capturing edge weights, skipped on the first step)
    if (mFirstStep == false)
        if(minclude_shock_capturing == true)
            ComputeMonotonicityPreserving();

    mr_matrix_container.SetToZero(rhs);
    CalculateRHS( mCurrMom, mPn1, mA, mBodyForce, mViscosity, rhs);

    ValuesVectorType& lumped_mass = mr_matrix_container.GetLumpedMass();

    //add inertia term: rhs -= M_lumped * (U^{n+1} - U^{n}) / dt
    #pragma omp parallel for firstprivate(time_inv)
    for (int i_node = 0; i_node < n_nodes; i_node++)
    {
        array_1d<double, TDim>& rhs_i = rhs[i_node];
        const array_1d<double, TDim>& curr_mom_i = mCurrMom[i_node];
        const array_1d<double, TDim>& old_mom_i = mInitMom[i_node];
        for (unsigned int comp = 0; comp < TDim; comp++)
            rhs_i[comp]-=time_inv*lumped_mass[i_node]*(curr_mom_i[comp]-old_mom_i[comp]);
        //change of sign
        /* for (unsigned int comp = 0; comp < TDim; comp++)
        rhs_i[comp] = -rhs_i[comp];*/
    }

    mr_matrix_container.WriteVectorToDatabase(FORCE, rhs, mr_model_part.Nodes());

    KRATOS_CATCH("")
}

private:
    //edge-based data structures (CSR connectivity, edge masses, lumped mass)
    MatrixContainer& mr_matrix_container;
    ModelPart& mr_model_part;
    //solver options set at construction
    bool msmooth_convective_velocity;
    bool minclude_shock_capturing;

    //nodal values
    //velocity vector U at time steps n and n+1
    CalcVectorType mWork, mvel_n, mvel_n1, mInitMom, mCurrMom, mFracMom, mx;
    //pressure vector p at time steps n and n+1
    ValuesVectorType mPn, mPn1, mViscosity;
    //monotony preserving term
    ValuesVectorType mBeta;
    //density
    ValuesVectorType mRho, mRhoOld;
    //compressibility parameter (nodal 1/c^2)
    ValuesVectorType mC2inv;
    //free-stream state (see SetFreeFlowConditions)
    double mGamma;
    double mQinf;
    array_1d<double, TDim> mUinf;
    double mPinf;
    double mRhoinf;
    double mMachinf;
    //coefficients (pressure coefficient, Mach number, level-set distances)
    ValuesVectorType mCp, mMach, mdistances;
    //advective velocity vector
    CalcVectorType mA;
    //minimum length of the edges surrounding edges surrounding each nodal point
    ValuesVectorType mHmin;
    ValuesVectorType mHavg;
    //porosity fields and particle diameter for the porous drag model
    ValuesVectorType mEps;
    ValuesVectorType mEpsOld;
    ValuesVectorType mD;
    //per-edge coordinate differences (filled in CalculateEdgeLengths)
    CalcVectorType mEdgeDimensions;
    double mDissipationLength;
    //area normal
    //CalcVectorType mAreaNormal, mvel_nitNormal;
    CalcVectorType mPressureNormal, mSlipNormal;
    //projection terms
    CalcVectorType mPi, mXi;
    CalcVectorType mBodyForce, mDrag;
    //flag for first time step
    bool mFirstStep;
    //flag to differentiate interior and boundary nodes
    ValuesVectorType mNodalFlag;
    //lists of nodes with different types of boundary conditions
    IndicesVectorType mSlipBoundaryList, mNoSlipBoundaryList, mPressureOutletList, mVelocityInletList;
    IndicesVectorType mDissipationList;
    CalcVectorType mVelocityInlet;
    ValuesVectorType mPressureOutlet, mDensityInlet;
    //list for pressure boundary faces
    ModelPart::ConditionsContainerType mPressureFaces;
    //intrinsic time step size
    ValuesVectorType mTauPressure;
    ValuesVectorType mTauConvection;
    //variables for resolving pressure equation
    //laplacian matrix
    TSystemMatrixType mL;

    //***********************************************************
    //functions to calculate area normals for boundary conditions
    //2D: normal of an edge, rotated -90 degrees, length = edge length
    void CalculateNormal2D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double,3>& area_normal)
    {
        Geometry<Node<3> >& face_geometry = (cond_it)->GetGeometry();

        area_normal[0] = face_geometry[1].Y() - face_geometry[0].Y();
        area_normal[1] = - (face_geometry[1].X() - face_geometry[0].X());
        area_normal[2] = 0.00;

        noalias((cond_it)->GetValue(NORMAL)) = area_normal;
    }

    //3D: -0.5 * (v1 x v2) of two triangle edge vectors, length = face area
    void CalculateNormal3D(ModelPart::ConditionsContainerType::iterator cond_it, array_1d<double,3>& area_normal, array_1d<double,3>& v1,array_1d<double,3>& v2 )
    {
        Geometry<Node<3> >& face_geometry = (cond_it)->GetGeometry();

        v1[0] = face_geometry[1].X() - face_geometry[0].X();
        v1[1] = face_geometry[1].Y() - face_geometry[0].Y();
        v1[2] = face_geometry[1].Z() - face_geometry[0].Z();

        v2[0] = face_geometry[2].X() - face_geometry[0].X();
        v2[1] = face_geometry[2].Y() - face_geometry[0].Y();
        v2[2] = face_geometry[2].Z() - face_geometry[0].Z();

        MathUtils<double>::CrossProduct(area_normal,v1,v2);
        area_normal *= -0.5;

        noalias((cond_it)->GetValue(NORMAL)) = area_normal;
    }

    //******************************************
    //function to calculate advective velocities
    //either a consistent-mass-weighted average of the neighbour velocities
    //(smooth_convective_velocity == true) or a plain copy of rVelocity
    void CalculateAdvectiveVelocity(const CalcVectorType& rVelocity, CalcVectorType& rAdvectiveVelocity, bool smooth_convective_velocity)
    {
        KRATOS_TRY

        if(smooth_convective_velocity == true)
        {
            //get number of nodes
            int n_nodes = rVelocity.size();

            //initialize advective velocities
            /* #pragma omp parallel for
            for (int i_node = 0; i_node < n_nodes; i_node++)
            noalias(rAdvectiveVelocity[i_node]) = ZeroVector(TDim);*/

            //loop over all nodes
            #pragma omp parallel for
            for (int i_node = 0; i_node < n_nodes; i_node++)
            {
                //reference for advective velocity of node i
                array_1d<double, TDim>& a_i =
rAdvectiveVelocity[i_node];
                noalias(a_i) = ZeroVector(TDim);

                //setting weighting mass to zero
                double mass_sum = 0.0;

                //loop over all neighbours
                for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
                {
                    //add consistent mass of edge ij to denominator
                    double& m_ij = mr_matrix_container.GetEdgeValues()[csr_index].Mass;
                    mass_sum += m_ij;

                    //reference for velocity of neighbouring node j
                    unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                    const array_1d<double, TDim>& u_j = rVelocity[j_neighbour];

                    //add contributions of numerator componentwisely
                    for (unsigned int comp = 0; comp < TDim; comp++)
                        a_i[comp] += m_ij * u_j[comp];
                }

                //for Dirichlet boundary nodes lumped values have to be included
                //attention: nodes with Neumann pressure condition are treated as interior points!
                if ((static_cast<unsigned int>(mNodalFlag[i_node]) != 0) && (static_cast<unsigned int>(mNodalFlag[i_node]) != 5) && (static_cast<unsigned int>(mNodalFlag[i_node]) != 4))
                {
                    //taking into account diagonal matrix elements
                    double m_ii = mr_matrix_container.GetLumpedMass()[i_node] - mass_sum;
                    const array_1d<double, TDim>& u_i = rVelocity[i_node];
                    //add contribution to advective velocity
                    for (unsigned int comp = 0; comp < TDim; comp++)
                        a_i[comp] += m_ii * u_i[comp];
                    //add contribution to mass sum
                    mass_sum += m_ii;
                }

                //weighting contributions by the mass sum of all (surrounding) edges
                for (unsigned int comp = 0; comp < TDim; comp++)
                    a_i[comp] /= mass_sum;
            }
        }
        else
        {
            //no smoothing: advective velocity is a plain copy of rVelocity
            //get number of nodes
            int n_nodes = rVelocity.size();
            #pragma omp parallel for
            for (int i_node = 0; i_node < n_nodes; i_node++)
            {
                array_1d<double, TDim>& aaa = rAdvectiveVelocity[i_node];
                const array_1d<double, TDim>& u_i = rVelocity[i_node];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    aaa[comp] = u_i[comp];
            }
            // noalias(rAdvectiveVelocity[i_node]) = mvel_n1[i_node];
        }
        // for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        // noalias(rAdvectiveVelocity[i_node]) = mvel_n1[i_node];

        KRATOS_CATCH("")
    }

    //*********************************************************
    //function to calculate minimum length of surrounding edges
    //also fills mHavg (element-size estimate from the lumped mass)
    //and mEdgeDimensions (per-edge coordinate differences)
    void CalculateEdgeLengths(ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //get number of nodes
        unsigned int n_nodes = rNodes.size();
        //reserve memory for storage of nodal coordinates
        std::vector< array_1d<double, 3> > position;
        position.resize(n_nodes);

        //get position of all nodes
        for (typename ModelPart::NodesContainerType::iterator node_it=rNodes.begin(); node_it!=rNodes.end(); node_it++)
        {
            //get the global index of the node
            unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            //save its coordinates locally
            noalias(position[i_node]) = node_it->Coordinates();

            //initialize minimum edge length with relatively big values
            mHmin[i_node] = 1e10;
        }

        //copy the per-node minimum edge lengths from the matrix container
        ValuesVectorType& aaa = mr_matrix_container.GetHmin();
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            mHmin[i_node] = aaa[i_node];
        }

        //take unstructured meshes into account
        //h_avg ~ (d! * lumped mass)^(1/d): sqrt(2 m) in 2D, cbrt(6 m) in 3D
        if(TDim == 2)
        {
            for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
            {
                double& h_i = mHavg[i_node];
                double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
                // double& rho_i = mRho[i_node];

                h_i = sqrt(2.0*m_i);
            }
        }
        else if(TDim == 3)
        {
            for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
            {
                double& h_i = mHavg[i_node];
                double& m_i = mr_matrix_container.GetLumpedMass()[i_node];
                // double& rho_i = mRho[i_node];

                h_i = pow (6.0*m_i, 1.0/3.0);
            }
        }

        //compute edge coordinates
        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double, 3>& pos_i = position[i_node];

            for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
                array_1d<double, 3>& pos_j = position[j_neighbour];

                array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
                for (unsigned int comp = 0; comp < TDim; comp++)
                    l_k[comp] = pos_i[comp] - pos_j[comp];
            }
        }

        KRATOS_CATCH("")
    }

    //*******************************************************
    //function to calculate monotonicity preserving term beta
    //per-edge shock-capturing weight in [0,1]: beta -> 1 where the pressure
    //jump and the projected gradient disagree (near discontinuities)
    void ComputeMonotonicityPreserving()
    {
        KRATOS_TRY

        unsigned int n_nodes = mPn1.size();

        for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
        {
            double& p_i = mPn1[i_node];
            array_1d<double, TDim>& xi_i = mXi[i_node];

            for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
            {
                unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];

                double& p_j = mPn1[j_neighbour];
                array_1d<double, TDim>& l_k = mEdgeDimensions[csr_index];
                array_1d<double, TDim>& xi_j = mXi[j_neighbour];

                double press_diff = p_i - p_j;
                //average projected pressure gradient along the edge
                double proj_sum = 0.0;
                for (unsigned int comp = 0; comp < TDim; comp++)
                    proj_sum += l_k[comp] * (xi_i[comp] + xi_j[comp]);
                proj_sum *= 0.5;

                double temp = fabs(press_diff) + fabs(proj_sum);
                if (temp <= 1e-10)
                    mBeta[csr_index] = 1.0;
                else
                    // mBeta[csr_index] = 1.0 - fabs(fabs(press_diff) - fabs(proj_sum)) / temp;
                    mBeta[csr_index] = 1.0 - fabs(press_diff + proj_sum) / temp;
                /*mBeta[csr_index]=1.0;*/
                /* if (mNodalFlag[i_node] == 1.0 || mNodalFlag[i_node] == 4.0 || mNodalFlag[i_node] == 5.0
                || mNodalFlag[j_neighbour] == 1.0 || mNodalFlag[j_neighbour] == 4.0 || mNodalFlag[j_neighbour] == 5.0)
                mBeta[csr_index] = 0.0;*/
                /*if (mBeta[csr_index]<0.0 && mBeta[csr_index]>1.0)
                KRATOS_WATCH(mBeta[csr_index]);*/
            }
        }

        KRATOS_CATCH("")
    }

    //edge-averaged intrinsic time: tau = 1 / (2|v_avg|/h_avg + 1/dt + 1e-6/h_avg^2)
    inline double CalculateEdgeTau( const double time_inv, const double h_i, const array_1d<double,TDim>& v_i, const double h_j, const array_1d<double,TDim>& v_j)
    {
        double h_avg = 0.5 * (h_i+h_j);

        //calculating norm of the average of the two nodal velocities
        double norm_avg = 0.0;
        for(unsigned int k=0; k<TDim; k++)
            norm_avg += pow(v_i[k] + v_j[k],2);
        norm_avg *= 0.25;
        norm_avg = sqrt(norm_avg);

        return 1.0 / (2.0 * norm_avg/h_avg + time_inv + 1e-6 /(h_avg*h_avg) );
    }

    //componentwise r_destination = r_origin / porosity
    void DivideByPorosity(CalcVectorType& r_destination,const CalcVectorType& r_origin, const ValuesVectorType& porosity)
    {
        int n_nodes = r_origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double, TDim>& dest = r_destination[i_node];
            const array_1d<double, TDim>& orig = r_origin[i_node];
            double factor = 1.0/porosity[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = factor * orig[comp];
        }
    }

    //componentwise r_destination = r_origin * porosity
    void MultiplyByPorosity(CalcVectorType& r_destination, const CalcVectorType& r_origin, const ValuesVectorType& porosity)
    {
        int n_nodes = r_origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < n_nodes; i_node++)
        {
            array_1d<double, TDim>& dest = r_destination[i_node];
            const array_1d<double, TDim>& orig = r_origin[i_node];
            double factor = porosity[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = factor * orig[comp];
        }
    }
};
} //namespace Kratos

#endif //KRATOS_LEVELSET_FLUID_SOLVER_H_INCLUDED defined
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) { for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(8*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(8*t3+Nx-5,64));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(64*t4,4*t5+4); ubv=min(64*t4+63,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
core_clange.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlange.c, normal z -> c, Fri Sep 28 17:38:21 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/***************************************************************************//**
 *
 * @ingroup core_lange
 *
 *  Calculates max, one, infinity or Frobenius norm of a given matrix.
 *
 *******************************************************************************
 *
 * @param[in] norm
 *          - PlasmaMaxNorm: Max norm
 *          - PlasmaOneNorm: One norm
 *          - PlasmaInfNorm: Infinity norm
 *          - PlasmaFrobeniusNorm: Frobenius norm
 *
 * @param[in] m
 *          The number of rows of the matrix A. m >= 0. When m = 0,
 *          the returned value is set to zero.
 *
 * @param[in] n
 *          The number of columns of the matrix A. n >= 0. When n = 0,
 *          the returned value is set to zero.
 *
 * @param[in] A
 *          The m-by-n matrix A.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,m).
 *
 * @param[in] work
 *          The auxiliary work array.
 *
 * @param[out] value
 *          The specified norm of the given matrix A
 *
 ******************************************************************************/
/* thin wrapper that defers the whole computation to LAPACKE */
__attribute__((weak))
void plasma_core_clange(plasma_enum_t norm, int m, int n,
                 const plasma_complex32_t *A, int lda,
                 float *work, float *value)
{
    *value = LAPACKE_clange_work(LAPACK_COL_MAJOR,
                                 lapack_const(norm),
                                 m, n, A, lda, work);
}

/******************************************************************************/
/* OpenMP-task version: the norm is computed in a task that depends on the
 * whole tile A (in) and the scalar result (out); skipped if the sequence
 * already carries an error status */
void plasma_core_omp_clange(plasma_enum_t norm, int m, int n,
                     const plasma_complex32_t *A, int lda,
                     float *work, float *value,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_clange(norm, m, n, A, lda, work, value);
    }
}

/******************************************************************************/
/* auxiliary task: instead of a single scalar it produces the per-column
 * absolute sums (one norm, n values) or per-row absolute sums (infinity
 * norm, m values), to be reduced across tiles by the caller; other norm
 * values are deliberately ignored here.
 * NOTE(review): the one-norm branch seeds value[j] with cabsf(A[lda*j]),
 * which reads A even when m == 0 — confirm callers never pass m == 0. */
void plasma_core_omp_clange_aux(plasma_enum_t norm, int m, int n,
                         const plasma_complex32_t *A, int lda,
                         float *value,
                         plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int j = 0; j < n; j++) {
                    value[j] = cabsf(A[lda*j]);
                    for (int i = 1; i < m; i++) {
                        value[j] += cabsf(A[lda*j+i]);
                    }
                }
            }
        }
        break;

    case PlasmaInfNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int i = 0; i < m; i++)
                    value[i] = 0.0;

                for (int j = 0; j < n; j++) {
                    for (int i = 0; i < m; i++) {
                        value[i] += cabsf(A[lda*j+i]);
                    }
                }
            }
        }
        break;
    }
}
Parallel.h
#pragma once #include <ATen/ATen.h> #include <cstddef> #ifdef _OPENMP #include <omp.h> #endif namespace at { namespace internal { // This parameter is heuristically chosen to determine the minimum number of // work that warrants paralellism. For example, when summing an array, it is // deemed inefficient to parallelise over arrays shorter than 32768. Further, // no parallel algorithm (such as parallel_reduce) should split work into // smaller than GRAIN_SIZE chunks. constexpr int64_t GRAIN_SIZE = 32768; } // namespace internal inline int64_t divup(int64_t x, int64_t y) { return (x + y - 1) / y; } template <class F> inline void parallel_for( const int64_t begin, const int64_t end, const int64_t grain_size, const F f) { #ifdef _OPENMP #pragma omp parallel if ((end - begin) >= grain_size) { int64_t num_threads = omp_get_num_threads(); int64_t tid = omp_get_thread_num(); int64_t chunk_size = divup((end - begin), num_threads); int64_t begin_tid = begin + tid * chunk_size; if (begin_tid < end) f(begin_tid, std::min(end, chunk_size + begin_tid)); } #else f(begin, end); #endif } template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F f, const SF sf) { if (get_num_threads() == 1) { return f(begin, end, ident); } else { const int64_t num_results = divup((end - begin), grain_size); std::vector<scalar_t> results(num_results); scalar_t* results_data = results.data(); #pragma omp parallel for if ((end - begin) >= grain_size) for (int64_t id = 0; id < num_results; id++) { int64_t i = begin + id * grain_size; results_data[id] = f(i, i + std::min(end - i, grain_size), ident); } return std::accumulate( results_data, results_data + results.size(), ident, sf); } } } // namespace at
GB_binop__islt_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// The function bodies below are template files included with the GB_* macros
// defined here; the macros select the ISLT operator on uint32_t operands.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):    GB_AaddB__islt_uint32
// A.*B function (eWiseMult):  GB_AemultB__islt_uint32
// A*D function (colscale):    GB_AxD__islt_uint32
// D*A function (rowscale):    GB_DxB__islt_uint32
// C+=B function (dense accum):   GB_Cdense_accumB__islt_uint32
// C+=b function (dense accum):   GB_Cdense_accumb__islt_uint32
// C+=A+B function (dense ewise3):    (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_uint32
// C=scalar+B     GB_bind1st__islt_uint32
// C=scalar+B'    GB_bind1st_tran__islt_uint32
// C=A+scalar     GB_bind2nd__islt_uint32
// C=A'+scalar    GB_bind2nd_tran__islt_uint32

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: ISLT returns 0/1 in the uint32_t result type
#define GB_BINOP(z, x, y) \
    z = (x < y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_UINT32 || GxB_NO_ISLT_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__islt_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__islt_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t bij = Bx [p] ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__islt_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    uint32_t aij = Ax [pA] ;       \
    Cx [pC] = (x < aij) ;          \
}

GrB_Info GB_bind1st_tran__islt_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)          \
{                                  \
    uint32_t aij = Ax [pA] ;       \
    Cx [pC] = (aij < y) ;          \
}

GrB_Info GB_bind2nd_tran__islt_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
volumeramdistancetransform.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2016-2020 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/ #ifndef IVW_VOLUMERAMDISTANCETRANSFORM_H #define IVW_VOLUMERAMDISTANCETRANSFORM_H #include <modules/base/basemoduledefine.h> #include <inviwo/core/common/inviwo.h> #include <inviwo/core/util/indexmapper.h> #include <inviwo/core/datastructures/volume/volume.h> #include <inviwo/core/datastructures/volume/volumeramprecision.h> #ifndef __clang__ #include <omp.h> #endif namespace inviwo { namespace util { /** * Implementation of Euclidean Distance Transform according to Saito's algorithm: * T. Saito and J.I. Toriwaki. New algorithms for Euclidean distance transformations * of an n-dimensional digitized picture with applications. Pattern Recognition, 27(11). * pp. 1551-1565, 1994. * http://www.cs.jhu.edu/~misha/ReadingSeminar/Papers/Saito94.pdf * * Calculates the distance in grid index space * * Predicate is a function of type (const T &value) -> bool to deside if a value in the input * is a "feature". * * ValueTransform is a function of type (const U& squaredDist) -> U that is appiled to all * squared distance values at the end of the calculation. * * ProcessCallback is a function of type (double progress) -> void that is called with a value * from 0 to 1 to indicate the progress of the calculation. 
*/ template <typename T, typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume, VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis, const size3_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback); template <typename T, typename U> void volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume, VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis, const size3_t upsample); template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField, const size3_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback); template <typename U, typename ProgressCallback> void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField, const size3_t upsample, double threshold, bool normalize, bool flip, bool square, double scale, ProgressCallback callback); template <typename U> void volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField, const size3_t upsample, double threshold, bool normalize, bool flip, bool square, double scale); } // namespace util template <typename T, typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume, VolumeRAMPrecision<U> *outDistanceField, const Matrix<3, U> basis, const size3_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback) { #ifndef __clang__ omp_set_num_threads(std::thread::hardware_concurrency()); #endif using int64 = glm::int64; auto square = [](auto a) { return a * a; }; callback(0.0); const T *src = inVolume->getDataTyped(); U *dst = outDistanceField->getDataTyped(); const i64vec3 srcDim{inVolume->getDimensions()}; 
const i64vec3 dstDim{outDistanceField->getDimensions()}; const i64vec3 sm{upsample}; const auto squareBasis = glm::transpose(basis) * basis; const Vector<3, U> squareBasisDiag{squareBasis[0][0], squareBasis[1][1], squareBasis[2][2]}; const Vector<3, U> squareVoxelSize{squareBasisDiag / Vector<3, U>{dstDim * dstDim}}; const Vector<3, U> invSquareVoxelSize{Vector<3, U>{1.0f} / squareVoxelSize}; { const auto maxdist = glm::compMax(squareBasisDiag); bool orthogonal = true; for (size_t i = 0; i < squareBasis.length(); i++) { for (size_t j = 0; j < squareBasis.length(); j++) { if (i != j) { if (std::abs(squareBasis[i][j]) > 10.0e-8 * maxdist) { orthogonal = false; break; } } } } if (!orthogonal) { LogWarnCustom( "volumeRAMDistanceTransform", "Calculating the distance transform on a non-orthogonal volume will not give " "correct values"); } } if (srcDim * sm != dstDim) { throw Exception( "DistanceTransformRAM: Dimensions does not match src = " + toString(srcDim) + " dst = " + toString(dstDim) + " scaling = " + toString(sm), IVW_CONTEXT_CUSTOM("volumeRAMDistanceTransform")); } util::IndexMapper<3, int64> srcInd(srcDim); util::IndexMapper<3, int64> dstInd(dstDim); auto is_feature = [&](const int64 x, const int64 y, const int64 z) { return predicate(src[srcInd(x / sm.x, y / sm.y, z / sm.z)]); }; // first pass, forward and backward scan along x // result: min distance in x direction #pragma omp parallel for for (int64 z = 0; z < dstDim.z; ++z) { for (int64 y = 0; y < dstDim.y; ++y) { // forward U dist = static_cast<U>(dstDim.x); for (int64 x = 0; x < dstDim.x; ++x) { if (!is_feature(x, y, z)) { ++dist; } else { dist = U(0); } dst[dstInd(x, y, z)] = squareVoxelSize.x * square(dist); } // backward dist = static_cast<U>(dstDim.x); for (int64 x = dstDim.x - 1; x >= 0; --x) { if (!is_feature(x, y, z)) { ++dist; } else { dist = U(0); } dst[dstInd(x, y, z)] = std::min<U>(dst[dstInd(x, y, z)], squareVoxelSize.x * square(dist)); } } } // second pass, scan y direction // for each voxel 
v(x,y,z) find min_i(data(x,i,z) + (y - i)^2), 0 <= i < dimY // result: min distance in x and y direction callback(0.3); #pragma omp parallel { std::vector<U> buff; buff.resize(dstDim.y); #pragma omp for for (int64 z = 0; z < dstDim.z; ++z) { for (int64 x = 0; x < dstDim.x; ++x) { // cache column data into temporary buffer for (int64 y = 0; y < dstDim.y; ++y) { buff[y] = dst[dstInd(x, y, z)]; } for (int64 y = 0; y < dstDim.y; ++y) { auto d = buff[y]; if (d != U(0)) { const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.y)) + 1; const auto rStart = std::min(rMax, y - 1); const auto rEnd = std::min(rMax, dstDim.y - y); for (int64 n = -rStart; n < rEnd; ++n) { const auto w = buff[y + n] + squareVoxelSize.y * square(n); if (w < d) d = w; } } dst[dstInd(x, y, z)] = d; } } } } // third pass, scan z direction // for each voxel v(x,y,z) find min_i(data(x,y,i) + (z - i)^2), 0 <= i < dimZ // result: min distance in x and y direction callback(0.6); #pragma omp parallel { std::vector<U> buff; buff.resize(dstDim.z); #pragma omp for for (int64 y = 0; y < dstDim.y; ++y) { for (int64 x = 0; x < dstDim.x; ++x) { // cache column data into temporary buffer for (int64 z = 0; z < dstDim.z; ++z) { buff[z] = dst[dstInd(x, y, z)]; } for (int64 z = 0; z < dstDim.z; ++z) { auto d = buff[z]; if (d != U(0)) { const auto rMax = static_cast<int64>(std::sqrt(d * invSquareVoxelSize.z)) + 1; const auto rStart = std::min(rMax, z - 1); const auto rEnd = std::min(rMax, dstDim.z - z); for (int64 n = -rStart; n < rEnd; ++n) { const auto w = buff[z + n] + squareVoxelSize.z * square(n); if (w < d) d = w; } } dst[dstInd(x, y, z)] = d; } } } } // scale data callback(0.9); const int64 volSize = dstDim.x * dstDim.y * dstDim.z; #pragma omp parallel for for (int64 i = 0; i < volSize; ++i) { dst[i] = valueTransform(dst[i]); } callback(1.0); } template <typename T, typename U> void util::volumeRAMDistanceTransform(const VolumeRAMPrecision<T> *inVolume, VolumeRAMPrecision<U> *outDistanceField, 
const Matrix<3, U> basis, const size3_t upsample) { util::volumeRAMDistanceTransform( inVolume, outDistanceField, basis, upsample, [](const T &val) { return util::glm_convert_normalized<double>(val) > 0.5; }, [](const U &squareDist) { return static_cast<U>(std::sqrt(static_cast<double>(squareDist))); }, [](double f) {}); } template <typename U, typename Predicate, typename ValueTransform, typename ProgressCallback> void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField, const size3_t upsample, Predicate predicate, ValueTransform valueTransform, ProgressCallback callback) { const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>(); inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) { volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, predicate, valueTransform, callback); }); } template <typename U, typename ProgressCallback> void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField, const size3_t upsample, double threshold, bool normalize, bool flip, bool square, double scale, ProgressCallback progress) { const auto inputVolumeRep = inVolume->getRepresentation<VolumeRAM>(); inputVolumeRep->dispatch<void, dispatching::filter::Scalars>([&](const auto vrprecision) { using ValueType = util::PrecisionValueType<decltype(vrprecision)>; const auto predicateIn = [threshold](const ValueType &val) { return val < threshold; }; const auto predicateOut = [threshold](const ValueType &val) { return val > threshold; }; const auto normPredicateIn = [threshold](const ValueType &val) { return util::glm_convert_normalized<double>(val) < threshold; }; const auto normPredicateOut = [threshold](const ValueType &val) { return util::glm_convert_normalized<double>(val) > threshold; }; const auto valTransIdent = [scale](const float &squareDist) { return static_cast<float>(scale * squareDist); }; const auto valTransSqrt = 
[scale](const float &squareDist) { return static_cast<float>(scale * std::sqrt(squareDist)); }; if (normalize && square && flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, normPredicateIn, valTransIdent, progress); } else if (normalize && square && !flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, normPredicateOut, valTransIdent, progress); } else if (normalize && !square && flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, normPredicateIn, valTransSqrt, progress); } else if (normalize && !square && !flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, normPredicateOut, valTransSqrt, progress); } else if (!normalize && square && flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, predicateIn, valTransIdent, progress); } else if (!normalize && square && !flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, predicateOut, valTransIdent, progress); } else if (!normalize && !square && flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, predicateIn, valTransSqrt, progress); } else if (!normalize && !square && !flip) { util::volumeRAMDistanceTransform(vrprecision, outDistanceField, inVolume->getBasis(), upsample, predicateOut, valTransSqrt, progress); } }); } template <typename U> void util::volumeDistanceTransform(const Volume *inVolume, VolumeRAMPrecision<U> *outDistanceField, const size3_t upsample, double threshold, bool normalize, bool flip, bool square, double scale) { util::volumeDistanceTransform(inVolume, outDistanceField, upsample, threshold, normalize, flip, square, scale, [](double) {}); } } // namespace inviwo #endif // IVW_VOLUMERAMDISTANCETRANSFORM_H
zero_length_array_section.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> int main() { int arr[5]; // CHECK: addr=0x[[#%x,HOST_ADDR:]] fprintf(stderr, "addr=%p\n", arr); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: arr[0:5]) #pragma omp target map(present, alloc: arr[0:0]) ; // CHECK: arr is present fprintf(stderr, "arr is present\n"); // arr[0:0] doesn't create an actual mapping in the first directive. // // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] (0 bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(alloc: arr[0:0]) #pragma omp target map(present, alloc: arr[0:0]) ; // CHECK-NOT: arr is present fprintf(stderr, "arr is present\n"); return 0; }
create_xgrid.c
/***********************************************************************
 *                   GNU Lesser General Public License
 *
 * This file is part of the GFDL Flexible Modeling System (FMS).
 *
 * FMS is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or (at
 * your option) any later version.
 *
 * FMS is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FMS.  If not, see <http://www.gnu.org/licenses/>.
 **********************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "mosaic_util.h"
#include "create_xgrid.h"
#include "constant.h"

#if defined(_OPENMP)
#include <omp.h>
#endif

/* Numerical tolerances and unit-conversion constants used throughout the
   exchange-grid construction. */
#define AREA_RATIO_THRESH (1.e-6)
#define MASK_THRESH       (0.5)     /* cells with mask above this take part */
#define EPSLN8            (1.e-8)
#define EPSLN30           (1.0e-30)
#define EPSLN10           (1.0e-10)
#define R2D (180/M_PI)              /* radians to degrees */
#define TPI (2.0*M_PI)

/*******************************************************************************
  int get_maxxgrid
  return constants MAXXGRID.
*******************************************************************************/
/* Accessor for the compile-time MAXXGRID limit (maximum number of exchange
   grid cells); the trailing-underscore variant is the Fortran binding. */
int get_maxxgrid(void)
{
  return MAXXGRID;
}

int get_maxxgrid_(void)
{
  return get_maxxgrid();
}

/*******************************************************************************
  void get_grid_area(const int *nlon, const int *nlat, const double *lon,
                     const double *lat, const double *area)
  return the grid area.
*******************************************************************************/ void get_grid_area_(const int *nlon, const int *nlat, const double *lon, const double *lat, double *area) { get_grid_area(nlon, nlat, lon, lat, area); } void get_grid_area(const int *nlon, const int *nlat, const double *lon, const double *lat, double *area) { int nx, ny, nxp, i, j, n_in; double x_in[20], y_in[20]; nx = *nlon; ny = *nlat; nxp = nx + 1; for(j=0; j<ny; j++) for(i=0; i < nx; i++) { x_in[0] = lon[j*nxp+i]; x_in[1] = lon[j*nxp+i+1]; x_in[2] = lon[(j+1)*nxp+i+1]; x_in[3] = lon[(j+1)*nxp+i]; y_in[0] = lat[j*nxp+i]; y_in[1] = lat[j*nxp+i+1]; y_in[2] = lat[(j+1)*nxp+i+1]; y_in[3] = lat[(j+1)*nxp+i]; n_in = fix_lon(x_in, y_in, 4, M_PI); area[j*nx+i] = poly_area(x_in, y_in, n_in); } } /* get_grid_area */ /******************************************************************************* void get_grid_area_ug(const int *npts, const double *lon, const double *lat, const double *area) return the grid area. *******************************************************************************/ void get_grid_area_ug_(const int *npts, const double *lon, const double *lat, double *area) { get_grid_area_ug(npts, lon, lat, area); } void get_grid_area_ug(const int *npts, const double *lon, const double *lat, double *area) { int nl, l, n_in, nv; double x_in[20], y_in[20]; nl = *npts; nv = 4; for(l=0; l<nl; l++) { x_in[0] = lon[l*nv]; x_in[1] = lon[l*nv+1]; x_in[2] = lon[l*nv+2]; x_in[3] = lon[l*nv+3]; y_in[0] = lat[l*nv]; y_in[1] = lat[l*nv+1]; y_in[2] = lat[l*nv+2]; y_in[3] = lat[l*nv+3]; n_in = fix_lon(x_in, y_in, nv, M_PI); area[l] = poly_area(x_in, y_in, n_in); } } /* get_grid_area_ug */ void get_grid_great_circle_area_(const int *nlon, const int *nlat, const double *lon, const double *lat, double *area) { get_grid_great_circle_area(nlon, nlat, lon, lat, area); } void get_grid_great_circle_area(const int *nlon, const int *nlat, const double *lon, const double *lat, double *area) { int nx, ny, nxp, 
nyp, i, j; int n0, n1, n2, n3; struct Node *grid=NULL; double *x=NULL, *y=NULL, *z=NULL; nx = *nlon; ny = *nlat; nxp = nx + 1; nyp = ny + 1; x = (double *)malloc(nxp*nyp*sizeof(double)); y = (double *)malloc(nxp*nyp*sizeof(double)); z = (double *)malloc(nxp*nyp*sizeof(double)); latlon2xyz(nxp*nyp, lon, lat, x, y, z); for(j=0; j<ny; j++) for(i=0; i < nx; i++) { /* clockwise */ n0 = j*nxp+i; n1 = (j+1)*nxp+i; n2 = (j+1)*nxp+i+1; n3 = j*nxp+i+1; rewindList(); grid = getNext(); addEnd(grid, x[n0], y[n0], z[n0], 0, 0, 0, -1); addEnd(grid, x[n1], y[n1], z[n1], 0, 0, 0, -1); addEnd(grid, x[n2], y[n2], z[n2], 0, 0, 0, -1); addEnd(grid, x[n3], y[n3], z[n3], 0, 0, 0, -1); area[j*nx+i] = gridArea(grid); } free(x); free(y); free(z); } /* get_grid_great_circle_area */ void get_grid_great_circle_area_ug_(const int *npts, const double *lon, const double *lat, double *area) { get_grid_great_circle_area_ug(npts, lon, lat, area); } void get_grid_great_circle_area_ug(const int *npts, const double *lon, const double *lat, double *area) { int l, nl, nv; int n0, n1, n2, n3; struct Node *grid=NULL; double *x=NULL, *y=NULL, *z=NULL; nl = *npts; nv = 4; x = (double *)malloc(nl*nv*sizeof(double)); y = (double *)malloc(nl*nv*sizeof(double)); z = (double *)malloc(nl*nv*sizeof(double)); latlon2xyz(nl*nv, lon, lat, x, y, z); for(l=0; l<nv; l++) { /* clockwise */ n0 = l*nv; n1 = l*nv+1; n2 = l*nv+2; n3 = l*nv+3; rewindList(); grid = getNext(); addEnd(grid, x[n0], y[n0], z[n0], 0, 0, 0, -1); addEnd(grid, x[n1], y[n1], z[n1], 0, 0, 0, -1); addEnd(grid, x[n2], y[n2], z[n2], 0, 0, 0, -1); addEnd(grid, x[n3], y[n3], z[n3], 0, 0, 0, -1); area[l] = gridArea(grid); } free(x); free(y); free(z); } /* get_grid_great_circle_area_ug */ void get_grid_area_dimensionless(const int *nlon, const int *nlat, const double *lon, const double *lat, double *area) { int nx, ny, nxp, i, j, n_in; double x_in[20], y_in[20]; nx = *nlon; ny = *nlat; nxp = nx + 1; for(j=0; j<ny; j++) for(i=0; i < nx; i++) { x_in[0] = 
lon[j*nxp+i]; x_in[1] = lon[j*nxp+i+1]; x_in[2] = lon[(j+1)*nxp+i+1]; x_in[3] = lon[(j+1)*nxp+i]; y_in[0] = lat[j*nxp+i]; y_in[1] = lat[j*nxp+i+1]; y_in[2] = lat[(j+1)*nxp+i+1]; y_in[3] = lat[(j+1)*nxp+i]; n_in = fix_lon(x_in, y_in, 4, M_PI); area[j*nx+i] = poly_area_dimensionless(x_in, y_in, n_in); } } /* get_grid_area */ void get_grid_area_no_adjust(const int *nlon, const int *nlat, const double *lon, const double *lat, double *area) { int nx, ny, nxp, i, j, n_in; double x_in[20], y_in[20]; nx = *nlon; ny = *nlat; nxp = nx + 1; for(j=0; j<ny; j++) for(i=0; i < nx; i++) { x_in[0] = lon[j*nxp+i]; x_in[1] = lon[j*nxp+i+1]; x_in[2] = lon[(j+1)*nxp+i+1]; x_in[3] = lon[(j+1)*nxp+i]; y_in[0] = lat[j*nxp+i]; y_in[1] = lat[j*nxp+i+1]; y_in[2] = lat[(j+1)*nxp+i+1]; y_in[3] = lat[(j+1)*nxp+i]; n_in = 4; area[j*nx+i] = poly_area_no_adjust(x_in, y_in, n_in); } } /* get_grid_area_no_adjust */ /******************************************************************************* void create_xgrid_1dx2d_order1 This routine generate exchange grids between two grids for the first order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out are the size of the grid cell and lon_in,lat_in are 1-D grid bounds, lon_out,lat_out are geographic grid location of grid cell bounds. 
*******************************************************************************/
/* Fortran-callable wrapper (trailing-underscore name) around
   create_xgrid_1dx2d_order1. */
int create_xgrid_1dx2d_order1_(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                               const int *nlat_out, const double *lon_in, const double *lat_in,
                               const double *lon_out, const double *lat_out, const double *mask_in,
                               int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area)
{
  int nxgrid;
  nxgrid = create_xgrid_1dx2d_order1(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in,
                                     lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out, xgrid_area);
  return nxgrid;
}

/* First-order exchange grid between a regular lat-lon source grid (1-D bounds
 * lon_in[nx1+1], lat_in[ny1+1], radians) and a general 2-D target grid
 * (corner arrays lon_out/lat_out of size (nx2+1)*(ny2+1)).
 * For every source cell with mask_in > MASK_THRESH, each target cell is
 * clipped against the source cell's lon-lat box; intersections whose area
 * exceeds AREA_RATIO_THRESH relative to the smaller parent cell are appended
 * to i_in/j_in/i_out/j_out/xgrid_area.
 * Returns the number of exchange cells stored; aborts via error_handler()
 * if the count would exceed MAXXGRID. */
int create_xgrid_1dx2d_order1(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                              const int *nlat_out, const double *lon_in, const double *lat_in,
                              const double *lon_out, const double *lat_out, const double *mask_in,
                              int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area)
{
  int nx1, ny1, nx2, ny2, nx1p, nx2p;
  int i1, j1, i2, j2, nxgrid;
  double ll_lon, ll_lat, ur_lon, ur_lat, x_in[MV], y_in[MV], x_out[MV], y_out[MV];
  double *area_in, *area_out, min_area;
  double *tmpx, *tmpy;

  nx1 = *nlon_in;
  ny1 = *nlat_in;
  nx2 = *nlon_out;
  ny2 = *nlat_out;

  nxgrid = 0;
  nx1p = nx1 + 1;
  nx2p = nx2 + 1;

  /* Parent-cell areas, used below to reject sliver intersections. */
  area_in  = (double *)malloc(nx1*ny1*sizeof(double));
  area_out = (double *)malloc(nx2*ny2*sizeof(double));
  /* Expand the 1-D source bounds into 2-D corner arrays for get_grid_area(). */
  tmpx = (double *)malloc((nx1+1)*(ny1+1)*sizeof(double));
  tmpy = (double *)malloc((nx1+1)*(ny1+1)*sizeof(double));
  for(j1=0; j1<=ny1; j1++) for(i1=0; i1<=nx1; i1++) {
    tmpx[j1*nx1p+i1] = lon_in[i1];
    tmpy[j1*nx1p+i1] = lat_in[j1];
  }
  /* This is just a temporary fix to solve the issue that there is one point in zonal direction */
  if(nx1 > 1)
    get_grid_area(nlon_in, nlat_in, tmpx, tmpy, area_in);
  else
    get_grid_area_no_adjust(nlon_in, nlat_in, tmpx, tmpy, area_in);
  get_grid_area(nlon_out, nlat_out, lon_out, lat_out, area_out);
  free(tmpx);
  free(tmpy);

  for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) {
    /* lon-lat bounding box of source cell (i1,j1) */
    ll_lon = lon_in[i1];   ll_lat = lat_in[j1];
    ur_lon = lon_in[i1+1]; ur_lat = lat_in[j1+1];
    for(j2=0; j2<ny2; j2++) for(i2=0; i2<nx2; i2++) {
      int n_in, n_out;
      double Xarea;

      y_in[0] = lat_out[j2*nx2p+i2];
      y_in[1] = lat_out[j2*nx2p+i2+1];
      y_in[2] = lat_out[(j2+1)*nx2p+i2+1];
      y_in[3] = lat_out[(j2+1)*nx2p+i2];
      /* quick latitude rejection: target cell entirely below/above the box */
      if ( (y_in[0]<=ll_lat) && (y_in[1]<=ll_lat) && (y_in[2]<=ll_lat) && (y_in[3]<=ll_lat) ) continue;
      if ( (y_in[0]>=ur_lat) && (y_in[1]>=ur_lat) && (y_in[2]>=ur_lat) && (y_in[3]>=ur_lat) ) continue;
      x_in[0] = lon_out[j2*nx2p+i2];
      x_in[1] = lon_out[j2*nx2p+i2+1];
      x_in[2] = lon_out[(j2+1)*nx2p+i2+1];
      x_in[3] = lon_out[(j2+1)*nx2p+i2];
      /* bring target longitudes into the source cell's longitude branch */
      n_in = fix_lon(x_in, y_in, 4, (ll_lon+ur_lon)/2);

      if ( (n_out = clip ( x_in, y_in, n_in, ll_lon, ll_lat, ur_lon, ur_lat, x_out, y_out )) > 0 ) {
        Xarea = poly_area (x_out, y_out, n_out ) * mask_in[j1*nx1+i1];
        min_area = min(area_in[j1*nx1+i1], area_out[j2*nx2+i2]);
        /* keep only non-sliver overlaps */
        if( Xarea/min_area > AREA_RATIO_THRESH ) {
          xgrid_area[nxgrid] = Xarea;
          i_in[nxgrid]  = i1;
          j_in[nxgrid]  = j1;
          i_out[nxgrid] = i2;
          j_out[nxgrid] = j2;
          ++nxgrid;
          if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID");
        }
      }
    }
  }

  free(area_in);
  free(area_out);

  return nxgrid;

} /* create_xgrid_1dx2d_order1 */

/*******************************************************************************
  void create_xgrid_1dx2d_order1_ug
  This routine generate exchange grids between two grids
  for the first order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out
  are the size of the grid cell and lon_in,lat_in are 1-D grid bounds,
  lon_out,lat_out are geographic grid location of grid cell bounds.
*******************************************************************************/
/* Fortran-callable wrapper around create_xgrid_1dx2d_order1_ug. */
int create_xgrid_1dx2d_order1_ug_(const int *nlon_in, const int *nlat_in, const int *npts_out,
                                  const double *lon_in, const double *lat_in,
                                  const double *lon_out, const double *lat_out, const double *mask_in,
                                  int *i_in, int *j_in, int *l_out, double *xgrid_area)
{
  int nxgrid;
  nxgrid = create_xgrid_1dx2d_order1_ug(nlon_in, nlat_in, npts_out, lon_in, lat_in,
                                        lon_out, lat_out, mask_in, i_in, j_in, l_out, xgrid_area);
  return nxgrid;
}

/* Variant of create_xgrid_1dx2d_order1 where the target grid is unstructured:
 * npts_out cells, each stored as 4 consecutive corner points in
 * lon_out/lat_out.  Matches are recorded as (i_in, j_in) on the regular
 * source grid and a single cell index l_out on the target grid.
 * Returns the number of exchange cells stored; aborts via error_handler()
 * if the count would exceed MAXXGRID. */
int create_xgrid_1dx2d_order1_ug(const int *nlon_in, const int *nlat_in, const int *npts_out,
                                 const double *lon_in, const double *lat_in,
                                 const double *lon_out, const double *lat_out, const double *mask_in,
                                 int *i_in, int *j_in, int *l_out, double *xgrid_area)
{
  int nx1, ny1, nx1p, nv, npts2;
  int i1, j1, l2, nxgrid;
  double ll_lon, ll_lat, ur_lon, ur_lat, x_in[MV], y_in[MV], x_out[MV], y_out[MV];
  double *area_in, *area_out, min_area;
  double *tmpx, *tmpy;

  nx1 = *nlon_in;
  ny1 = *nlat_in;
  nv  = 4;            /* corners per unstructured cell */
  npts2 = *npts_out;

  nxgrid = 0;
  nx1p = nx1 + 1;

  /* Parent-cell areas, used below to reject sliver intersections. */
  area_in  = (double *)malloc(nx1*ny1*sizeof(double));
  area_out = (double *)malloc(npts2*sizeof(double));
  /* Expand the 1-D source bounds into 2-D corner arrays for get_grid_area(). */
  tmpx = (double *)malloc((nx1+1)*(ny1+1)*sizeof(double));
  tmpy = (double *)malloc((nx1+1)*(ny1+1)*sizeof(double));
  for(j1=0; j1<=ny1; j1++) for(i1=0; i1<=nx1; i1++) {
    tmpx[j1*nx1p+i1] = lon_in[i1];
    tmpy[j1*nx1p+i1] = lat_in[j1];
  }
  /* This is just a temporary fix to solve the issue that there is one point in zonal direction */
  if(nx1 > 1)
    get_grid_area(nlon_in, nlat_in, tmpx, tmpy, area_in);
  else
    get_grid_area_no_adjust(nlon_in, nlat_in, tmpx, tmpy, area_in);
  get_grid_area_ug(npts_out, lon_out, lat_out, area_out);
  free(tmpx);
  free(tmpy);

  for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) {
    /* lon-lat bounding box of source cell (i1,j1) */
    ll_lon = lon_in[i1];   ll_lat = lat_in[j1];
    ur_lon = lon_in[i1+1]; ur_lat = lat_in[j1+1];
    for(l2=0; l2<npts2; l2++) {
      int n_in, n_out;
      double Xarea;

      y_in[0] = lat_out[l2*nv];
      y_in[1] = lat_out[l2*nv+1];
      y_in[2] = lat_out[l2*nv+2];
      y_in[3] = lat_out[l2*nv+3];
      /* quick latitude rejection: target cell entirely below/above the box */
      if ( (y_in[0]<=ll_lat) && (y_in[1]<=ll_lat) && (y_in[2]<=ll_lat) && (y_in[3]<=ll_lat) ) continue;
      if ( (y_in[0]>=ur_lat) && (y_in[1]>=ur_lat) && (y_in[2]>=ur_lat) && (y_in[3]>=ur_lat) ) continue;
      x_in[0] = lon_out[l2*nv];
      x_in[1] = lon_out[l2*nv+1];
      x_in[2] = lon_out[l2*nv+2];
      x_in[3] = lon_out[l2*nv+3];
      /* bring target longitudes into the source cell's longitude branch */
      n_in = fix_lon(x_in, y_in, 4, (ll_lon+ur_lon)/2);

      if ( (n_out = clip ( x_in, y_in, n_in, ll_lon, ll_lat, ur_lon, ur_lat, x_out, y_out )) > 0 ) {
        Xarea = poly_area (x_out, y_out, n_out ) * mask_in[j1*nx1+i1];
        min_area = min(area_in[j1*nx1+i1], area_out[l2]);
        /* keep only non-sliver overlaps */
        if( Xarea/min_area > AREA_RATIO_THRESH ) {
          xgrid_area[nxgrid] = Xarea;
          i_in[nxgrid]  = i1;
          j_in[nxgrid]  = j1;
          l_out[nxgrid] = l2;
          ++nxgrid;
          if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID");
        }
      }
    }
  }

  free(area_in);
  free(area_out);

  return nxgrid;

} /* create_xgrid_1dx2d_order1_ug */

/********************************************************************************
  void create_xgrid_1dx2d_order2
  This routine generate exchange grids between two grids
  for the second order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out
  are the size of the grid cell and lon_in,lat_in are 1-D grid bounds,
  lon_out,lat_out are geographic grid location of grid cell bounds.
********************************************************************************/
/* Fortran-callable wrapper around create_xgrid_1dx2d_order2. */
int create_xgrid_1dx2d_order2_(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                               const int *nlat_out, const double *lon_in, const double *lat_in,
                               const double *lon_out, const double *lat_out, const double *mask_in,
                               int *i_in, int *j_in, int *i_out, int *j_out,
                               double *xgrid_area, double *xgrid_clon, double *xgrid_clat)
{
  int nxgrid;
  nxgrid = create_xgrid_1dx2d_order2(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in,
                                     lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out,
                                     xgrid_area, xgrid_clon, xgrid_clat);
  return nxgrid;
}

/* Second-order version of create_xgrid_1dx2d_order2: same cell matching and
 * clipping as create_xgrid_1dx2d_order1, but additionally stores each
 * exchange cell's centroid (xgrid_clon/xgrid_clat via poly_ctrlon/
 * poly_ctrlat) as required by second-order conservative interpolation.
 * Returns the number of exchange cells stored; aborts via error_handler()
 * if the count would exceed MAXXGRID. */
int create_xgrid_1dx2d_order2(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                              const int *nlat_out, const double *lon_in, const double *lat_in,
                              const double *lon_out, const double *lat_out, const double *mask_in,
                              int *i_in, int *j_in, int *i_out, int *j_out,
                              double *xgrid_area, double *xgrid_clon, double *xgrid_clat)
{
  int nx1, ny1, nx2, ny2, nx1p, nx2p;
  int i1, j1, i2, j2, nxgrid;
  double ll_lon, ll_lat, ur_lon, ur_lat, x_in[MV], y_in[MV], x_out[MV], y_out[MV];
  double *area_in, *area_out, min_area;
  double *tmpx, *tmpy;

  nx1 = *nlon_in;
  ny1 = *nlat_in;
  nx2 = *nlon_out;
  ny2 = *nlat_out;

  nxgrid = 0;
  nx1p = nx1 + 1;
  nx2p = nx2 + 1;

  /* Parent-cell areas, used below to reject sliver intersections. */
  area_in  = (double *)malloc(nx1*ny1*sizeof(double));
  area_out = (double *)malloc(nx2*ny2*sizeof(double));
  /* Expand the 1-D source bounds into 2-D corner arrays for get_grid_area(). */
  tmpx = (double *)malloc((nx1+1)*(ny1+1)*sizeof(double));
  tmpy = (double *)malloc((nx1+1)*(ny1+1)*sizeof(double));
  for(j1=0; j1<=ny1; j1++) for(i1=0; i1<=nx1; i1++) {
    tmpx[j1*nx1p+i1] = lon_in[i1];
    tmpy[j1*nx1p+i1] = lat_in[j1];
  }
  get_grid_area(nlon_in, nlat_in, tmpx, tmpy, area_in);
  get_grid_area(nlon_out, nlat_out, lon_out, lat_out, area_out);
  free(tmpx);
  free(tmpy);

  for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) {
    /* lon-lat bounding box of source cell (i1,j1) */
    ll_lon = lon_in[i1];   ll_lat = lat_in[j1];
    ur_lon = lon_in[i1+1]; ur_lat = lat_in[j1+1];
    for(j2=0; j2<ny2; j2++) for(i2=0; i2<nx2; i2++) {
      int n_in, n_out;
      double xarea, lon_in_avg;

      y_in[0] = lat_out[j2*nx2p+i2];
      y_in[1] = lat_out[j2*nx2p+i2+1];
      y_in[2] = lat_out[(j2+1)*nx2p+i2+1];
      y_in[3] = lat_out[(j2+1)*nx2p+i2];
      /* quick latitude rejection: target cell entirely below/above the box */
      if ( (y_in[0]<=ll_lat) && (y_in[1]<=ll_lat) && (y_in[2]<=ll_lat) && (y_in[3]<=ll_lat) ) continue;
      if ( (y_in[0]>=ur_lat) && (y_in[1]>=ur_lat) && (y_in[2]>=ur_lat) && (y_in[3]>=ur_lat) ) continue;
      x_in[0] = lon_out[j2*nx2p+i2];
      x_in[1] = lon_out[j2*nx2p+i2+1];
      x_in[2] = lon_out[(j2+1)*nx2p+i2+1];
      x_in[3] = lon_out[(j2+1)*nx2p+i2];
      /* bring target longitudes into the source cell's longitude branch */
      n_in = fix_lon(x_in, y_in, 4, (ll_lon+ur_lon)/2);
      /* reference longitude used by poly_ctrlon for the centroid */
      lon_in_avg = avgval_double(n_in, x_in);

      if ( (n_out = clip ( x_in, y_in, n_in, ll_lon, ll_lat, ur_lon, ur_lat, x_out, y_out )) > 0 ) {
        xarea = poly_area (x_out, y_out, n_out ) * mask_in[j1*nx1+i1];
        min_area = min(area_in[j1*nx1+i1], area_out[j2*nx2+i2]);
        /* keep only non-sliver overlaps */
        if(xarea/min_area > AREA_RATIO_THRESH ) {
          xgrid_area[nxgrid] = xarea;
          xgrid_clon[nxgrid] = poly_ctrlon(x_out, y_out, n_out, lon_in_avg);
          xgrid_clat[nxgrid] = poly_ctrlat (x_out, y_out, n_out );
          i_in[nxgrid]  = i1;
          j_in[nxgrid]  = j1;
          i_out[nxgrid] = i2;
          j_out[nxgrid] = j2;
          ++nxgrid;
          if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID");
        }
      }
    }
  }

  free(area_in);
  free(area_out);

  return nxgrid;

} /* create_xgrid_1dx2d_order2 */

/*******************************************************************************
  void create_xgrid_2dx1d_order1
  This routine generate exchange grids between two grids
  for the first order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out
  are the size of the grid cell and lon_out,lat_out are 1-D grid bounds,
  lon_in,lat_in are geographic grid location of grid cell bounds.
  mask is on grid lon_in/lat_in.
*******************************************************************************/
/* Fortran-callable wrapper around create_xgrid_2dx1d_order1. */
int create_xgrid_2dx1d_order1_(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                               const int *nlat_out, const double *lon_in, const double *lat_in,
                               const double *lon_out, const double *lat_out, const double *mask_in,
                               int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area)
{
  int nxgrid;
  nxgrid = create_xgrid_2dx1d_order1(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in,
                                     lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out, xgrid_area);
  return nxgrid;
}

/* Mirror of create_xgrid_1dx2d_order1 with the grid roles swapped: the source
 * grid is 2-D curvilinear (corner arrays lon_in/lat_in, with mask_in) and the
 * target grid is regular lat-lon with 1-D bounds lon_out/lat_out.  Each
 * masked source cell is clipped against every target cell's lon-lat box.
 * Returns the number of exchange cells stored; aborts via error_handler()
 * if the count would exceed MAXXGRID. */
int create_xgrid_2dx1d_order1(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                              const int *nlat_out, const double *lon_in, const double *lat_in,
                              const double *lon_out, const double *lat_out, const double *mask_in,
                              int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area)
{
  int nx1, ny1, nx2, ny2, nx1p, nx2p;
  int i1, j1, i2, j2, nxgrid;
  double ll_lon, ll_lat, ur_lon, ur_lat, x_in[MV], y_in[MV], x_out[MV], y_out[MV];
  double *area_in, *area_out, min_area;
  double *tmpx, *tmpy;
  int n_in, n_out;
  double Xarea;

  nx1 = *nlon_in;
  ny1 = *nlat_in;
  nx2 = *nlon_out;
  ny2 = *nlat_out;

  nxgrid = 0;
  nx1p = nx1 + 1;
  nx2p = nx2 + 1;

  /* Parent-cell areas, used below to reject sliver intersections. */
  area_in  = (double *)malloc(nx1*ny1*sizeof(double));
  area_out = (double *)malloc(nx2*ny2*sizeof(double));
  /* Expand the 1-D target bounds into 2-D corner arrays for get_grid_area(). */
  tmpx = (double *)malloc((nx2+1)*(ny2+1)*sizeof(double));
  tmpy = (double *)malloc((nx2+1)*(ny2+1)*sizeof(double));
  for(j2=0; j2<=ny2; j2++) for(i2=0; i2<=nx2; i2++) {
    tmpx[j2*nx2p+i2] = lon_out[i2];
    tmpy[j2*nx2p+i2] = lat_out[j2];
  }
  get_grid_area(nlon_in, nlat_in, lon_in, lat_in, area_in);
  get_grid_area(nlon_out, nlat_out, tmpx, tmpy, area_out);
  free(tmpx);
  free(tmpy);

  for(j2=0; j2<ny2; j2++) for(i2=0; i2<nx2; i2++) {
    /* lon-lat bounding box of target cell (i2,j2) */
    ll_lon = lon_out[i2];   ll_lat = lat_out[j2];
    ur_lon = lon_out[i2+1]; ur_lat = lat_out[j2+1];
    for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) {
      y_in[0] = lat_in[j1*nx1p+i1];
      y_in[1] = lat_in[j1*nx1p+i1+1];
      y_in[2] = lat_in[(j1+1)*nx1p+i1+1];
      y_in[3] = lat_in[(j1+1)*nx1p+i1];
      /* quick latitude rejection: source cell entirely below/above the box */
      if ( (y_in[0]<=ll_lat) && (y_in[1]<=ll_lat) && (y_in[2]<=ll_lat) && (y_in[3]<=ll_lat) ) continue;
      if ( (y_in[0]>=ur_lat) && (y_in[1]>=ur_lat) && (y_in[2]>=ur_lat) && (y_in[3]>=ur_lat) ) continue;
      x_in[0] = lon_in[j1*nx1p+i1];
      x_in[1] = lon_in[j1*nx1p+i1+1];
      x_in[2] = lon_in[(j1+1)*nx1p+i1+1];
      x_in[3] = lon_in[(j1+1)*nx1p+i1];
      /* bring source longitudes into the target cell's longitude branch */
      n_in = fix_lon(x_in, y_in, 4, (ll_lon+ur_lon)/2);

      if ( (n_out = clip ( x_in, y_in, n_in, ll_lon, ll_lat, ur_lon, ur_lat, x_out, y_out )) > 0 ) {
        Xarea = poly_area ( x_out, y_out, n_out ) * mask_in[j1*nx1+i1];
        min_area = min(area_in[j1*nx1+i1], area_out[j2*nx2+i2]);
        /* keep only non-sliver overlaps */
        if( Xarea/min_area > AREA_RATIO_THRESH ) {
          xgrid_area[nxgrid] = Xarea;
          i_in[nxgrid]  = i1;
          j_in[nxgrid]  = j1;
          i_out[nxgrid] = i2;
          j_out[nxgrid] = j2;
          ++nxgrid;
          if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID");
        }
      }
    }
  }

  free(area_in);
  free(area_out);

  return nxgrid;

} /* create_xgrid_2dx1d_order1 */

/********************************************************************************
  void create_xgrid_2dx1d_order2
  This routine generate exchange grids between two grids
  for the second order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out
  are the size of the grid cell and lon_out,lat_out are 1-D grid bounds,
  lon_in,lat_in are geographic grid location of grid cell bounds.
  mask is on grid lon_in/lat_in.
********************************************************************************/
/* Fortran-callable wrapper around create_xgrid_2dx1d_order2. */
int create_xgrid_2dx1d_order2_(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                               const int *nlat_out, const double *lon_in, const double *lat_in,
                               const double *lon_out, const double *lat_out, const double *mask_in,
                               int *i_in, int *j_in, int *i_out, int *j_out,
                               double *xgrid_area, double *xgrid_clon, double *xgrid_clat)
{
  int nxgrid;
  nxgrid = create_xgrid_2dx1d_order2(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in,
                                     lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out,
                                     xgrid_area, xgrid_clon, xgrid_clat);
  return nxgrid;
}

/* Second-order version of create_xgrid_2dx1d_order1: same matching and
 * clipping (2-D curvilinear source, 1-D regular target), but additionally
 * stores each exchange cell's centroid (xgrid_clon/xgrid_clat via
 * poly_ctrlon/poly_ctrlat) for second-order conservative interpolation.
 * Returns the number of exchange cells stored; aborts via error_handler()
 * if the count would exceed MAXXGRID. */
int create_xgrid_2dx1d_order2(const int *nlon_in, const int *nlat_in, const int *nlon_out,
                              const int *nlat_out, const double *lon_in, const double *lat_in,
                              const double *lon_out, const double *lat_out, const double *mask_in,
                              int *i_in, int *j_in, int *i_out, int *j_out,
                              double *xgrid_area, double *xgrid_clon, double *xgrid_clat)
{
  int nx1, ny1, nx2, ny2, nx1p, nx2p;
  int i1, j1, i2, j2, nxgrid;
  double ll_lon, ll_lat, ur_lon, ur_lat, x_in[MV], y_in[MV], x_out[MV], y_out[MV];
  double *tmpx, *tmpy;
  double *area_in, *area_out, min_area;
  double lon_in_avg;
  int n_in, n_out;
  double xarea;

  nx1 = *nlon_in;
  ny1 = *nlat_in;
  nx2 = *nlon_out;
  ny2 = *nlat_out;

  nxgrid = 0;
  nx1p = nx1 + 1;
  nx2p = nx2 + 1;

  /* Parent-cell areas, used below to reject sliver intersections. */
  area_in  = (double *)malloc(nx1*ny1*sizeof(double));
  area_out = (double *)malloc(nx2*ny2*sizeof(double));
  /* Expand the 1-D target bounds into 2-D corner arrays for get_grid_area(). */
  tmpx = (double *)malloc((nx2+1)*(ny2+1)*sizeof(double));
  tmpy = (double *)malloc((nx2+1)*(ny2+1)*sizeof(double));
  for(j2=0; j2<=ny2; j2++) for(i2=0; i2<=nx2; i2++) {
    tmpx[j2*nx2p+i2] = lon_out[i2];
    tmpy[j2*nx2p+i2] = lat_out[j2];
  }
  get_grid_area(nlon_in, nlat_in, lon_in, lat_in, area_in);
  get_grid_area(nlon_out, nlat_out, tmpx, tmpy, area_out);
  free(tmpx);
  free(tmpy);

  for(j2=0; j2<ny2; j2++) for(i2=0; i2<nx2; i2++) {
    /* lon-lat bounding box of target cell (i2,j2) */
    ll_lon = lon_out[i2];   ll_lat = lat_out[j2];
    ur_lon = lon_out[i2+1]; ur_lat = lat_out[j2+1];
    for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) {
      y_in[0] = lat_in[j1*nx1p+i1];
      y_in[1] = lat_in[j1*nx1p+i1+1];
      y_in[2] = lat_in[(j1+1)*nx1p+i1+1];
      y_in[3] = lat_in[(j1+1)*nx1p+i1];
      /* quick latitude rejection: source cell entirely below/above the box */
      if ( (y_in[0]<=ll_lat) && (y_in[1]<=ll_lat) && (y_in[2]<=ll_lat) && (y_in[3]<=ll_lat) ) continue;
      if ( (y_in[0]>=ur_lat) && (y_in[1]>=ur_lat) && (y_in[2]>=ur_lat) && (y_in[3]>=ur_lat) ) continue;
      x_in[0] = lon_in[j1*nx1p+i1];
      x_in[1] = lon_in[j1*nx1p+i1+1];
      x_in[2] = lon_in[(j1+1)*nx1p+i1+1];
      x_in[3] = lon_in[(j1+1)*nx1p+i1];
      /* bring source longitudes into the target cell's longitude branch */
      n_in = fix_lon(x_in, y_in, 4, (ll_lon+ur_lon)/2);
      /* reference longitude used by poly_ctrlon for the centroid */
      lon_in_avg = avgval_double(n_in, x_in);

      if ( (n_out = clip ( x_in, y_in, n_in, ll_lon, ll_lat, ur_lon, ur_lat, x_out, y_out )) > 0 ) {
        xarea = poly_area (x_out, y_out, n_out ) * mask_in[j1*nx1+i1];
        min_area = min(area_in[j1*nx1+i1], area_out[j2*nx2+i2]);
        /* keep only non-sliver overlaps */
        if(xarea/min_area > AREA_RATIO_THRESH ) {
          xgrid_area[nxgrid] = xarea;
          xgrid_clon[nxgrid] = poly_ctrlon(x_out, y_out, n_out, lon_in_avg);
          xgrid_clat[nxgrid] = poly_ctrlat (x_out, y_out, n_out );
          i_in[nxgrid]  = i1;
          j_in[nxgrid]  = j1;
          i_out[nxgrid] = i2;
          j_out[nxgrid] = j2;
          ++nxgrid;
          if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID");
        }
      }
    }
  }

  free(area_in);
  free(area_out);

  return nxgrid;

} /* create_xgrid_2dx1d_order2 */

/*******************************************************************************
  void create_xgrid_2DX2D_order1
  This routine generate exchange grids between two grids
  for the first order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out
  are the size of the grid cell and lon_in,lat_in, lon_out,lat_out are geographic
  grid location of grid cell bounds.
  mask is on grid lon_in/lat_in.
*******************************************************************************/ int create_xgrid_2dx2d_order1_(const int *nlon_in, const int *nlat_in, const int *nlon_out, const int *nlat_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area) { int nxgrid; nxgrid = create_xgrid_2dx2d_order1(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in, lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out, xgrid_area); return nxgrid; } int create_xgrid_2dx2d_order1(const int *nlon_in, const int *nlat_in, const int *nlon_out, const int *nlat_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area) { #define MAX_V 8 int nx1, nx2, ny1, ny2, nx1p, nx2p, nxgrid; double *area_in, *area_out; int nblocks =1; int *istart2=NULL, *iend2=NULL; int npts_left, nblks_left, pos, m, npts_my, ij; double *lon_out_min_list,*lon_out_max_list,*lon_out_avg,*lat_out_min_list,*lat_out_max_list; double *lon_out_list, *lat_out_list; int *pnxgrid=NULL, *pstart; int *pi_in=NULL, *pj_in=NULL, *pi_out=NULL, *pj_out=NULL; double *pxgrid_area=NULL; int *n2_list; int nthreads, nxgrid_block_max; nx1 = *nlon_in; ny1 = *nlat_in; nx2 = *nlon_out; ny2 = *nlat_out; nx1p = nx1 + 1; nx2p = nx2 + 1; area_in = (double *)malloc(nx1*ny1*sizeof(double)); area_out = (double *)malloc(nx2*ny2*sizeof(double)); get_grid_area(nlon_in, nlat_in, lon_in, lat_in, area_in); get_grid_area(nlon_out, nlat_out, lon_out, lat_out, area_out); nthreads = 1; #if defined(_OPENMP) #pragma omp parallel nthreads = omp_get_num_threads(); #endif nblocks = nthreads; istart2 = (int *)malloc(nblocks*sizeof(int)); iend2 = (int *)malloc(nblocks*sizeof(int)); pstart = (int *)malloc(nblocks*sizeof(int)); pnxgrid = (int *)malloc(nblocks*sizeof(int)); nxgrid_block_max = MAXXGRID/nblocks; for(m=0; 
m<nblocks; m++) { pnxgrid[m] = 0; pstart[m] = m*nxgrid_block_max; } if(nblocks == 1) { pi_in = i_in; pj_in = j_in; pi_out = i_out; pj_out = j_out; pxgrid_area = xgrid_area; } else { pi_in = (int *)malloc(MAXXGRID*sizeof(int)); pj_in = (int *)malloc(MAXXGRID*sizeof(int)); pi_out = (int *)malloc(MAXXGRID*sizeof(int)); pj_out = (int *)malloc(MAXXGRID*sizeof(int)); pxgrid_area = (double *)malloc(MAXXGRID*sizeof(double)); } npts_left = nx2*ny2; nblks_left = nblocks; pos = 0; for(m=0; m<nblocks; m++) { istart2[m] = pos; npts_my = npts_left/nblks_left; iend2[m] = istart2[m] + npts_my - 1; pos = iend2[m] + 1; npts_left -= npts_my; nblks_left--; } lon_out_min_list = (double *)malloc(nx2*ny2*sizeof(double)); lon_out_max_list = (double *)malloc(nx2*ny2*sizeof(double)); lat_out_min_list = (double *)malloc(nx2*ny2*sizeof(double)); lat_out_max_list = (double *)malloc(nx2*ny2*sizeof(double)); lon_out_avg = (double *)malloc(nx2*ny2*sizeof(double)); n2_list = (int *)malloc(nx2*ny2*sizeof(int)); lon_out_list = (double *)malloc(MAX_V*nx2*ny2*sizeof(double)); lat_out_list = (double *)malloc(MAX_V*nx2*ny2*sizeof(double)); #if defined(_OPENMP) #pragma omp parallel for default(none) shared(nx2,ny2,nx2p,lon_out,lat_out,lat_out_min_list, \ lat_out_max_list,lon_out_min_list,lon_out_max_list, \ lon_out_avg,n2_list,lon_out_list,lat_out_list) #endif for(ij=0; ij<nx2*ny2; ij++){ int i2, j2, n, n0, n1, n2, n3, n2_in, l; double x2_in[MV], y2_in[MV]; i2 = ij%nx2; j2 = ij/nx2; n = j2*nx2+i2; n0 = j2*nx2p+i2; n1 = j2*nx2p+i2+1; n2 = (j2+1)*nx2p+i2+1; n3 = (j2+1)*nx2p+i2; x2_in[0] = lon_out[n0]; y2_in[0] = lat_out[n0]; x2_in[1] = lon_out[n1]; y2_in[1] = lat_out[n1]; x2_in[2] = lon_out[n2]; y2_in[2] = lat_out[n2]; x2_in[3] = lon_out[n3]; y2_in[3] = lat_out[n3]; lat_out_min_list[n] = minval_double(4, y2_in); lat_out_max_list[n] = maxval_double(4, y2_in); n2_in = fix_lon(x2_in, y2_in, 4, M_PI); if(n2_in > MAX_V) error_handler("create_xgrid.c: n2_in is greater than MAX_V"); lon_out_min_list[n] = 
minval_double(n2_in, x2_in); lon_out_max_list[n] = maxval_double(n2_in, x2_in); lon_out_avg[n] = avgval_double(n2_in, x2_in); n2_list[n] = n2_in; for(l=0; l<n2_in; l++) { lon_out_list[n*MAX_V+l] = x2_in[l]; lat_out_list[n*MAX_V+l] = y2_in[l]; } } nxgrid = 0; #if defined(_OPENMP) #pragma omp parallel for default(none) shared(nblocks,nx1,ny1,nx1p,mask_in,lon_in,lat_in, \ istart2,iend2,nx2,lat_out_min_list,lat_out_max_list, \ n2_list,lon_out_list,lat_out_list,lon_out_min_list, \ lon_out_max_list,lon_out_avg,area_in,area_out, \ pxgrid_area,pnxgrid,pi_in,pj_in,pi_out,pj_out,pstart,nthreads) #endif for(m=0; m<nblocks; m++) { int i1, j1, ij; for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) { int n0, n1, n2, n3, l,n1_in; double lat_in_min,lat_in_max,lon_in_min,lon_in_max,lon_in_avg; double x1_in[MV], y1_in[MV], x_out[MV], y_out[MV]; n0 = j1*nx1p+i1; n1 = j1*nx1p+i1+1; n2 = (j1+1)*nx1p+i1+1; n3 = (j1+1)*nx1p+i1; x1_in[0] = lon_in[n0]; y1_in[0] = lat_in[n0]; x1_in[1] = lon_in[n1]; y1_in[1] = lat_in[n1]; x1_in[2] = lon_in[n2]; y1_in[2] = lat_in[n2]; x1_in[3] = lon_in[n3]; y1_in[3] = lat_in[n3]; lat_in_min = minval_double(4, y1_in); lat_in_max = maxval_double(4, y1_in); n1_in = fix_lon(x1_in, y1_in, 4, M_PI); lon_in_min = minval_double(n1_in, x1_in); lon_in_max = maxval_double(n1_in, x1_in); lon_in_avg = avgval_double(n1_in, x1_in); for(ij=istart2[m]; ij<=iend2[m]; ij++) { int n_out, i2, j2, n2_in; double xarea, dx, lon_out_min, lon_out_max; double x2_in[MAX_V], y2_in[MAX_V]; i2 = ij%nx2; j2 = ij/nx2; if(lat_out_min_list[ij] >= lat_in_max || lat_out_max_list[ij] <= lat_in_min ) continue; /* adjust x2_in according to lon_in_avg*/ n2_in = n2_list[ij]; for(l=0; l<n2_in; l++) { x2_in[l] = lon_out_list[ij*MAX_V+l]; y2_in[l] = lat_out_list[ij*MAX_V+l]; } lon_out_min = lon_out_min_list[ij]; lon_out_max = lon_out_max_list[ij]; dx = lon_out_avg[ij] - lon_in_avg; if(dx < -M_PI ) { lon_out_min += TPI; lon_out_max += TPI; for (l=0; l<n2_in; l++) 
x2_in[l] += TPI; } else if (dx > M_PI) { lon_out_min -= TPI; lon_out_max -= TPI; for (l=0; l<n2_in; l++) x2_in[l] -= TPI; } /* x2_in should in the same range as x1_in after lon_fix, so no need to consider cyclic condition */ if(lon_out_min >= lon_in_max || lon_out_max <= lon_in_min ) continue; if ( (n_out = clip_2dx2d( x1_in, y1_in, n1_in, x2_in, y2_in, n2_in, x_out, y_out )) > 0) { double min_area; int nn; xarea = poly_area (x_out, y_out, n_out ) * mask_in[j1*nx1+i1]; min_area = min(area_in[j1*nx1+i1], area_out[j2*nx2+i2]); if( xarea/min_area > AREA_RATIO_THRESH ) { pnxgrid[m]++; if(pnxgrid[m]>= MAXXGRID/nthreads) error_handler("nxgrid is greater than MAXXGRID/nthreads, increase MAXXGRID, decrease nthreads, or increase number of MPI ranks"); nn = pstart[m] + pnxgrid[m]-1; pxgrid_area[nn] = xarea; pi_in[nn] = i1; pj_in[nn] = j1; pi_out[nn] = i2; pj_out[nn] = j2; } } } } } /*copy data if nblocks > 1 */ if(nblocks == 1) { nxgrid = pnxgrid[0]; pi_in = NULL; pj_in = NULL; pi_out = NULL; pj_out = NULL; pxgrid_area = NULL; } else { int nn, i; nxgrid = 0; for(m=0; m<nblocks; m++) { for(i=0; i<pnxgrid[m]; i++) { nn = pstart[m] + i; i_in[nxgrid] = pi_in[nn]; j_in[nxgrid] = pj_in[nn]; i_out[nxgrid] = pi_out[nn]; j_out[nxgrid] = pj_out[nn]; xgrid_area[nxgrid] = pxgrid_area[nn]; nxgrid++; } } free(pi_in); free(pj_in); free(pi_out); free(pj_out); free(pxgrid_area); } free(area_in); free(area_out); free(lon_out_min_list); free(lon_out_max_list); free(lat_out_min_list); free(lat_out_max_list); free(lon_out_avg); free(n2_list); free(lon_out_list); free(lat_out_list); return nxgrid; }/* get_xgrid_2Dx2D_order1 */ /******************************************************************************** void create_xgrid_2dx1d_order2 This routine generate exchange grids between two grids for the second order conservative interpolation. nlon_in,nlat_in,nlon_out,nlat_out are the size of the grid cell and lon_in,lat_in, lon_out,lat_out are geographic grid location of grid cell bounds. 
mask is on grid lon_in/lat_in. ********************************************************************************/ int create_xgrid_2dx2d_order2_(const int *nlon_in, const int *nlat_in, const int *nlon_out, const int *nlat_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area, double *xgrid_clon, double *xgrid_clat) { int nxgrid; nxgrid = create_xgrid_2dx2d_order2(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in, lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out, xgrid_area, xgrid_clon, xgrid_clat); return nxgrid; } int create_xgrid_2dx2d_order2(const int *nlon_in, const int *nlat_in, const int *nlon_out, const int *nlat_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area, double *xgrid_clon, double *xgrid_clat) { #define MAX_V 8 int nx1, nx2, ny1, ny2, nx1p, nx2p, nxgrid; double *area_in, *area_out; int nblocks =1; int *istart2=NULL, *iend2=NULL; int npts_left, nblks_left, pos, m, npts_my, ij; double *lon_out_min_list,*lon_out_max_list,*lon_out_avg,*lat_out_min_list,*lat_out_max_list; double *lon_out_list, *lat_out_list; int *pnxgrid=NULL, *pstart; int *pi_in=NULL, *pj_in=NULL, *pi_out=NULL, *pj_out=NULL; double *pxgrid_area=NULL, *pxgrid_clon=NULL, *pxgrid_clat=NULL; int *n2_list; int nthreads, nxgrid_block_max; nx1 = *nlon_in; ny1 = *nlat_in; nx2 = *nlon_out; ny2 = *nlat_out; nx1p = nx1 + 1; nx2p = nx2 + 1; area_in = (double *)malloc(nx1*ny1*sizeof(double)); area_out = (double *)malloc(nx2*ny2*sizeof(double)); get_grid_area(nlon_in, nlat_in, lon_in, lat_in, area_in); get_grid_area(nlon_out, nlat_out, lon_out, lat_out, area_out); nthreads = 1; #if defined(_OPENMP) #pragma omp parallel nthreads = omp_get_num_threads(); #endif nblocks = nthreads; istart2 = (int *)malloc(nblocks*sizeof(int)); iend2 = (int 
*)malloc(nblocks*sizeof(int)); pstart = (int *)malloc(nblocks*sizeof(int)); pnxgrid = (int *)malloc(nblocks*sizeof(int)); nxgrid_block_max = MAXXGRID/nblocks; for(m=0; m<nblocks; m++) { pnxgrid[m] = 0; pstart[m] = m*nxgrid_block_max; } if(nblocks == 1) { pi_in = i_in; pj_in = j_in; pi_out = i_out; pj_out = j_out; pxgrid_area = xgrid_area; pxgrid_clon = xgrid_clon; pxgrid_clat = xgrid_clat; } else { pi_in = (int *)malloc(MAXXGRID*sizeof(int)); pj_in = (int *)malloc(MAXXGRID*sizeof(int)); pi_out = (int *)malloc(MAXXGRID*sizeof(int)); pj_out = (int *)malloc(MAXXGRID*sizeof(int)); pxgrid_area = (double *)malloc(MAXXGRID*sizeof(double)); pxgrid_clon = (double *)malloc(MAXXGRID*sizeof(double)); pxgrid_clat = (double *)malloc(MAXXGRID*sizeof(double)); } npts_left = nx2*ny2; nblks_left = nblocks; pos = 0; for(m=0; m<nblocks; m++) { istart2[m] = pos; npts_my = npts_left/nblks_left; iend2[m] = istart2[m] + npts_my - 1; pos = iend2[m] + 1; npts_left -= npts_my; nblks_left--; } lon_out_min_list = (double *)malloc(nx2*ny2*sizeof(double)); lon_out_max_list = (double *)malloc(nx2*ny2*sizeof(double)); lat_out_min_list = (double *)malloc(nx2*ny2*sizeof(double)); lat_out_max_list = (double *)malloc(nx2*ny2*sizeof(double)); lon_out_avg = (double *)malloc(nx2*ny2*sizeof(double)); n2_list = (int *)malloc(nx2*ny2*sizeof(int)); lon_out_list = (double *)malloc(MAX_V*nx2*ny2*sizeof(double)); lat_out_list = (double *)malloc(MAX_V*nx2*ny2*sizeof(double)); #if defined(_OPENMP) #pragma omp parallel for default(none) shared(nx2,ny2,nx2p,lon_out,lat_out,lat_out_min_list, \ lat_out_max_list,lon_out_min_list,lon_out_max_list, \ lon_out_avg,n2_list,lon_out_list,lat_out_list) #endif for(ij=0; ij<nx2*ny2; ij++){ int i2, j2, n, n0, n1, n2, n3, n2_in, l; double x2_in[MV], y2_in[MV]; i2 = ij%nx2; j2 = ij/nx2; n = j2*nx2+i2; n0 = j2*nx2p+i2; n1 = j2*nx2p+i2+1; n2 = (j2+1)*nx2p+i2+1; n3 = (j2+1)*nx2p+i2; x2_in[0] = lon_out[n0]; y2_in[0] = lat_out[n0]; x2_in[1] = lon_out[n1]; y2_in[1] = lat_out[n1]; 
x2_in[2] = lon_out[n2]; y2_in[2] = lat_out[n2]; x2_in[3] = lon_out[n3]; y2_in[3] = lat_out[n3]; lat_out_min_list[n] = minval_double(4, y2_in); lat_out_max_list[n] = maxval_double(4, y2_in); n2_in = fix_lon(x2_in, y2_in, 4, M_PI); if(n2_in > MAX_V) error_handler("create_xgrid.c: n2_in is greater than MAX_V"); lon_out_min_list[n] = minval_double(n2_in, x2_in); lon_out_max_list[n] = maxval_double(n2_in, x2_in); lon_out_avg[n] = avgval_double(n2_in, x2_in); n2_list[n] = n2_in; for(l=0; l<n2_in; l++) { lon_out_list[n*MAX_V+l] = x2_in[l]; lat_out_list[n*MAX_V+l] = y2_in[l]; } } nxgrid = 0; #if defined(_OPENMP) #pragma omp parallel for default(none) shared(nblocks,nx1,ny1,nx1p,mask_in,lon_in,lat_in, \ istart2,iend2,nx2,lat_out_min_list,lat_out_max_list, \ n2_list,lon_out_list,lat_out_list,lon_out_min_list, \ lon_out_max_list,lon_out_avg,area_in,area_out, \ pxgrid_area,pnxgrid,pxgrid_clon,pxgrid_clat,pi_in, \ pj_in,pi_out,pj_out,pstart,nthreads) #endif for(m=0; m<nblocks; m++) { int i1, j1, ij; for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) { int n0, n1, n2, n3, l,n1_in; double lat_in_min,lat_in_max,lon_in_min,lon_in_max,lon_in_avg; double x1_in[MV], y1_in[MV], x_out[MV], y_out[MV]; n0 = j1*nx1p+i1; n1 = j1*nx1p+i1+1; n2 = (j1+1)*nx1p+i1+1; n3 = (j1+1)*nx1p+i1; x1_in[0] = lon_in[n0]; y1_in[0] = lat_in[n0]; x1_in[1] = lon_in[n1]; y1_in[1] = lat_in[n1]; x1_in[2] = lon_in[n2]; y1_in[2] = lat_in[n2]; x1_in[3] = lon_in[n3]; y1_in[3] = lat_in[n3]; lat_in_min = minval_double(4, y1_in); lat_in_max = maxval_double(4, y1_in); n1_in = fix_lon(x1_in, y1_in, 4, M_PI); lon_in_min = minval_double(n1_in, x1_in); lon_in_max = maxval_double(n1_in, x1_in); lon_in_avg = avgval_double(n1_in, x1_in); for(ij=istart2[m]; ij<=iend2[m]; ij++) { int n_out, i2, j2, n2_in; double xarea, dx, lon_out_min, lon_out_max; double x2_in[MAX_V], y2_in[MAX_V]; i2 = ij%nx2; j2 = ij/nx2; if(lat_out_min_list[ij] >= lat_in_max || lat_out_max_list[ij] <= lat_in_min ) continue; 
/* adjust x2_in according to lon_in_avg*/ n2_in = n2_list[ij]; for(l=0; l<n2_in; l++) { x2_in[l] = lon_out_list[ij*MAX_V+l]; y2_in[l] = lat_out_list[ij*MAX_V+l]; } lon_out_min = lon_out_min_list[ij]; lon_out_max = lon_out_max_list[ij]; dx = lon_out_avg[ij] - lon_in_avg; if(dx < -M_PI ) { lon_out_min += TPI; lon_out_max += TPI; for (l=0; l<n2_in; l++) x2_in[l] += TPI; } else if (dx > M_PI) { lon_out_min -= TPI; lon_out_max -= TPI; for (l=0; l<n2_in; l++) x2_in[l] -= TPI; } /* x2_in should in the same range as x1_in after lon_fix, so no need to consider cyclic condition */ if(lon_out_min >= lon_in_max || lon_out_max <= lon_in_min ) continue; if ( (n_out = clip_2dx2d( x1_in, y1_in, n1_in, x2_in, y2_in, n2_in, x_out, y_out )) > 0) { double min_area; int nn; xarea = poly_area (x_out, y_out, n_out ) * mask_in[j1*nx1+i1]; min_area = min(area_in[j1*nx1+i1], area_out[j2*nx2+i2]); if( xarea/min_area > AREA_RATIO_THRESH ) { pnxgrid[m]++; if(pnxgrid[m]>= MAXXGRID/nthreads) error_handler("nxgrid is greater than MAXXGRID/nthreads, increase MAXXGRID, decrease nthreads, or increase number of MPI ranks"); nn = pstart[m] + pnxgrid[m]-1; pxgrid_area[nn] = xarea; pxgrid_clon[nn] = poly_ctrlon(x_out, y_out, n_out, lon_in_avg); pxgrid_clat[nn] = poly_ctrlat (x_out, y_out, n_out ); pi_in[nn] = i1; pj_in[nn] = j1; pi_out[nn] = i2; pj_out[nn] = j2; } } } } } /*copy data if nblocks > 1 */ if(nblocks == 1) { nxgrid = pnxgrid[0]; pi_in = NULL; pj_in = NULL; pi_out = NULL; pj_out = NULL; pxgrid_area = NULL; pxgrid_clon = NULL; pxgrid_clat = NULL; } else { int nn, i; nxgrid = 0; for(m=0; m<nblocks; m++) { for(i=0; i<pnxgrid[m]; i++) { nn = pstart[m] + i; i_in[nxgrid] = pi_in[nn]; j_in[nxgrid] = pj_in[nn]; i_out[nxgrid] = pi_out[nn]; j_out[nxgrid] = pj_out[nn]; xgrid_area[nxgrid] = pxgrid_area[nn]; xgrid_clon[nxgrid] = pxgrid_clon[nn]; xgrid_clat[nxgrid] = pxgrid_clat[nn]; nxgrid++; } } free(pi_in); free(pj_in); free(pi_out); free(pj_out); free(pxgrid_area); free(pxgrid_clon); free(pxgrid_clat); 
} free(area_in); free(area_out); free(lon_out_min_list); free(lon_out_max_list); free(lat_out_min_list); free(lat_out_max_list); free(lon_out_avg); free(n2_list); free(lon_out_list); free(lat_out_list); return nxgrid; }/* get_xgrid_2Dx2D_order2 */

/*******************************************************************************
   Sutherland-Hodgeman algorithm sequentially clips parts outside 4 boundaries
*******************************************************************************/
/* Clip the polygon (lon_in, lat_in) with n_in vertices against the
   axis-aligned rectangle [ll_lon, ur_lon] x [ll_lat, ur_lat].
   The clipped polygon is written to (lon_out, lat_out) and the number of
   output vertices is returned (0 if the polygon is entirely outside).
   Intermediate results are kept in x_tmp/y_tmp, so each pass may produce at
   most MV vertices; the caller's output arrays are assumed to be sized
   accordingly (presumably MV as well -- TODO confirm at call sites). */
int clip(const double lon_in[], const double lat_in[], int n_in, double ll_lon, double ll_lat,
	 double ur_lon, double ur_lat, double lon_out[], double lat_out[])
{
  double x_tmp[MV], y_tmp[MV], x_last, y_last;
  int i_in, i_out, n_out, inside_last, inside;

  /* clip polygon with LEFT boundary - clip V_IN to V_TMP */
  /* "last" starts at the final vertex so the polygon edge list wraps around */
  x_last = lon_in[n_in-1];
  y_last = lat_in[n_in-1];
  inside_last = (x_last >= ll_lon);
  for (i_in=0,i_out=0;i_in<n_in;i_in++) {

    /* if crossing LEFT boundary - output intersection */
    if ((inside=(lon_in[i_in] >= ll_lon))!=inside_last) {
      x_tmp[i_out] = ll_lon;
      /* linear interpolation of latitude at the boundary crossing */
      y_tmp[i_out++] = y_last + (ll_lon - x_last) * (lat_in[i_in] - y_last) / (lon_in[i_in] - x_last);
    }

    /* if "to" point is right of LEFT boundary, output it */
    if (inside) {
      x_tmp[i_out]   = lon_in[i_in];
      y_tmp[i_out++] = lat_in[i_in];
    }
    x_last = lon_in[i_in];
    y_last = lat_in[i_in];
    inside_last = inside;
  }

  /* all vertices clipped away: polygon lies entirely left of the window */
  if (!(n_out=i_out)) return(0);

  /* clip polygon with RIGHT boundary - clip V_TMP to V_OUT */
  x_last = x_tmp[n_out-1];
  y_last = y_tmp[n_out-1];
  inside_last = (x_last <= ur_lon);
  for (i_in=0,i_out=0;i_in<n_out;i_in++) {

    /* if crossing RIGHT boundary - output intersection */
    if ((inside=(x_tmp[i_in] <= ur_lon))!=inside_last) {
      lon_out[i_out] = ur_lon;
      lat_out[i_out++] = y_last + (ur_lon - x_last) * (y_tmp[i_in] - y_last) / (x_tmp[i_in] - x_last);
    }

    /* if "to" point is left of RIGHT boundary, output it */
    if (inside) {
      lon_out[i_out] = x_tmp[i_in];
      lat_out[i_out++] = y_tmp[i_in];
    }
    x_last = x_tmp[i_in];
    y_last = y_tmp[i_in];
    inside_last = inside;
  }

  if (!(n_out=i_out)) return(0);

  /* clip polygon with BOTTOM boundary - clip V_OUT to V_TMP */
  x_last = lon_out[n_out-1];
  y_last = lat_out[n_out-1];
  inside_last = (y_last >= ll_lat);
  for (i_in=0,i_out=0;i_in<n_out;i_in++) {

    /* if crossing BOTTOM boundary - output intersection */
    if ((inside=(lat_out[i_in] >= ll_lat))!=inside_last) {
      y_tmp[i_out] = ll_lat;
      x_tmp[i_out++] = x_last + (ll_lat - y_last) * (lon_out[i_in] - x_last) / (lat_out[i_in] - y_last);
    }

    /* if "to" point is above BOTTOM boundary, output it */
    if (inside) {
      x_tmp[i_out] = lon_out[i_in];
      y_tmp[i_out++] = lat_out[i_in];
    }
    x_last = lon_out[i_in];
    y_last = lat_out[i_in];
    inside_last = inside;
  }

  if (!(n_out=i_out)) return(0);

  /* clip polygon with TOP boundary - clip V_TMP to V_OUT */
  x_last = x_tmp[n_out-1];
  y_last = y_tmp[n_out-1];
  inside_last = (y_last <= ur_lat);
  for (i_in=0,i_out=0;i_in<n_out;i_in++) {

    /* if crossing TOP boundary - output intersection */
    if ((inside=(y_tmp[i_in] <= ur_lat))!=inside_last) {
      lat_out[i_out] = ur_lat;
      lon_out[i_out++] = x_last + (ur_lat - y_last) * (x_tmp[i_in] - x_last) / (y_tmp[i_in] - y_last);
    }

    /* if "to" point is below TOP boundary, output it */
    if (inside) {
      lon_out[i_out] = x_tmp[i_in];
      lat_out[i_out++] = y_tmp[i_in];
    }
    x_last = x_tmp[i_in];
    y_last = y_tmp[i_in];
    inside_last = inside;
  }

  return(i_out);
} /* clip */

/*******************************************************************************
  Revise Sutherland-Hodgeman algorithm to find the vertices of the overlapping
  between any two grid boxes. It returns the number of vertices for the exchange
  grid.
*******************************************************************************/
/* Clip the subject polygon (lon1_in, lat1_in) against each edge of the clip
   polygon (lon2_in, lat2_in), writing the overlap polygon to
   (lon_out, lat_out) and returning its vertex count (0 when disjoint).
   Both polygons are assumed convex with consistent winding; up to MV
   intermediate vertices are held in lon_tmp/lat_tmp. */
int clip_2dx2d(const double lon1_in[], const double lat1_in[], int n1_in,
	       const double lon2_in[], const double lat2_in[], int n2_in,
	       double lon_out[], double lat_out[])
{
  double lon_tmp[MV], lat_tmp[MV];
  double x1_0, y1_0, x1_1, y1_1, x2_0, y2_0, x2_1, y2_1;
  double dx1, dy1, dx2, dy2, determ, ds1, ds2;
  int i_out, n_out, inside_last, inside, i1, i2;

  /* clip polygon with each boundary of the polygon */
  /* We treat lon1_in/lat1_in as clip polygon and lon2_in/lat2_in as subject polygon */
  n_out = n1_in;
  for(i1=0; i1<n1_in; i1++) {
    lon_tmp[i1] = lon1_in[i1];
    lat_tmp[i1] = lat1_in[i1];
  }

  /* start from the closing edge (last vertex -> first vertex) */
  x2_0 = lon2_in[n2_in-1];
  y2_0 = lat2_in[n2_in-1];
  for(i2=0; i2<n2_in; i2++) {             /* one pass per clip-polygon edge */
    x2_1 = lon2_in[i2];
    y2_1 = lat2_in[i2];
    x1_0 = lon_tmp[n_out-1];
    y1_0 = lat_tmp[n_out-1];
    inside_last = inside_edge( x2_0, y2_0, x2_1, y2_1, x1_0, y1_0);
    for(i1=0, i_out=0; i1<n_out; i1++) {
      x1_1 = lon_tmp[i1];
      y1_1 = lat_tmp[i1];
      if((inside = inside_edge(x2_0, y2_0, x2_1, y2_1, x1_1, y1_1)) != inside_last ) {
        /* there is intersection, the line between <x1_0,y1_0> and <x1_1,y1_1>
           should not be parallel to the line between <x2_0,y2_0> and <x2_1,y2_1>
           may need to consider truncation error */
	dy1 = y1_1-y1_0;
	dy2 = y2_1-y2_0;
	dx1 = x1_1-x1_0;
	dx2 = x2_1-x2_0;
	ds1 = y1_0*x1_1 - y1_1*x1_0;   /* cross terms of the two line equations */
	ds2 = y2_0*x2_1 - y2_1*x2_0;
	determ = dy2*dx1 - dy1*dx2;    /* zero only when the segments are parallel */
        if(fabs(determ) < EPSLN30) {
	  error_handler("the line between <x1_0,y1_0> and <x1_1,y1_1> should not parallel to "
				  "the line between <x2_0,y2_0> and <x2_1,y2_1>");
	}
	lon_out[i_out]   = (dx2*ds1 - dx1*ds2)/determ;
	lat_out[i_out++] = (dy2*ds1 - dy1*ds2)/determ;
      }
      if(inside) {
	lon_out[i_out]   = x1_1;
	lat_out[i_out++] = y1_1;
      }
      x1_0 = x1_1;
      y1_0 = y1_1;
      inside_last = inside;
    }
    /* the whole subject polygon is outside this edge: no overlap */
    if(!(n_out=i_out)) return 0;
    /* result of this pass becomes the subject for the next edge */
    for(i1=0; i1<n_out; i1++) {
      lon_tmp[i1] = lon_out[i1];
      lat_tmp[i1] = lat_out[i1];
    }
    /* shift the starting point */
    x2_0 = x2_1;
    y2_0 = y2_1;
  }
  return(n_out);
} /* clip */

/*#define
debug_test_create_xgrid*/ int create_xgrid_great_circle_(const int *nlon_in, const int *nlat_in, const int *nlon_out, const int *nlat_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area, double *xgrid_clon, double *xgrid_clat) { int nxgrid; nxgrid = create_xgrid_great_circle(nlon_in, nlat_in, nlon_out, nlat_out, lon_in, lat_in, lon_out, lat_out, mask_in, i_in, j_in, i_out, j_out, xgrid_area, xgrid_clon, xgrid_clat); return nxgrid; } int create_xgrid_great_circle(const int *nlon_in, const int *nlat_in, const int *nlon_out, const int *nlat_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *i_out, int *j_out, double *xgrid_area, double *xgrid_clon, double *xgrid_clat) { int nx1, nx2, ny1, ny2, nx1p, nx2p, ny1p, ny2p, nxgrid, n1_in, n2_in; int n0, n1, n2, n3, i1, j1, i2, j2; double x1_in[MV], y1_in[MV], z1_in[MV]; double x2_in[MV], y2_in[MV], z2_in[MV]; double x_out[MV], y_out[MV], z_out[MV]; double *x1=NULL, *y1=NULL, *z1=NULL; double *x2=NULL, *y2=NULL, *z2=NULL; double *area1, *area2, min_area; nx1 = *nlon_in; ny1 = *nlat_in; nx2 = *nlon_out; ny2 = *nlat_out; nxgrid = 0; nx1p = nx1 + 1; nx2p = nx2 + 1; ny1p = ny1 + 1; ny2p = ny2 + 1; /* first convert lon-lat to cartesian coordinates */ x1 = (double *)malloc(nx1p*ny1p*sizeof(double)); y1 = (double *)malloc(nx1p*ny1p*sizeof(double)); z1 = (double *)malloc(nx1p*ny1p*sizeof(double)); x2 = (double *)malloc(nx2p*ny2p*sizeof(double)); y2 = (double *)malloc(nx2p*ny2p*sizeof(double)); z2 = (double *)malloc(nx2p*ny2p*sizeof(double)); latlon2xyz(nx1p*ny1p, lon_in, lat_in, x1, y1, z1); latlon2xyz(nx2p*ny2p, lon_out, lat_out, x2, y2, z2); area1 = (double *)malloc(nx1*ny1*sizeof(double)); area2 = (double *)malloc(nx2*ny2*sizeof(double)); get_grid_great_circle_area(nlon_in, nlat_in, lon_in, lat_in, area1); 
get_grid_great_circle_area(nlon_out, nlat_out, lon_out, lat_out, area2); n1_in = 4; n2_in = 4; for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) { /* clockwise */ n0 = j1*nx1p+i1; n1 = (j1+1)*nx1p+i1; n2 = (j1+1)*nx1p+i1+1; n3 = j1*nx1p+i1+1; x1_in[0] = x1[n0]; y1_in[0] = y1[n0]; z1_in[0] = z1[n0]; x1_in[1] = x1[n1]; y1_in[1] = y1[n1]; z1_in[1] = z1[n1]; x1_in[2] = x1[n2]; y1_in[2] = y1[n2]; z1_in[2] = z1[n2]; x1_in[3] = x1[n3]; y1_in[3] = y1[n3]; z1_in[3] = z1[n3]; for(j2=0; j2<ny2; j2++) for(i2=0; i2<nx2; i2++) { int n_out; double xarea; n0 = j2*nx2p+i2; n1 = (j2+1)*nx2p+i2; n2 = (j2+1)*nx2p+i2+1; n3 = j2*nx2p+i2+1; x2_in[0] = x2[n0]; y2_in[0] = y2[n0]; z2_in[0] = z2[n0]; x2_in[1] = x2[n1]; y2_in[1] = y2[n1]; z2_in[1] = z2[n1]; x2_in[2] = x2[n2]; y2_in[2] = y2[n2]; z2_in[2] = z2[n2]; x2_in[3] = x2[n3]; y2_in[3] = y2[n3]; z2_in[3] = z2[n3]; if ( (n_out = clip_2dx2d_great_circle( x1_in, y1_in, z1_in, n1_in, x2_in, y2_in, z2_in, n2_in, x_out, y_out, z_out)) > 0) { xarea = great_circle_area ( n_out, x_out, y_out, z_out ) * mask_in[j1*nx1+i1]; min_area = min(area1[j1*nx1+i1], area2[j2*nx2+i2]); if( xarea/min_area > AREA_RATIO_THRESH ) { #ifdef debug_test_create_xgrid printf("(i2,j2)=(%d,%d), (i1,j1)=(%d,%d), xarea=%g\n", i2, j2, i1, j1, xarea); #endif xgrid_area[nxgrid] = xarea; xgrid_clon[nxgrid] = 0; /*z1l: will be developed very soon */ xgrid_clat[nxgrid] = 0; i_in[nxgrid] = i1; j_in[nxgrid] = j1; i_out[nxgrid] = i2; j_out[nxgrid] = j2; ++nxgrid; if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID"); } } } } free(area1); free(area2); free(x1); free(y1); free(z1); free(x2); free(y2); free(z2); return nxgrid; }/* create_xgrid_great_circle */ int create_xgrid_great_circle_ug_(const int *nlon_in, const int *nlat_in, const int *npts_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *l_out, double 
*xgrid_area, double *xgrid_clon, double *xgrid_clat) { int nxgrid; nxgrid = create_xgrid_great_circle_ug(nlon_in, nlat_in, npts_out, lon_in, lat_in, lon_out, lat_out, mask_in, i_in, j_in, l_out, xgrid_area, xgrid_clon, xgrid_clat); return nxgrid; } int create_xgrid_great_circle_ug(const int *nlon_in, const int *nlat_in, const int *npts_out, const double *lon_in, const double *lat_in, const double *lon_out, const double *lat_out, const double *mask_in, int *i_in, int *j_in, int *l_out, double *xgrid_area, double *xgrid_clon, double *xgrid_clat) { int nx1, ny1, npts2, nx1p, ny1p, nxgrid, n1_in, n2_in, nv; int n0, n1, n2, n3, i1, j1, l2; double x1_in[MV], y1_in[MV], z1_in[MV]; double x2_in[MV], y2_in[MV], z2_in[MV]; double x_out[MV], y_out[MV], z_out[MV]; double *x1=NULL, *y1=NULL, *z1=NULL; double *x2=NULL, *y2=NULL, *z2=NULL; double *area1, *area2, min_area; nx1 = *nlon_in; ny1 = *nlat_in; nv = 4; npts2 = *npts_out; nxgrid = 0; nx1p = nx1 + 1; ny1p = ny1 + 1; /* first convert lon-lat to cartesian coordinates */ x1 = (double *)malloc(nx1p*ny1p*sizeof(double)); y1 = (double *)malloc(nx1p*ny1p*sizeof(double)); z1 = (double *)malloc(nx1p*ny1p*sizeof(double)); x2 = (double *)malloc(npts2*nv*sizeof(double)); y2 = (double *)malloc(npts2*nv*sizeof(double)); z2 = (double *)malloc(npts2*nv*sizeof(double)); latlon2xyz(nx1p*ny1p, lon_in, lat_in, x1, y1, z1); latlon2xyz(npts2*nv, lon_out, lat_out, x2, y2, z2); area1 = (double *)malloc(nx1*ny1*sizeof(double)); area2 = (double *)malloc(npts2*sizeof(double)); get_grid_great_circle_area(nlon_in, nlat_in, lon_in, lat_in, area1); get_grid_great_circle_area_ug(npts_out, lon_out, lat_out, area2); n1_in = 4; n2_in = 4; for(j1=0; j1<ny1; j1++) for(i1=0; i1<nx1; i1++) if( mask_in[j1*nx1+i1] > MASK_THRESH ) { /* clockwise */ n0 = j1*nx1p+i1; n1 = (j1+1)*nx1p+i1; n2 = (j1+1)*nx1p+i1+1; n3 = j1*nx1p+i1+1; x1_in[0] = x1[n0]; y1_in[0] = y1[n0]; z1_in[0] = z1[n0]; x1_in[1] = x1[n1]; y1_in[1] = y1[n1]; z1_in[1] = z1[n1]; x1_in[2] = x1[n2]; 
y1_in[2] = y1[n2]; z1_in[2] = z1[n2]; x1_in[3] = x1[n3]; y1_in[3] = y1[n3]; z1_in[3] = z1[n3]; for(l2=0; l2<npts2; l2++) { int n_out; double xarea; n0 = l2*nv; n1 = l2*nv+1; n2 = l2*nv+2; n3 = l2*nv+3; x2_in[0] = x2[n0]; y2_in[0] = y2[n0]; z2_in[0] = z2[n0]; x2_in[1] = x2[n1]; y2_in[1] = y2[n1]; z2_in[1] = z2[n1]; x2_in[2] = x2[n2]; y2_in[2] = y2[n2]; z2_in[2] = z2[n2]; x2_in[3] = x2[n3]; y2_in[3] = y2[n3]; z2_in[3] = z2[n3]; if ( (n_out = clip_2dx2d_great_circle( x1_in, y1_in, z1_in, n1_in, x2_in, y2_in, z2_in, n2_in, x_out, y_out, z_out)) > 0) { xarea = great_circle_area ( n_out, x_out, y_out, z_out ) * mask_in[j1*nx1+i1]; min_area = min(area1[j1*nx1+i1], area2[l2]); if( xarea/min_area > AREA_RATIO_THRESH ) { #ifdef debug_test_create_xgrid printf("(l2)=(%d,%d), (i1,j1)=(%d,%d), xarea=%g\n", l2, i1, j1, xarea); #endif xgrid_area[nxgrid] = xarea; xgrid_clon[nxgrid] = 0; /*z1l: will be developed very soon */ xgrid_clat[nxgrid] = 0; i_in[nxgrid] = i1; j_in[nxgrid] = j1; l_out[nxgrid] = l2; ++nxgrid; if(nxgrid > MAXXGRID) error_handler("nxgrid is greater than MAXXGRID, increase MAXXGRID"); } } } } free(area1); free(area2); free(x1); free(y1); free(z1); free(x2); free(y2); free(z2); return nxgrid; }/* create_xgrid_great_circle_ug */ /******************************************************************************* Revise Sutherland-Hodgeman algorithm to find the vertices of the overlapping between any two grid boxes. It return the number of vertices for the exchange grid. Each edge of grid box is a part of great circle. All the points are cartesian coordinates. Here we are assuming each polygon is convex. RANGE_CHECK_CRITERIA is used to determine if the two grid boxes are possible to be overlap. The size should be between 0 and 0.5. The larger the range_check_criteria, the more expensive of the computatioin. When the value is close to 0, some small exchange grid might be lost. Suggest to use value 0.05 for C48. 
*******************************************************************************/
int clip_2dx2d_great_circle(const double x1_in[], const double y1_in[], const double z1_in[], int n1_in,
			    const double x2_in[], const double y2_in[], const double z2_in [], int n2_in,
			    double x_out[], double y_out[], double z_out[])
{
  struct Node *grid1List=NULL;
  struct Node *grid2List=NULL;
  struct Node *intersectList=NULL;
  struct Node *polyList=NULL;
  struct Node *curList=NULL;
  struct Node *firstIntersect=NULL, *curIntersect=NULL;
  struct Node *temp1=NULL, *temp2=NULL, *temp=NULL;
  int i1, i2, i1p, i2p, i2p2, npts1, npts2;
  int nintersect, n_out;
  int maxiter1, maxiter2, iter1, iter2;
  int found1, found2, curListNum;
  int has_inbound, inbound;
  double pt1[MV][3], pt2[MV][3];
  double *p1_0=NULL, *p1_1=NULL;
  double *p2_0=NULL, *p2_1=NULL, *p2_2=NULL;
  double intersect[3];
  double u1, u2;
  double min_x1, max_x1, min_y1, max_y1, min_z1, max_z1;
  double min_x2, max_x2, min_y2, max_y2, min_z2, max_z2;

  /* first check the min and max of (x1_in, y1_in, z1_in) with (x2_in, y2_in, z2_in) */
  /* cheap bounding-box rejection before the expensive clipping below */
  min_x1 = minval_double(n1_in, x1_in);
  max_x2 = maxval_double(n2_in, x2_in);
  if(min_x1 >= max_x2+RANGE_CHECK_CRITERIA) return 0;
  max_x1 = maxval_double(n1_in, x1_in);
  min_x2 = minval_double(n2_in, x2_in);
  if(min_x2 >= max_x1+RANGE_CHECK_CRITERIA) return 0;

  min_y1 = minval_double(n1_in, y1_in);
  max_y2 = maxval_double(n2_in, y2_in);
  if(min_y1 >= max_y2+RANGE_CHECK_CRITERIA) return 0;
  max_y1 = maxval_double(n1_in, y1_in);
  min_y2 = minval_double(n2_in, y2_in);
  if(min_y2 >= max_y1+RANGE_CHECK_CRITERIA) return 0;

  min_z1 = minval_double(n1_in, z1_in);
  max_z2 = maxval_double(n2_in, z2_in);
  if(min_z1 >= max_z2+RANGE_CHECK_CRITERIA) return 0;
  max_z1 = maxval_double(n1_in, z1_in);
  min_z2 = minval_double(n2_in, z2_in);
  if(min_z2 >= max_z1+RANGE_CHECK_CRITERIA) return 0;

  /* reuse the statically managed node pool for this call */
  rewindList();

  grid1List = getNext();
  grid2List = getNext();
  intersectList = getNext();
  polyList = getNext();

  /* insert points into SubjList and ClipList */
  for(i1=0; i1<n1_in; i1++) addEnd(grid1List, x1_in[i1], y1_in[i1], z1_in[i1], 0, 0, 0, -1);
  for(i2=0; i2<n2_in; i2++) addEnd(grid2List, x2_in[i2], y2_in[i2], z2_in[i2], 0, 0, 0, -1);
  npts1 = length(grid1List);
  npts2 = length(grid2List);

  n_out = 0;
  /* set the inside value */
#ifdef debug_test_create_xgrid
  printf("\nNOTE from clip_2dx2d_great_circle: begin to set inside value grid1List\n");
#endif
  /* first check number of points in grid1 is inside grid2 */
  temp = grid1List;
  while(temp) {
    if(insidePolygon(temp, grid2List))
      temp->isInside = 1;
    else
      temp->isInside = 0;
    temp = getNextNode(temp);
  }
#ifdef debug_test_create_xgrid
  printf("\nNOTE from clip_2dx2d_great_circle: begin to set inside value of grid2List\n");
#endif
  /* check if grid2List is inside grid1List */
  temp = grid2List;
  while(temp) {
    if(insidePolygon(temp, grid1List))
      temp->isInside = 1;
    else
      temp->isInside = 0;
    temp = getNextNode(temp);
  }

  /* make sure the grid box is clockwise */
  /*make sure each polygon is convex, which is equivalent that the great_circle_area is positive */
  if( gridArea(grid1List) <= 0 )
    error_handler("create_xgrid.c(clip_2dx2d_great_circle): grid box 1 is not convex");
  if( gridArea(grid2List) <= 0 )
    error_handler("create_xgrid.c(clip_2dx2d_great_circle): grid box 2 is not convex");

#ifdef debug_test_create_xgrid
  printNode(grid1List, "grid1List");
  printNode(grid2List, "grid2List");
#endif
  /* get the coordinates from grid1List and grid2List.
     Please note npts1 might not equal n1_in, npts2 might not equal n2_in because of pole
  */
  temp = grid1List;
  for(i1=0; i1<npts1; i1++) {
    getCoordinates(temp, pt1[i1]);
    temp = temp->Next;
  }
  temp = grid2List;
  for(i2=0; i2<npts2; i2++) {
    getCoordinates(temp, pt2[i2]);
    temp = temp->Next;
  }

  firstIntersect=getNext();
  curIntersect = getNext();

#ifdef debug_test_create_xgrid
  printf("\n\n************************ Start line_intersect_2D_3D ******************************\n");
#endif
  /* first find all the intersection points */
  /* every edge of polygon 1 is tested against every edge of polygon 2 */
  nintersect = 0;
  for(i1=0; i1<npts1; i1++) {
    i1p = (i1+1)%npts1;
    p1_0 = pt1[i1];
    p1_1 = pt1[i1p];
    for(i2=0; i2<npts2; i2++) {
      i2p = (i2+1)%npts2;
      i2p2 = (i2+2)%npts2;
      p2_0 = pt2[i2];
      p2_1 = pt2[i2p];
      p2_2 = pt2[i2p2];
#ifdef debug_test_create_xgrid
      printf("\n******************************************************************************\n");
      printf(" i1 = %d, i2 = %d \n", i1, i2);
      printf("********************************************************************************\n");
#endif
      if( line_intersect_2D_3D(p1_0, p1_1, p2_0, p2_1, p2_2, intersect, &u1, &u2, &inbound) ) {
	/* from the value of u1, u2 and inbound, we can partially decide if a point is inside or outside of polygon */
	/* add the intersection into intersectList, The intersection might already be in
	   intersectList and will be taken care addIntersect */
	if(addIntersect(intersectList, intersect[0], intersect[1], intersect[2], 1, u1, u2, inbound, i1, i1p, i2, i2p)) {
	  /* add the intersection into the grid1List */
	  if(u1 == 1) {
	    insertIntersect(grid1List, intersect[0], intersect[1], intersect[2], 0.0, u2, inbound, p1_1[0], p1_1[1], p1_1[2]);
	  }
	  else
	    insertIntersect(grid1List, intersect[0], intersect[1], intersect[2], u1, u2, inbound, p1_0[0], p1_0[1], p1_0[2]);
	  /* when u1 == 0 or 1, need to adjust the vertex to intersect value for roundoff error */
	  if(u1==1) {
	    p1_1[0] = intersect[0];
	    p1_1[1] = intersect[1];
	    p1_1[2] = intersect[2];
	  }
	  else if(u1 == 0) {
	    p1_0[0] = intersect[0];
	    p1_0[1] = intersect[1];
	    p1_0[2] = intersect[2];
	  }
	  /* add the intersection into the grid2List */
	  if(u2==1)
	    insertIntersect(grid2List, intersect[0], intersect[1], intersect[2], 0.0, u1, 0, p2_1[0], p2_1[1], p2_1[2]);
	  else
	    insertIntersect(grid2List, intersect[0], intersect[1], intersect[2], u2, u1, 0, p2_0[0], p2_0[1], p2_0[2]);
	  /* when u2 == 0 or 1, need to adjust the vertex to intersect value for roundoff error */
	  if(u2==1) {
	    p2_1[0] = intersect[0];
	    p2_1[1] = intersect[1];
	    p2_1[2] = intersect[2];
	  }
	  else if(u2 == 0) {
	    p2_0[0] = intersect[0];
	    p2_0[1] = intersect[1];
	    p2_0[2] = intersect[2];
	  }
	}
      }
    }
  }

  /* set inbound value for the points in intersectList that has inbound == 0,
     this will also set some inbound value of the points in grid1List */
  /* get the first point in intersectList has inbound = 2, if not, set inbound value */
  has_inbound = 0;
  /* loop through intersectList to see if there is any has inbound=1 or 2 */
  temp = intersectList;
  nintersect = length(intersectList);
  if(nintersect > 1) {
    getFirstInbound(intersectList, firstIntersect);
    if(firstIntersect->initialized) {
      has_inbound = 1;
    }
  }

  /* when has_inbound == 0, get the grid1List and grid2List */
  if( !has_inbound && nintersect > 1) {
    setInbound(intersectList, grid1List);
    getFirstInbound(intersectList, firstIntersect);
    if(firstIntersect->initialized) has_inbound = 1;
  }

  /* if has_inbound = 1, find the overlapping */
  n_out = 0;

  if(has_inbound) {
    maxiter1 = nintersect;
#ifdef debug_test_create_xgrid
    printf("\nNOTE from clip_2dx2d_great_circle: number of intersect is %d\n", nintersect);
    printf("\n size of grid2List is %d, size of grid1List is %d\n", length(grid2List), length(grid1List));
    printNode(intersectList, "beginning intersection list");
    printNode(grid2List, "beginning clip list");
    printNode(grid1List, "beginning subj list");
    printf("\n************************ End line_intersect_2D_3D **********************************\n\n");
#endif
    temp1 = getNode(grid1List, *firstIntersect);
    if( temp1 == NULL) {
      /* diagnostic dump of both polygons before aborting */
      double lon[10], lat[10];
      int i;
      xyz2latlon(n1_in, x1_in, y1_in, z1_in, lon, lat);
      for(i=0; i< n1_in; i++) printf("lon1 = %g, lat1 = %g\n", lon[i]*R2D, lat[i]*R2D);
      printf("\n");
      xyz2latlon(n2_in, x2_in, y2_in, z2_in, lon, lat);
      for(i=0; i< n2_in; i++) printf("lon2 = %g, lat2 = %g\n", lon[i]*R2D, lat[i]*R2D);
      printf("\n");
      error_handler("firstIntersect is not in the grid1List");
    }
    addNode(polyList, *firstIntersect);
    nintersect--;
#ifdef debug_test_create_xgrid
    printNode(polyList, "polyList at stage 1");
#endif

    /* Loop over the grid1List and grid2List to find again the firstIntersect */
    curList = grid1List;
    curListNum = 0;

    /* Loop through curList to find the next intersection, the loop will end
       when come back to firstIntersect */
    copyNode(curIntersect, *firstIntersect);
    iter1 = 0;
    found1 = 0;

    while( iter1 < maxiter1 ) {
#ifdef debug_test_create_xgrid
      printf("\n----------- At iteration = %d\n\n", iter1+1 );
      printNode(curIntersect, "curIntersect at the begining of iter1");
#endif
      /* find the curIntersect in curList and get the next intersection points */
      temp1 =  getNode(curList, *curIntersect);
      temp2 = temp1->Next;
      if( temp2 == NULL ) temp2 = curList;   /* the node lists are circular in intent */

      maxiter2 = length(curList);
      found2 = 0;
      iter2  = 0;
      /* Loop until find the next intersection */
      while( iter2 < maxiter2 ) {
	int temp2IsIntersect;

	temp2IsIntersect = 0;
	if( isIntersect( *temp2 ) ) { /* copy the point and switch to the grid2List */
	  struct Node *temp3;

	  /* first check if temp2 is the firstIntersect */
	  if( sameNode( *temp2, *firstIntersect) ) {
	    found1 = 1;
	    break;
	  }

	  temp3 = temp2->Next;
	  if( temp3 == NULL) temp3 = curList;
	  if( temp3 == NULL) error_handler("creat_xgrid.c: temp3 can not be NULL");
	  found2 = 1;
	  /* if next node is inside or an intersection,
	     need to keep on curList
	  */
	  temp2IsIntersect = 1;
	  if( isIntersect(*temp3) || (temp3->isInside == 1)  ) found2 = 0;
	}
	if(found2) {
	  copyNode(curIntersect, *temp2);
	  break;
	}
	else {
	  addNode(polyList, *temp2);
#ifdef debug_test_create_xgrid
	  printNode(polyList, "polyList at stage 2");
#endif
	  if(temp2IsIntersect) {
	    nintersect--;
	  }
	}
	temp2 = temp2->Next;
	if( temp2 == NULL ) temp2 = curList;
	iter2 ++;
      }
      if(found1) break;

      if( !found2 ) error_handler(" not found the next intersection ");

      /* if find the first intersection, the poly found */
      if( sameNode( *curIntersect, *firstIntersect) ) {
	found1 = 1;
	break;
      }

      /* add curIntersect to polyList and remove it from intersectList and curList */
      addNode(polyList, *curIntersect);
#ifdef debug_test_create_xgrid
      printNode(polyList, "polyList at stage 3");
#endif
      nintersect--;

      /* switch curList between the two polygons at every intersection */
      if( curListNum == 0) {
	curList = grid2List;
	curListNum = 1;
      }
      else {
	curList = grid1List;
	curListNum = 0;
      }
      iter1++;
    }
    if(!found1) error_handler("not return back to the first intersection");

    /* currently we are only clipping convex polygon to convex polygon */
    if( nintersect > 0) error_handler("After clipping, nintersect should be 0");

    /* copy the polygon to x_out, y_out, z_out */
    temp1 = polyList;
    while (temp1 != NULL) {
      getCoordinate(*temp1, x_out+n_out, y_out+n_out, z_out+n_out);
      temp1 = temp1->Next;
      n_out++;
    }

    /* if(n_out < 3) error_handler(" The clipped region has < 3 vertices"); */
    /* degenerate (< 3 vertex) results are treated as no overlap */
    if( n_out < 3) n_out = 0;
#ifdef debug_test_create_xgrid
    printNode(polyList, "polyList after clipping");
#endif
  }

  /* check if grid1 is inside grid2 */
  if(n_out==0){
    /* first check number of points in grid1 is inside grid2 */
    int n, n1in2;
    /* One possible is that grid1List is inside grid2List */
#ifdef debug_test_create_xgrid
    printf("\nNOTE from clip_2dx2d_great_circle: check if grid1 is inside grid2\n");
#endif
    n1in2 = 0;
    temp = grid1List;
    while(temp) {
      if(temp->intersect != 1) {
#ifdef debug_test_create_xgrid
	printf("grid1->isInside = %d\n", temp->isInside);
#endif
	if( temp->isInside == 1) n1in2++;
      }
      temp = getNextNode(temp);
    }
    if(npts1==n1in2) { /* grid1 is inside grid2 */
      n_out = npts1;
      n = 0;
      temp = grid1List;
      while( temp ) {
	getCoordinate(*temp, &x_out[n], &y_out[n], &z_out[n]);
	n++;
	temp = getNextNode(temp);
      }
    }
    if(n_out>0) return n_out;
  }

  /* check if grid2List is inside grid1List */
  if(n_out ==0){
    int n, n2in1;
#ifdef debug_test_create_xgrid
    printf("\nNOTE from clip_2dx2d_great_circle: check if grid2 is inside grid1\n");
#endif
    temp = grid2List;
    n2in1 = 0;
    while(temp) {
      if(temp->intersect != 1) {
#ifdef debug_test_create_xgrid
	printf("grid2->isInside = %d\n", temp->isInside);
#endif
	if( temp->isInside == 1) n2in1++;
      }
      temp = getNextNode(temp);
    }

    if(npts2==n2in1) { /* grid2 is inside grid1 */
      n_out = npts2;
      n = 0;
      temp = grid2List;
      while( temp ) {
	getCoordinate(*temp, &x_out[n], &y_out[n], &z_out[n]);
	n++;
	temp = getNextNode(temp);
      }
    }
  }

  return n_out;
}

/* Intersects between the line a and the segment s
   where both line and segment are great circle lines on the sphere represented by
   3D cartesian points.
   [sin sout] are the ends of a line segment
   returns true if the lines could be intersected, false otherwise.
   inbound means the direction of (a1,a2) go inside or outside of (q1,q2,q3)
*/

int line_intersect_2D_3D(double *a1, double *a2, double *q1, double *q2, double *q3,
			 double *intersect, double *u_a, double *u_q, int *inbound){

  /* Do this intersection by representing the line a1 to a2 as a plane through the
     two line points and the origin of the sphere (0,0,0). This is the
     definition of a great circle arc.
*/ double plane[9]; double plane_p[2]; double u; double p1[3], v1[3], v2[3]; double c1[3], c2[3], c3[3]; double coincident, sense, norm; int i; int is_inter1, is_inter2; *inbound = 0; /* first check if any vertices are the same */ if(samePoint(a1[0], a1[1], a1[2], q1[0], q1[1], q1[2])) { *u_a = 0; *u_q = 0; intersect[0] = a1[0]; intersect[1] = a1[1]; intersect[2] = a1[2]; #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: u_a = %19.15f, u_q=%19.15f, inbound=%d\n", *u_a, *u_q, *inbound); #endif return 1; } else if (samePoint(a1[0], a1[1], a1[2], q2[0], q2[1], q2[2])) { *u_a = 0; *u_q = 1; intersect[0] = a1[0]; intersect[1] = a1[1]; intersect[2] = a1[2]; #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: u_a = %19.15f, u_q=%19.15f, inbound=%d\n", *u_a, *u_q, *inbound); #endif return 1; } else if(samePoint(a2[0], a2[1], a2[2], q1[0], q1[1], q1[2])) { #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: u_a = %19.15f, u_q=%19.15f, inbound=%d\n", *u_a, *u_q, *inbound); #endif *u_a = 1; *u_q = 0; intersect[0] = a2[0]; intersect[1] = a2[1]; intersect[2] = a2[2]; return 1; } else if (samePoint(a2[0], a2[1], a2[2], q2[0], q2[1], q2[2])) { #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: u_a = %19.15f, u_q=%19.15f, inbound=%d\n", *u_a, *u_q, *inbound); #endif *u_a = 1; *u_q = 1; intersect[0] = a2[0]; intersect[1] = a2[1]; intersect[2] = a2[2]; return 1; } /* Load points defining plane into variable (these are supposed to be in counterclockwise order) */ plane[0]=q1[0]; plane[1]=q1[1]; plane[2]=q1[2]; plane[3]=q2[0]; plane[4]=q2[1]; plane[5]=q2[2]; plane[6]=0.0; plane[7]=0.0; plane[8]=0.0; /* Intersect the segment with the plane */ is_inter1 = intersect_tri_with_line(plane, a1, a2, plane_p, u_a); if(!is_inter1) return 0; if(fabs(*u_a) < EPSLN8) *u_a = 0; if(fabs(*u_a-1) < EPSLN8) *u_a = 1; #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: u_a = %19.15f\n", *u_a); 
#endif if( (*u_a < 0) || (*u_a > 1) ) return 0; /* Load points defining plane into variable (these are supposed to be in counterclockwise order) */ plane[0]=a1[0]; plane[1]=a1[1]; plane[2]=a1[2]; plane[3]=a2[0]; plane[4]=a2[1]; plane[5]=a2[2]; plane[6]=0.0; plane[7]=0.0; plane[8]=0.0; /* Intersect the segment with the plane */ is_inter2 = intersect_tri_with_line(plane, q1, q2, plane_p, u_q); if(!is_inter2) return 0; if(fabs(*u_q) < EPSLN8) *u_q = 0; if(fabs(*u_q-1) < EPSLN8) *u_q = 1; #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: u_q = %19.15f\n", *u_q); #endif if( (*u_q < 0) || (*u_q > 1) ) return 0; u =*u_a; /* The two planes are coincidental */ vect_cross(a1, a2, c1); vect_cross(q1, q2, c2); vect_cross(c1, c2, c3); coincident = metric(c3); if(fabs(coincident) < EPSLN30) return 0; /* Calculate point of intersection */ intersect[0]=a1[0] + u*(a2[0]-a1[0]); intersect[1]=a1[1] + u*(a2[1]-a1[1]); intersect[2]=a1[2] + u*(a2[2]-a1[2]); norm = metric( intersect ); for(i = 0; i < 3; i ++) intersect[i] /= norm; /* when u_q =0 or u_q =1, the following could not decide the inbound value */ if(*u_q != 0 && *u_q != 1){ p1[0] = a2[0]-a1[0]; p1[1] = a2[1]-a1[1]; p1[2] = a2[2]-a1[2]; v1[0] = q2[0]-q1[0]; v1[1] = q2[1]-q1[1]; v1[2] = q2[2]-q1[2]; v2[0] = q3[0]-q2[0]; v2[1] = q3[1]-q2[1]; v2[2] = q3[2]-q2[2]; vect_cross(v1, v2, c1); vect_cross(v1, p1, c2); sense = dot(c1, c2); *inbound = 1; if(sense > 0) *inbound = 2; /* v1 going into v2 in CCW sense */ } #ifdef debug_test_create_xgrid printf("\nNOTE from line_intersect_2D_3D: inbound=%d\n", *inbound); #endif return 1; } /*------------------------------------------------------------------------------ double poly_ctrlat(const double x[], const double y[], int n) This routine is used to calculate the latitude of the centroid ---------------------------------------------------------------------------*/ double poly_ctrlat(const double x[], const double y[], int n) { double ctrlat = 0.0; int i; for 
(i=0;i<n;i++) {
    int ip = (i+1) % n;
    double dx = (x[ip]-x[i]);
    double dy, avg_y, hdy;
    double lat1, lat2;
    lat1 = y[ip];
    lat2 = y[i];
    dy = lat2 - lat1;
    hdy = dy*0.5;
    avg_y = (lat1+lat2)*0.5;
    if (dx==0.0) continue;
    /* keep the longitude difference of each edge in (-pi, pi] so edges never
       wrap the long way round */
    if(dx > M_PI)  dx = dx - 2.0*M_PI;
    if(dx < -M_PI) dx = dx + 2.0*M_PI;

    if ( fabs(hdy)< SMALL_VALUE ) /* cheap area calculation along latitude */
      ctrlat -= dx*(2*cos(avg_y) + lat2*sin(avg_y) - cos(lat1) );
    else
      ctrlat -= dx*( (sin(hdy)/hdy)*(2*cos(avg_y) + lat2*sin(avg_y)) - cos(lat1) );
  }
  return (ctrlat*RADIUS*RADIUS);
} /* poly_ctrlat */

/*------------------------------------------------------------------------------
  double poly_ctrlon(const double x[], const double y[], int n, double clon)
  This routine is used to calculate the longitude of the centroid.
  clon keeps the result on the same interval as the grid cell center.
   ---------------------------------------------------------------------------*/
double poly_ctrlon(const double x[], const double y[], int n, double clon)
{
  double ctrlon = 0.0;
  int i;

  for (i=0;i<n;i++) {
    int ip = (i+1) % n;
    double phi1, phi2, dphi, lat1, lat2, dphi1, dphi2;
    double f1, f2, fac, fint;
    phi1 = x[ip];
    phi2 = x[i];
    lat1 = y[ip];
    lat2 = y[i];
    dphi = phi1 - phi2;

    if (dphi==0.0) continue;

    f1 = 0.5*(cos(lat1)*sin(lat1)+lat1);
    f2 = 0.5*(cos(lat2)*sin(lat2)+lat2);

    /* this will make sure longitude of centroid is at the same interval as the center of any grid */
    if(dphi > M_PI)  dphi = dphi - 2.0*M_PI;
    if(dphi < -M_PI) dphi = dphi + 2.0*M_PI;
    dphi1 = phi1 - clon;
    if( dphi1 > M_PI) dphi1 -= 2.0*M_PI;
    if( dphi1 <-M_PI) dphi1 += 2.0*M_PI;
    dphi2 = phi2 -clon;
    if( dphi2 > M_PI) dphi2 -= 2.0*M_PI;
    if( dphi2 <-M_PI) dphi2 += 2.0*M_PI;

    if(fabs(dphi2 -dphi1) < M_PI) {
      ctrlon -= dphi * (dphi1*f1+dphi2*f2)/2.0;
    }
    else { /* the edge crosses the +/-pi cut relative to clon: split the
              integral at the cut (fac = +pi or -pi) */
      if(dphi1 > 0.0) fac = M_PI;
      else fac = -M_PI;
      fint = f1 + (f2-f1)*(fac-dphi1)/fabs(dphi);
      ctrlon -= 0.5*dphi1*(dphi1-fac)*f1 - 0.5*dphi2*(dphi2+fac)*f2 + 0.5*fac*(dphi1+dphi2)*fint;
    }
  }
  return (ctrlon*RADIUS*RADIUS);
} /* poly_ctrlon */

/*
-----------------------------------------------------------------------------
  double box_ctrlat(double ll_lon, double ll_lat, double ur_lon, double ur_lat)
  This routine is used to calculate the latitude of the centroid.
  The box is a lat-lon rectangle given by its lower-left (ll) and
  upper-right (ur) corners.
   ---------------------------------------------------------------------------*/
double box_ctrlat(double ll_lon, double ll_lat, double ur_lon, double ur_lat)
{
  double dphi = ur_lon-ll_lon;
  double ctrlat;

  /* keep the longitude extent in (-pi, pi] */
  if(dphi > M_PI)  dphi = dphi - 2.0*M_PI;
  if(dphi < -M_PI) dphi = dphi + 2.0*M_PI;
  ctrlat = dphi*(cos(ur_lat) + ur_lat*sin(ur_lat)-(cos(ll_lat) + ll_lat*sin(ll_lat)));
  return (ctrlat*RADIUS*RADIUS);
} /* box_ctrlat */

/*------------------------------------------------------------------------------
  double box_ctrlon(double ll_lon, double ll_lat, double ur_lon, double ur_lat, double clon)
  This routine is used to calculate the longitude of the centroid.
  clon keeps the result on the same interval as the grid cell center.
   ----------------------------------------------------------------------------*/
double box_ctrlon(double ll_lon, double ll_lat, double ur_lon, double ur_lat, double clon)
{
  double phi1, phi2, dphi, lat1, lat2, dphi1, dphi2;
  double f1, f2, fac, fint;
  double ctrlon = 0.0;
  int i;

  /* integrate along the two constant-latitude edges of the box
     (i==0 uses ll_lat, i==1 uses ur_lat) */
  for( i =0; i<2; i++) {
    if(i == 0) {
      phi1 = ur_lon;
      phi2 = ll_lon;
      lat1 = lat2 = ll_lat;
    }
    else {
      phi1 = ll_lon;
      phi2 = ur_lon;
      lat1 = lat2 = ur_lat;
    }
    dphi = phi1 - phi2;
    f1 = 0.5*(cos(lat1)*sin(lat1)+lat1);
    f2 = 0.5*(cos(lat2)*sin(lat2)+lat2);

    if(dphi > M_PI)  dphi = dphi - 2.0*M_PI;
    if(dphi < -M_PI) dphi = dphi + 2.0*M_PI;
    /* make sure the center is in the same grid box.
*/
    dphi1 = phi1 - clon;
    if( dphi1 > M_PI) dphi1 -= 2.0*M_PI;
    if( dphi1 <-M_PI) dphi1 += 2.0*M_PI;
    dphi2 = phi2 -clon;
    if( dphi2 > M_PI) dphi2 -= 2.0*M_PI;
    if( dphi2 <-M_PI) dphi2 += 2.0*M_PI;

    if(fabs(dphi2 -dphi1) < M_PI) {
      ctrlon -= dphi * (dphi1*f1+dphi2*f2)/2.0;
    }
    else { /* the edge crosses the +/-pi cut relative to clon: split the
              integral at the cut (fac = +pi or -pi) */
      if(dphi1 > 0.0) fac = M_PI;
      else fac = -M_PI;
      fint = f1 + (f2-f1)*(fac-dphi1)/fabs(dphi);
      ctrlon -= 0.5*dphi1*(dphi1-fac)*f1 - 0.5*dphi2*(dphi2+fac)*f2 + 0.5*fac*(dphi1+dphi2)*fint;
    }
  }
  return (ctrlon*RADIUS*RADIUS);
} /* box_ctrlon */

/*******************************************************************************
  double grid_box_radius(double *x, double *y, double *z, int n);
  Find the radius of the grid box; the radius is defined as the maximum
  distance between any two vertices.
*******************************************************************************/
double grid_box_radius(const double *x, const double *y, const double *z, int n)
{
  double radius;
  int i, j;

  /* track the maximum pairwise SQUARED distance; take the square root once
     at the end */
  radius = 0;
  for(i=0; i<n-1; i++) {
    for(j=i+1; j<n; j++) {
      radius = max(radius, pow(x[i]-x[j],2.)+pow(y[i]-y[j],2.)+pow(z[i]-z[j],2.));
    }
  }

  radius = sqrt(radius);

  return (radius);
} /* grid_box_radius */

/*******************************************************************************
  double dist_between_boxes(const double *x1, const double *y1, const double *z1, int n1,
                            const double *x2, const double *y2, const double *z2, int n2);
  Find the distance between any two grid boxes.  The distance is defined by
  the maximum distance between any vertices of these two boxes.
*******************************************************************************/
double dist_between_boxes(const double *x1, const double *y1, const double *z1, int n1,
                          const double *x2, const double *y2, const double *z2, int n2)
{
  double dist;
  int i, j;

  dist = 0.0;
  for(i=0; i<n1; i++) {
    for(j=0; j<n2; j++) {
      dist = max(dist, pow(x1[i]-x2[j],2.)+pow(y1[i]-y2[j],2.)+pow(z1[i]-z2[j],2.));
    }
  }

  dist = sqrt(dist);
  return (dist);
} /* dist_between_boxes */

/*******************************************************************************
  int inside_edge(double x0, double y0, double x1, double y1, double x, double y)
  Determine whether point (x,y) is inside or outside a given edge with
  vertices (x0,y0) and (x1,y1).  Returns 1 if inside and 0 if outside.
  <y1-y0, -(x1-x0)> is the outward edge normal from vertex <x0,y0> to <x1,y1>.
  <x-x0,y-y0> is the vector from <x0,y0> to <x,y>.
  If the inner product <x-x0,y-y0>*<y1-y0, -(x1-x0)> > 0, the point is
  outside; otherwise it is inside.  An inner product of 0 is also treated
  as inside.
*******************************************************************************/
int inside_edge(double x0, double y0, double x1, double y1, double x, double y)
{
  const double SMALL = 1.e-12; /* tolerance: values within SMALL of 0 count as inside */
  double product;

  product = ( x-x0 )*(y1-y0) + (x0-x1)*(y-y0);
  return (product<=SMALL) ?
1:0; } /* inside_edge */ /* The following is a test program to test subroutines in create_xgrid.c */ #ifdef test_create_xgrid #include "create_xgrid.h" #include <math.h> #define D2R (M_PI/180) #define R2D (180/M_PI) #define MAXPOINT 1000 int main(int argc, char* argv[]) { double lon1_in[MAXPOINT], lat1_in[MAXPOINT]; double lon2_in[MAXPOINT], lat2_in[MAXPOINT]; double x1_in[MAXPOINT], y1_in[MAXPOINT], z1_in[MAXPOINT]; double x2_in[MAXPOINT], y2_in[MAXPOINT], z2_in[MAXPOINT]; double lon_out[20], lat_out[20]; double x_out[20], y_out[20], z_out[20]; int n1_in, n2_in, n_out, i, j; int nlon1=0, nlat1=0, nlon2=0, nlat2=0; int n; int ntest = 11; for(n=11; n<=ntest; n++) { switch (n) { case 1: /**************************************************************** test clip_2dx2d_great_cirle case 1: box 1: (20,10), (20,12), (22,12), (22,10) box 2: (21,11), (21,14), (24,14), (24,11) out : (21, 12.0018), (22, 12), (22, 11.0033), (21, 11) ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lon grid box to clip another lat-lon grid box */ lon1_in[0] = 20; lat1_in[0] = 10; lon1_in[1] = 20; lat1_in[1] = 12; lon1_in[2] = 22; lat1_in[2] = 12; lon1_in[3] = 22; lat1_in[3] = 10; lon2_in[0] = 21; lat2_in[0] = 11; lon2_in[1] = 21; lat2_in[1] = 14; lon2_in[2] = 24; lat2_in[2] = 14; lon2_in[3] = 24; lat2_in[3] = 11; break; case 2: /**************************************************************** test clip_2dx2d_great_cirle case 2: two identical box box 1: (20,10), (20,12), (22,12), (22,10) box 2: (20,10), (20,12), (22,12), (22,10) out : (20,10), (20,12), (22,12), (22,10) ****************************************************************/ lon1_in[0] = 20; lat1_in[0] = 10; lon1_in[1] = 20; lat1_in[1] = 12; lon1_in[2] = 22; lat1_in[2] = 12; lon1_in[3] = 22; lat1_in[3] = 10; for(i=0; i<n2_in; i++) { lon2_in[i] = lon1_in[i]; lat2_in[i] = lat1_in[i]; } break; case 3: /**************************************************************** test 
clip_2dx2d_great_cirle case 3: one cubic sphere grid close to the pole with lat-lon grid. box 1: (251.7, 88.98), (148.3, 88.98), (57.81, 88.72), (342.2, 88.72) box 2: (150, 88), (150, 90), (152.5, 90), (152.5, 88) out : (152.5, 89.0642), (150, 89.0165), (0, 90) ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lon grid box to clip another lat-lon grid box */ lon1_in[0] = 251.7; lat1_in[0] = 88.98; lon1_in[1] = 148.3; lat1_in[1] = 88.98; lon1_in[2] = 57.81; lat1_in[2] = 88.72; lon1_in[3] = 342.2; lat1_in[3] = 88.72; lon2_in[0] = 150; lat2_in[0] = 88; lon2_in[1] = 150; lat2_in[1] = 90; lon2_in[2] = 152.5; lat2_in[2] = 90; lon2_in[3] = 152.5; lat2_in[3] = 88; /* for(i=0; i<4; i++) { lon2_in[i] = lon1_in[i]; lat2_in[i] = lat1_in[i]; } */ break; case 4: /**************************************************************** test clip_2dx2d_great_cirle case 4: One box contains the pole box 1: (-160, 88.5354), (152.011, 87.8123) , (102.985, 88.4008), (20, 89.8047) box 2: (145,88), (145,90), (150,90), (150,88) out : (145.916, 88.0011), (145, 88.0249), (0, 90), (150, 88) ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lon grid box to clip another lat-lon grid box */ lon1_in[0] = -160; lat1_in[0] = 88.5354; lon1_in[1] = 152.011; lat1_in[1] = 87.8123; lon1_in[2] = 102.985; lat1_in[2] = 88.4008; lon1_in[3] = 20; lat1_in[3] = 89.8047; lon2_in[0] = 145; lat2_in[0] = 88; lon2_in[1] = 145; lat2_in[1] = 90; lon2_in[2] = 150; lat2_in[2] = 90; lon2_in[3] = 150; lat2_in[3] = 88; break; case 5: /**************************************************************** test clip_2dx2d_great_cirle case 5: One tripolar grid around the pole with lat-lon grid. 
box 1: (-202.6, 87.95), (-280, 89.56), (-100, 90), (-190, 88) box 2: (21,11), (21,14), (24,14), (24,11) out : (150, 88.7006), (145, 88.9507), (0, 90) ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lon grid box to clip another lat-lon grid box */ lon1_in[0] = -202.6; lat1_in[0] = 87.95; lon1_in[1] = -280.; lat1_in[1] = 89.56; lon1_in[2] = -100.0; lat1_in[2] = 90; lon1_in[3] = -190.; lat1_in[3] = 88; lon2_in[0] = 145; lat2_in[0] = 88; lon2_in[1] = 145; lat2_in[1] = 90; lon2_in[2] = 150; lat2_in[2] = 90; lon2_in[3] = 150; lat2_in[3] = 88; break; case 6: /**************************************************************** test clip_2dx2d_great_cirle case 6: One cubic sphere grid arounc the pole with one tripolar grid box around the pole. box 1: (-160, 88.5354), (152.011, 87.8123) , (102.985, 88.4008), (20, 89.8047) box 2: (-202.6, 87.95), (-280, 89.56), (-100, 90), (-190, 88) out : (170, 88.309), (157.082, 88.0005), (83.714, 89.559), (80, 89.6094), (0, 90), (200, 88.5354) ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lon grid box to clip another lat-lon grid box */ lon1_in[0] = -160; lat1_in[0] = 88.5354; lon1_in[1] = 152.011; lat1_in[1] = 87.8123; lon1_in[2] = 102.985; lat1_in[2] = 88.4008; lon1_in[3] = 20; lat1_in[3] = 89.8047; lon2_in[0] = -202.6; lat2_in[0] = 87.95; lon2_in[1] = -280.; lat2_in[1] = 89.56; lon2_in[2] = -100.0; lat2_in[2] = 90; lon2_in[3] = -190.; lat2_in[3] = 88; break; case 7: /**************************************************************** test clip_2dx2d_great_cirle case 7: One small grid box inside a big grid box. 
box 1: (20,10), (20,12), (22,12), (22,10) box 2: (18,8), (18,14), (24,14), (24,8) out : (20,10), (20,12), (22,12), (22,10) ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lon grid box to clip another lat-lon grid box */ lon1_in[0] = 20; lat1_in[0] = 10; lon1_in[1] = 20; lat1_in[1] = 12; lon1_in[2] = 22; lat1_in[2] = 12; lon1_in[3] = 22; lat1_in[3] = 10; lon2_in[0] = 18; lat2_in[0] = 8; lon2_in[1] = 18; lat2_in[1] = 14; lon2_in[2] = 24; lat2_in[2] = 14; lon2_in[3] = 24; lat2_in[3] = 8; break; case 8: /**************************************************************** test clip_2dx2d_great_cirle case 8: Cubic sphere grid at tile = 1, point (i=25,j=1) with N45 at (i=141,j=23) box 1: box 2: out : None ****************************************************************/ n1_in = 4; n2_in = 4; /* first a simple lat-lo n grid box to clip another lat-lon grid box */ lon1_in[0] = 350.0; lat1_in[0] = -45; lon1_in[1] = 350.0; lat1_in[1] = -43.43; lon1_in[2] = 352.1; lat1_in[2] = -43.41; lon1_in[3] = 352.1; lat1_in[3] = -44.98; lon2_in[0] = 350.0; lat2_in[0] = -46; lon2_in[1] = 350.0; lat2_in[1] = -44; lon2_in[2] = 352.5; lat2_in[2] = -44; lon2_in[3] = 352.5; lat2_in[3] = -46; break; case 9: /**************************************************************** test clip_2dx2d_great_cirle case 9: Cubic sphere grid at tile = 1, point (i=1,j=1) with N45 at (i=51,j=61) box 1: box 2: out : None ****************************************************************/ n1_in = 4; n2_in = 4; lon1_in[0] = 305.0; lat1_in[0] = -35.26; lon1_in[1] = 305.0; lat1_in[1] = -33.80; lon1_in[2] = 306.6; lat1_in[2] = -34.51; lon1_in[3] = 306.6; lat1_in[3] = -35.99; lon2_in[0] = 125; lat2_in[0] = 32; lon2_in[1] = 125; lat2_in[1] = 34; lon2_in[2] = 127.5; lat2_in[2] = 34; lon2_in[3] = 127.5; lat2_in[3] = 32; break; case 10: /**************************************************************** test clip_2dx2d_great_cirle case 10: Cubic sphere grid at tile = 3, 
point (i=24,j=1) with N45 at (i=51,j=46) box 1: box 2: out : None ****************************************************************/ n1_in = 4; n2_in = 4; lon1_in[0] = 125.0; lat1_in[0] = 1.46935; lon1_in[1] = 126.573; lat1_in[1] = 1.5091; lon1_in[2] = 126.573; lat1_in[2] = 0; lon1_in[3] = 125.0; lat1_in[3] = 0; lon2_in[0] = 125; lat2_in[0] = 0; lon2_in[1] = 125; lat2_in[1] = 2; lon2_in[2] = 127.5; lat2_in[2] = 2; lon2_in[3] = 127.5; lat2_in[3] = 0; break; case 11: /**************************************************************** test clip_2dx2d_great_cirle case 10: Cubic sphere grid at tile = 3, point (i=24,j=1) with N45 at (i=51,j=46) box 1: box 2: out : ****************************************************************/ nlon1 = 1; nlat1 = 1; nlon2 = 1; nlat2 = 1; n1_in = (nlon1+1)*(nlat1+1); n2_in = (nlon2+1)*(nlat2+1); lon1_in[0] = 350.0; lat1_in[0] = 90.00; lon1_in[1] = 170.0; lat1_in[1] = 87.92; lon1_in[2] = 260.0; lat1_in[2] = 87.92; lon1_in[3] = 215.0; lat1_in[3] = 87.06; /* lon1_in[0] = 35.0; lat1_in[0] = 87.06; */ /* lon1_in[1] = 80.0; lat1_in[1] = 87.92; */ /* lon1_in[2] = 125.0; lat1_in[2] = 87.06; */ /* lon1_in[3] = 350.0; lat1_in[3] = 87.92; */ /* lon1_in[4] = 350.0; lat1_in[4] = 90.00; */ /* lon1_in[5] = 170.0; lat1_in[5] = 87.92; */ /* lon1_in[6] = 305.0; lat1_in[6] = 87.06; */ /* lon1_in[7] = 260.0; lat1_in[7] = 87.92; */ /* lon1_in[8] = 215.0; lat1_in[8] = 87.06; */ lon2_in[0] = 167.5; lat2_in[0] = 88; lon2_in[1] = 170; lat2_in[1] = 88; lon2_in[2] = 167.5; lat2_in[2] = 90; lon2_in[3] = 170; lat2_in[3] = 90; /* nlon1 = 3; */ /* nlat1 = 2; */ /* nlon2 = 1; */ /* nlat2 = 1; */ /* n1_in = (nlon1+1)*(nlat1+1); */ /* n2_in = (nlon2+1)*(nlat2+1); */ /* lon1_in[0] = 35.00; lat1_in[0] = -59.90; */ /* lon1_in[1] = 37.64; lat1_in[1] = -58.69; */ /* lon1_in[2] = 40.07; lat1_in[2] = -57.44; */ /* lon1_in[3] = 42.32; lat1_in[3] = -56.15; */ /* lon1_in[4] = 32.36; lat1_in[4] = -58.69; */ /* lon1_in[5] = 35.00; lat1_in[5] = -57.56; */ /* lon1_in[6] = 37.45; 
lat1_in[6] = -56.39; */ /* lon1_in[7] = 39.74; lat1_in[7] = -55.18; */ /* lon1_in[8] = 29.93; lat1_in[8] = -57.44; */ /* lon1_in[9] = 32.55; lat1_in[9] = -56.39; */ /* lon1_in[10] = 35.00; lat1_in[10] = -55.29; */ /* lon1_in[11] = 37.30; lat1_in[11] = -54.16; */ /* lon2_in[0] = 35; lat2_in[0] = -58; */ /* lon2_in[1] = 37.5; lat2_in[1] = -58; */ /* lon2_in[2] = 35; lat2_in[2] = -56; */ /* lon2_in[3] = 37.5; lat2_in[3] = -56; */ /* nlon1 = 1; */ /* nlat1 = 1; */ /* nlon2 = 1; */ /* nlat2 = 1; */ /* n1_in = (nlon1+1)*(nlat1+1); */ /* n2_in = (nlon2+1)*(nlat2+1); */ /* lon1_in[0] = 305; lat1_in[0] = -35.26; */ /* lon1_in[1] = 306; lat1_in[1] = -35.99; */ /* lon1_in[2] = 305; lat1_in[2] = -33.80; */ /* lon1_in[3] = 306; lat1_in[3] = -34.51; */ /* lon2_in[0] = 305; lat2_in[0] = -34; */ /* lon2_in[1] = 307.5; lat2_in[1] = -34; */ /* lon2_in[2] = 305; lat2_in[2] = -32; */ /* lon2_in[3] = 307.5; lat2_in[3] = -32; */ nlon1 = 2; nlat1 = 2; nlon2 = 1; nlat2 = 1; n1_in = (nlon1+1)*(nlat1+1); n2_in = (nlon2+1)*(nlat2+1); lon1_in[0] = 111.3; lat1_in[0] = 1.591; lon1_in[1] = 109.7; lat1_in[1] = 2.926; lon1_in[2] = 108.2; lat1_in[2] = 4.256; lon1_in[3] = 110.0; lat1_in[3] = 0.000; lon1_in[4] = 108.4; lat1_in[4] = 1.335; lon1_in[5] = 106.8; lat1_in[5] = 2.668; lon1_in[6] = 108.7; lat1_in[6] = -1.591; lon1_in[7] = 107.1; lat1_in[7] = -0.256; lon1_in[8] = 105.5; lat1_in[8] = 1.078; lon2_in[0] = 107.5; lat2_in[0] = 0; lon2_in[1] = 110; lat2_in[1] = 0; lon2_in[2] = 107.5; lat2_in[2] = 2; lon2_in[3] = 110; lat2_in[3] = 2; break; case 12: /**************************************************************** test : create_xgrid_great_circle box 1: (20,10), (20,12), (22,12), (22,10) box 2: (21,11), (21,14), (24,14), (24,11) out : (21, 12.0018), (22, 12), (22, 11.0033), (21, 11) ****************************************************************/ nlon1 = 2; nlat1 = 2; nlon2 = 3; nlat2 = 3; n1_in = (nlon1+1)*(nlat1+1); n2_in = (nlon2+1)*(nlat2+1); /* first a simple lat-lon grid box to clip another 
lat-lon grid box */ for(j=0; j<=nlat1; j++) for(i=0; i<=nlon1; i++){ lon1_in[j*(nlon1+1)+i] = 20.0 + (i-1)*2.0; lat1_in[j*(nlon1+1)+i] = 10.0 + (j-1)*2.0; } for(j=0; j<=nlat2; j++) for(i=0; i<=nlon2; i++){ lon2_in[j*(nlon2+1)+i] = 19.0 + (i-1)*2.0; lat2_in[j*(nlon2+1)+i] = 9.0 + (j-1)*2.0; } break; case 13: nlon1 = 1; nlat1 = 1; nlon2 = 1; nlat2 = 1; n1_in = (nlon1+1)*(nlat1+1); n2_in = (nlon2+1)*(nlat2+1); /* lon1_in[0] = ; lat1_in[0] = ; */ /* lon1_in[1] = ; lat1_in[1] = ; */ /* lon1_in[2] = ; lat1_in[2] = ; */ /* lon1_in[3] = ; lat1_in[3] = ; */ /* lon2_in[0] = ; lat2_in[0] = ; */ /* lon2_in[1] = ; lat2_in[1] = ; */ /* lon2_in[2] = ; lat2_in[2] = ; */ /* lon2_in[3] = ; lat2_in[3] = ; */ /* lon1_in[0] = 1.35536; lat1_in[0] = 1.16251; */ /* lon1_in[1] = 1.36805; lat1_in[1] = 1.15369; */ /* lon1_in[2] = 1.37843; lat1_in[2] = 1.16729; */ /* lon1_in[3] = 1.39048; lat1_in[3] = 1.15826; */ /* lon2_in[0] = 1.34611; lat2_in[0] = 1.16372; */ /* lon2_in[1] = 1.35616; lat2_in[1] = 1.15802; */ /* lon2_in[2] = 1.35143; lat2_in[2] = 1.16509; */ /* lon2_in[3] = 1.36042; lat2_in[3] = 1.15913; */ /* lon1_in[0] = 12.508065121288551; lat1_in[0] = -87.445883646793547; */ /* lon1_in[1] = 325.425637772; lat1_in[1] = -86.481216821859505; */ /* lon1_in[2] = 97.5; lat1_in[2] = -89.802136057677174; */ /* lon1_in[3] = 277.5; lat1_in[3] = -87.615232005344637; */ /* for(j=0; j<=nlat2; j++) for(i=0; i<=nlon2; i++) { */ /* lon2_in[j*(nlon2+1)+i] = -280.0 + i*1.0; */ /* lat2_in[j*(nlon2+1)+i] = -90.0 + j*8.0; */ /* } */ lon1_in[0] = 120.369397984526174; lat1_in[0] = 16.751543427495864; lon1_in[1] = 119.999999999999986; lat1_in[1] = 16.751871929590038; lon1_in[2] = 120.369397846883501; lat1_in[2] = 16.397797979598028; lon1_in[3] = 119.999999999999986; lat1_in[3] = 16.398120477217255; lon2_in[0] = 120.369415056522087; lat2_in[0] = 16.752176828509153; lon2_in[1] = 119.999999999999986; lat2_in[1] = 16.752505523196167; lon2_in[2] = 120.369415056522087; lat2_in[2] = 16.397797949548146; lon2_in[3] = 
119.999999999999986; lat2_in[3] = 16.398120477217255; break; default: error_handler("test_create_xgrid: incorrect case number"); } /* convert to radian */ for(i=0; i<n1_in; i++) { lon1_in[i] *= D2R; lat1_in[i] *=D2R; } for(i=0; i<n2_in; i++) { lon2_in[i] *= D2R; lat2_in[i] *=D2R; } printf("\n*********************************************************\n"); printf("\n Case %d \n", n); printf("\n*********************************************************\n"); if( n > 10 ) { int nxgrid; int *i1, *j1, *i2, *j2; double *xarea, *xclon, *xclat, *mask1; mask1 = (double *)malloc(nlon1*nlat1*sizeof(double)); i1 = (int *)malloc(MAXXGRID*sizeof(int)); j1 = (int *)malloc(MAXXGRID*sizeof(int)); i2 = (int *)malloc(MAXXGRID*sizeof(int)); j2 = (int *)malloc(MAXXGRID*sizeof(int)); xarea = (double *)malloc(MAXXGRID*sizeof(double)); xclon = (double *)malloc(MAXXGRID*sizeof(double)); xclat = (double *)malloc(MAXXGRID*sizeof(double)); for(i=0; i<nlon1*nlat1; i++) mask1[i] = 1.0; nxgrid = create_xgrid_great_circle(&nlon1, &nlat1, &nlon2, &nlat2, lon1_in, lat1_in, lon2_in, lat2_in, mask1, i1, j1, i2, j2, xarea, xclon, xclat); printf("\n*********************************************************\n"); printf("\n First input grid box longitude, latitude \n \n"); for(i=0; i<n1_in; i++) printf(" %g, %g \n", lon1_in[i]*R2D, lat1_in[i]*R2D); printf("\n Second input grid box longitude, latitude \n \n"); for(i=0; i<n2_in; i++) printf(" %g, %g \n", lon2_in[i]*R2D, lat2_in[i]*R2D); printf("\n Number of exchange grid is %d\n", nxgrid); for(i=0; i<nxgrid; i++) { printf("(i1,j1)=(%d,%d), (i2,j2)=(%d, %d), xgrid_area=%g, xgrid_clon=%g, xgrid_clat=%g\n", i1[i], j1[i], i2[i], j2[i], xarea[i], xclon[i], xclat[i]); } /* comparing the area sum of exchange grid and grid1 area */ { double *x1, *y1, *z1, *area1; double area_sum; int i; area_sum = 0.0; for(i=0; i<nxgrid; i++) { area_sum+= xarea[i]; } area1 = (double *)malloc((nlon1)*(nlat1)*sizeof(double)); get_grid_great_circle_area_(&nlon1, &nlat1, lon1_in, lat1_in, 
area1); printf("xgrid area sum is %g, grid 1 area is %g\n", area_sum, area1[0]); } printf("\n"); free(i1); free(i2); free(j1); free(j2); free(xarea); free(xclon); free(xclat); free(mask1); } else { latlon2xyz(n1_in, lon1_in, lat1_in, x1_in, y1_in, z1_in); latlon2xyz(n2_in, lon2_in, lat2_in, x2_in, y2_in, z2_in); n_out = clip_2dx2d_great_circle(x1_in, y1_in, z1_in, 4, x2_in, y2_in, z2_in, n2_in, x_out, y_out, z_out); xyz2latlon(n_out, x_out, y_out, z_out, lon_out, lat_out); printf("\n*********************************************************\n"); printf("\n First input grid box longitude, latitude \n \n"); for(i=0; i<n1_in; i++) printf(" %g, %g \n", lon1_in[i]*R2D, lat1_in[i]*R2D); printf("\n Second input grid box longitude, latitude \n \n"); for(i=0; i<n2_in; i++) printf(" %g, %g \n", lon2_in[i]*R2D, lat2_in[i]*R2D); printf("\n output clip grid box longitude, latitude for case 1 \n \n"); for(i=0; i<n_out; i++) printf(" %g, %g \n", lon_out[i]*R2D, lat_out[i]*R2D); printf("\n"); } } } #endif
/* ==== pool-split.c ==== */
/* This file is part of Primer Pooler (c) Silas S. Brown. For Wen. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #include <limits.h> #include <time.h> #include <signal.h> #define NDEBUG #include <assert.h> #include "openmp.h" #include "all-primers.h" #include "triangle.h" #include "numbers.h" #include "memcheck.h" #define USE_QSORT 0 /* set this to 1 if you want to qsort the moves and pick either the best or next-best at random; 0 (faster) to take only the best move. (TODO: user configurable?) */ #define PARALLELIZE_POOLSPLIT 1 /* having this at 1 can help find solutions more quickly (starts with a different initial randomization on each core)... */ #define PARALLELIZE_BESTMOVE 0 /* ...but setting this to 1 can make things SLOWER :-( unless dealing with a HUGE number of primer PAIRS (say, >2000 primers for >1000 pairs), otherwise the loop is too small for the parallelisation speedup to outweigh the thread-setup overhead */ /* Proposed move N = move primer N/(nPools-1) into pool currentPool + 1 + (N % (nPools-1)) (mod nPools). (Therefore every move MOVES something; no 'no-op's, since 1+(m mod (range-1)) cannot express going back to start) Immediate value = badnessContrib(cur) - badnessC(dest) We could save some division in qsort's inner loop by maintaing a separate array, but would need to profile as might be outweighed by worse locality of reference. 
These variables need to be available at module scope so that our qsort comparison function can get at them: */ #if USE_QSORT static int *qsort_pools, qsort_nPools, *qsort_poolCounts, qsort_maxCount; static ULL *qsort_bContrib; /* otherwise we WON'T have them global, and we can be more multi-threaded w/out having to stall every time we call qsort (unless hard-code a pool of comparison funcs). The qsort version is slower ANYWAY and off by default, so I'm not too worried about having to stall for it. */ #endif /* badness as ULL: bits 63-48: unsigned short maxScore (or invalid) bits 47-32: unsigned short num with this score bits 31-16: unsigned short num with this score - 1 bits 15-0: unsigned short num with this score - 2 Note: bit 63 is always 0, so it's safe to cast to LL and do a subtraction to compare which is better of two states for qsort. But 'value of move' is NOT equal to the DISTANCE between two of these; use valueOfReduction */ typedef unsigned short US; enum { InvalidCombination=0x4000 }; /* (also mentioned in 64.h etc) If score is real score, 129 should do if no primers can be that long in this program. But we'll need it higher if score is really -dG*10 (it's -dG*2 at the moment). 
Don't approach 0x8000 though, + allow room for possible overflow of next field (so no 0x7FFF), although subtractBadness / valueOfReduction might need attention if we actually come to expect overflows between fields */

/* Extract the current maximum interaction score from a packed badness value
   (bits 63-48, per the packing described above). */
static inline int maxScoreOfBadness(ULL badness) { return (int)(badness>>48); }

/* Fold one interaction score into *badness.  If score exceeds the current
   maximum, the packed per-score counters are shifted down (or reset) so the
   top 16 bits always hold the highest score seen; otherwise the saturating
   16-bit counter for score (at max, max-1 or max-2) is incremented.  Scores
   more than 2 below the maximum are ignored. */
static inline void updateBadness(ULL *badness,int score) {
  int max = maxScoreOfBadness(*badness);
  assert(max <= InvalidCombination);
  assert(score <= InvalidCombination);
  if (score > max) {
    if (score > max+2) {
      /* new maximum far above the old one: no old counter survives the
         shift; start afresh with count(max)==1 */
      *badness = ((ULL)score << 48) | ((ULL)1 << 32);
      return;
    } else while(score > max) {
      /* (TODO: could just write out the 2 cases of this loop, and change the below shifts into AND and add, IF profiling shows this needs it) */
      *badness = ((*badness >> 16) & 0xFFFFFFFFLL) | ((((*badness)>>48)+1)<<48);
      max++;
    }
  }
  /* now score <= max */
  int lswScore = max - 2; /* lowest score that still has a counter */
  if(score < lswScore) return; /* this score is too insignificant to count at the current maximum level */
  int sL = (score-lswScore)*16; /* bit offset of the 16-bit counter for this score */
  if(((*badness>>sL)&0xFFFF)==0xFFFF) return; /* saturated */
  *badness += ((ULL)1 << sL);
  assert(maxScoreOfBadness(*badness) == max);
}

/* Remove one interaction score from *badness (inverse of updateBadness, for
   incremental updates).  Returns nonzero when the count at the maximum
   level has dropped to 0, i.e. the caller must recalculate the maximum. */
static inline int subtractBadness(ULL *badness,int score) {
  /* for incremental updates. Assume score has previously been included in updateBadness, so we don't have to worry about crossing 0 here. */
  int max = maxScoreOfBadness(*badness);
  assert(score <= max);
  int lswScore = max - 2;
  if(score < lswScore) return 0; /* this score is too insignificant to affect the counters */
  int sL = (score-lswScore)*16;
  if(((*badness>>sL)&0xFFFF)==0xFFFF) return 0; /* if it was saturated, we'll have to leave it "stuck" there I'm afraid (unless return 1 to recalculate, but in many cases it would just saturate again) */
  *badness -= ((ULL)1 << sL);
  return !((*badness>>32)&0xFFFF); /* recalc max if count(max)==0 */
}

static inline ULL valueOfReduction(ULL from,ULL to) { /* for qsort etc.
We COULD return negative values as LL, but that would be more computation and we might as well just return 0, since we won't perform any value-based moves that don't have positive reductions */
  if(to > from) return 0;
  if(!to) return from;
  if(maxScoreOfBadness(from) == maxScoreOfBadness(to)) {
    /* 'from-to' will be a 48-bit value, and if get here then the high 16
       bits of 'to' will be <= 'from', but if the mid 16 bits are >= then we
       need to set all of the low 32 bits to 0, and if the bottom 16 bits
       are >= then we need to set low 16 to 0. */
    ULL hi = (from & ((ULL)0xFFFF<<32)) - (to & ((ULL)0xFFFF<<32)), /* do NOT factor out the & part! */
      mid1 = from & ((ULL)0xFFFF<<16), mid2 = to & ((ULL)0xFFFF<<16);
    if (mid2 > mid1) return hi;
    ULL lo1 = from & 0xFFFF, lo2 = to & 0xFFFF;
    if (lo2 > lo1) return hi | (mid1-mid2);
    return from-to;
  }
  /* maxScoreOfBadness(from) > maxScoreOfBadness(to) : at the very least we
     need to 0-out the bottom 48 bits (TODO: we might be able to add some
     back in if maxScoreOfBadness(from) <= maxScoreOfBadness(to)+2, but
     probably OK just to do this for now...) */
  return (from & ((ULL)0xFFFF<<48)) - (to & ((ULL)0xFFFF<<48));
}

/* Decoders for proposed-move numbers (see the scheme described at the top of
   this file): move m moves primer m/(nPools-1) into pool
   currentPool + 1 + (m % (nPools-1)) (mod nPools). */
static inline int primerOfMove(int m,int nPools) { return m/(nPools-1); }
static inline int oldPoolOfMove(int m,int nPools,const int* pools) { return pools[primerOfMove(m,nPools)]; }
static inline int poolOfMove(int m,int nPools,const int* pools) { return (oldPoolOfMove(m,nPools,pools)+1+(m % (nPools-1))) % nPools; }
static inline int primerAndDest_to_moveNo(int primer,int newPool,int nPools,const int *pools) {
  // works only if newPool != current pool
  return ((newPool+nPools-pools[primer]-1) % nPools) + primer*(nPools-1);
}

/* bContrib is indexed by (primer, pool), flattened as primer*nPools + pool */
static inline int primerAndPool_to_contribOffset(int primer,int pool,int nPools) { return primer * nPools + pool; }
static inline int move_to_contribOffset(int m,int nPools,const int* pools) { return primerAndPool_to_contribOffset(primerOfMove(m,nPools),poolOfMove(m,nPools,pools),nPools); }

/* Value (badness reduction) of performing move m; 0 if the destination pool
   is already at the maxCount size limit (when maxCount is nonzero). */
static inline ULL valueOfMove(int m,int nPools,const int* pools,const ULL*bContrib,const int *poolCounts,int maxCount) {
  if(maxCount && poolCounts[poolOfMove(m,nPools,pools)]==maxCount) return 0;
  assert(!maxCount || poolCounts[poolOfMove(m,nPools,pools)]<maxCount);
  ULL from = bContrib[primerAndPool_to_contribOffset(primerOfMove(m,nPools),oldPoolOfMove(m,nPools,pools),nPools)], /* what this primer was contributing to its old pool */
    to = bContrib[move_to_contribOffset(m,nPools,pools)]; /* what this primer will contribute to its new pool */
  assert(!to || poolCounts[poolOfMove(m,nPools,pools)]);
  return valueOfReduction(from,to);
}

#if USE_QSORT
/* qsort comparator over move numbers: higher-valued moves sort first.
   Reads the module-scope qsort_* state set up by the caller. */
static int betterMoves1st(const void *aP, const void *bP){
  int a = *(int*)aP, b = *(int*)bP;
  ULL vA = valueOfMove(a,qsort_nPools,qsort_pools,qsort_bContrib,qsort_poolCounts,qsort_maxCount),
    vB = valueOfMove(b,qsort_nPools,qsort_pools,qsort_bContrib,qsort_poolCounts,qsort_maxCount);
  /* Can't just subtract as it'll overflow an int */
  if (vA > vB) return -1; /* A is better, put first */
  else if (vA == vB) return 0;
  else return 1;
}
#else
static int
findBestMove(const int *moves,int numMoves,int nPools,const int* pools,const ULL*bContrib,const int*poolCounts,int maxCount) { int bestMove = moves[0]; ULL bestVal = valueOfMove(bestMove,nPools,pools,bContrib,poolCounts,maxCount); #if PARALLELIZE_BESTMOVE && defined(_OPENMP) #pragma omp parallel #endif { int priv_bestMove = bestMove; ULL priv_bestVal = bestVal; int i; #if PARALLELIZE_BESTMOVE && defined(_OPENMP) #pragma omp for schedule(static) #endif for(i=1; i<numMoves; i++) { ULL thisVal = valueOfMove(moves[i],nPools,pools,bContrib,poolCounts,maxCount); if(thisVal > priv_bestVal) { priv_bestVal = thisVal; priv_bestMove = moves[i]; } } if(priv_bestVal > bestVal) { #if PARALLELIZE_BESTMOVE && defined(_OPENMP) #pragma omp critical #endif if (priv_bestVal > bestVal) { bestVal = priv_bestVal; bestMove = priv_bestMove; } } } return bestMove; } #endif /* and here is code to set up & maintain that bContrib: */ static void badnessContrib(int primer,const int *scores,int np,int nPools,const int *pools,ULL *bContrib) { /* Assuming proposedPools[0:nPools] == 0 on entry, set proposedPools[0:nPools] to answer the Q: What contribution to the overall "badness" would primer make, assuming it were moved to (or left as-is in) proposedPools[n] and no other changes were made? */ assert(primer>=0 && primer<np); ULL *proposedPools = bContrib + primer*nPools; int i; for(i=0; i<primer; i++) { /* ( <primer , primer ) */ int pool = pools[i]; assert(pool>=0 && pool<nPools); updateBadness(proposedPools+pool,scores[primer-i]); /* if we put 'primer' in the same pool as 'i' is, we'll get the badness of the interaction between i and primer */ scores += (np-i); } ++scores; /* i==primer, ignore interaction w. 
itself */ for(++i; i<np; i++,scores++) { /* ( primer, >primer ) */ int pool = pools[i]; assert(pool>=0 && pool<nPools); updateBadness(proposedPools+pool,*scores); } } static void badnessContribUpdate(int primer,const int *scores,int np,int otherPrimer,int otherOldPool,int nPools,const int *pools,ULL *bContrib,const int *poolCounts) { /* as above but just incrementally update primer's proposedPools in light of the fact that otherPrimer has just moved from otherOldPool to its current pool */ assert(primer>=0 && primer<np && otherPrimer>=0 && otherPrimer<np && otherOldPool>=0 && otherOldPool<nPools); ULL *proposedPools = bContrib + primer*nPools; int s, otherNewPool = pools[otherPrimer]; assert(otherNewPool>=0 && otherNewPool<nPools); if(otherPrimer == primer) return; s = scores[t_offset(np,primer,otherPrimer)]; /* the score-contribution of interaction between primer and otherPrimer */ if(!poolCounts[otherOldPool]) proposedPools[otherOldPool] = 0; /* like the loop below but also clears any saturation (since we know there won't be saturation if the pool was left empty) */ else if(subtractBadness(proposedPools+otherOldPool,s)) { /* oops, need to recalc the max of otherOldPool */ proposedPools[otherOldPool] = 0; int i; for(i=0; i<primer; i++) { /* ( <primer , primer ) */ if(pools[i] == otherOldPool) updateBadness(proposedPools+otherOldPool,scores[primer-i]); scores += (np-i); } ++scores; for(++i; i<np; i++,scores++) { if(pools[i]==otherOldPool) updateBadness(proposedPools+otherOldPool,*scores); } } updateBadness(proposedPools+otherNewPool,s); } static ULL globalBadness(const int *score,int np,const int *pools) { /* Measure across all primers in all pools. We might perhaps be able to optimise this by making use of what we've already calculated in bContrib, but this function is called only when we get to local maxima. 
*/ int i,j; ULL m=0;
  /* walk the upper-triangular score array; accumulate only same-pool pairs */
  for(i=0; i<np; i++)
    for(j=i; j<np; j++)
      if(pools[i]==pools[j]) updateBadness(&m,*score++);
      else score++;
  return m; }

/* Apply move m: relocate its primer (and every primer stuck to it via
   primerMove_depends_on) to the destination pool, keep poolCounts in
   step, and incrementally refresh each other primer's bContrib row. */
static inline void make_a_move(int m,int np,const int *scores,const int *primerMove_depends_on,int nPools,int *pools,ULL *bContrib,int *poolCounts,int maxCount) {
  int primer = primerOfMove(m,nPools),
      oldPool = oldPoolOfMove(m,nPools,pools),
      newPool = poolOfMove(m,nPools,pools);
  assert(primer >= 0 && primer < np && oldPool>=0 && newPool>=0 && oldPool<nPools && newPool<nPools && oldPool != newPool);
  pools[primer] = newPool; /* 'm' changes meaning now */
  assert(poolCounts[oldPool]);
  poolCounts[oldPool]--; poolCounts[newPool]++;
  assert(!maxCount || poolCounts[newPool]<=maxCount);
  int i;
  for(i=0; i<np; i++) {
    if(primerMove_depends_on[i]==primer)
      pools[i] = newPool; /* see merge_scores_of_stuckTogether_primers (and DON'T need to update poolCounts here) */
    else
      badnessContribUpdate(i,scores,np,primer,oldPool,nPools,pools,bContrib,poolCounts);
  } }

static inline int should_stick_together(AllPrimers ap,int i,int j) {
  /* Names same except last letter = keep in same pool */
  const char *n1=ap.names[i], *n2=ap.names[j];
  size_t l1=strlen(n1),l2=strlen(n2);
  return (l1 == l2 && !strncmp(n1,n2,l1-1)); /* TODO: case-insensitive?
*/ } static inline void updateMax(int *i,int m) { if(m>*i) *i=m; } static int* merge_scores_of_stuckTogether_primers(AllPrimers ap,int *scores) { int i,j,*p=scores; if(!p) return NULL; int *primerMove_depends_on=malloc(ap.np*sizeof(int)); if(!primerMove_depends_on) return NULL; memset(primerMove_depends_on,0xFF,ap.np*sizeof(int)); char *pairedOK=malloc(ap.np); if(!pairedOK) { free(primerMove_depends_on); return NULL; } memset(pairedOK,0,ap.np); int doneMerge = 0; for(i=0; i<ap.np; i++) for(j=i; j<ap.np; j++) { if(i!=j && primerMove_depends_on[i]==-1 && primerMove_depends_on[j]==-1 && should_stick_together(ap,i,j)) { /* For simplicity of pooling, we'll set it so: - Interactions with i get maxed with those w.j - Interactions with j itself "don't count" - j is not allowed to be moved by itself - j is always moved when i moves */ *p = 0; /* so S(i,j) = 0 */ int k,*kp=scores; /* max S(k,i) with S(k,j): */ int *Sip=0; /* =0 to suppress compiler warning */ for(k=0; k<j; k++) { if(k<i) { updateMax(kp+i-k,kp[j-k]); kp[j-k]=0; } else if(k==i) { Sip = kp+1; /* needed for S(i,k) */ } else { /* max S(i,k) with S(k,j) */ updateMax(Sip++,kp[j-k]); kp[j-k]=0; } kp += (ap.np-k); } k++; kp++; Sip++; /* ignore k==j */ for(;k<ap.np;k++) { /* max S(i,k) [=Sip] with S(j,k) [=kp] */ updateMax(Sip++,*kp); *kp++=0; } primerMove_depends_on[j] = i; doneMerge = pairedOK[i] = pairedOK[j] = 1; } p++; } if(doneMerge) { /* just check for lone primers, usually a bad sign */ for(i=0; i<ap.np; i++) if(!pairedOK[i]) fprintf(stderr,"Warning: ungrouped primer %s\n",ap.names[i]); } else { /* same message as in amplicons.c (see comment there) in case overlap-check was missed */ fputs("WARNING: No primers are paired!\nPlease end your forward primers with -F\nand your reverse primers with -R or -B as instructed\n",stderr); } free(pairedOK); return primerMove_depends_on; } static inline int should_stick_to_pool(AllPrimers ap,int i) { /* if the user wants some primers to be fixed to specific pools (and we 
move the rest around) */ const char *n=ap.names[i]; if(*n == '@' && *(++n)>='0' && *n<='9') { char *end; int pool=(int)strtol(n,&end,10); if(*end==':') { /* we have a valid @<pool number>: */ return pool-1; /* (internally start at 0) */ } } return -1; } static int* pre_fix_primers_to_pools(AllPrimers ap) { int *fix_to_pool=malloc(ap.np*sizeof(int)), i; if(!fix_to_pool) return NULL; for(i=0; i<ap.np; i++) fix_to_pool[i] = should_stick_to_pool(ap,i); return fix_to_pool; } static void saturate_scores_of_overlapping_primers(int *scores,const char *overlappingAmplicons,const int *primerNoToAmpliconNo,int nAmplicons,int np) { int i,j,*p=scores; if(!p || !nAmplicons) return; assert(overlappingAmplicons); for(i=0; i<np; i++) for(j=i; j<np; j++) { assert(*p<InvalidCombination); if(i!=j && primerNoToAmpliconNo[i]!=-1 && primerNoToAmpliconNo[j]!=-1 && overlappingAmplicons[primerNoToAmpliconNo[i]*nAmplicons+primerNoToAmpliconNo[j]]) *p = InvalidCombination; p++; } } static void printNumInEachPool(const int *poolCounts,int numPools) { fprintf(stderr,"\tPool sizes: "); int i; for(i=0; i<numPools; i++) { if(i) fprintf(stderr,"|"); fprintf(stderr,"%d",poolCounts[i] << 1); /* TODO: this "<< 1" assumes countOf(primerMove_depends_on==-1) == np/2, but that is almost certainly going to be the case, unless somebody is doing something very strange, and if the worst comes to the worst it's only an informational pool-size display going a bit wrong */ } fprintf(stderr,"\n"); } static int IntCompare(const void *a,const void *b) { return *(const int*)b-*(const int*)a; } static int* numInEachPool(const int *pools,int np,int numPools,const int *primerMove_depends_on) { /* for after everything has finished and the per-thread poolCounts has been freed */ int* counts=calloc(numPools,sizeof(int)); if(!counts) return NULL; int i; for(i=0; i<np; i++) if(primerMove_depends_on[i]==-1) counts[pools[i]]++; qsort(counts,numPools,sizeof(int),IntCompare); return counts; } enum { s_KeepGoing = 0, 
s_ccPressed, s_tooManyIters }; static volatile int stop_state; static void intHandler(int s) { stop_state = s_ccPressed; } static void randomise_pools(int np,const int *primerMove_depends_on,const int *fix_to_pool,const int *scores,int nPools,int *pools,ULL *bContrib,int *poolCounts,int maxCount) { /* initialise to random distribution of pools, but note primerMove_depends_on and maxCount when doing this. Also initialise bContrib. */ int i; memset(poolCounts,0,nPools*sizeof(int)); /* First set all fixed-pool primers in place, before randomising the others around them */ for(i=0; i<np; i++) if(primerMove_depends_on[i] == -1) { int pool = fix_to_pool[i]; if(pool != -1) { if(maxCount && poolCounts[pool]==maxCount && !(maxCount==1 && nPools==np)) { /* (last part of that condition detects call by suggest_num_pools, where it's OK if fixed-pool primers make us exceed 1 per pool) */ fprintf(stderr, "randomise_pools ERROR: maxCount too small for fixed primer in pool %d\n",fix_to_pool[i]); abort(); } pools[i]=pool; poolCounts[pool]++; } } for(i=0; i<np; i++) if(primerMove_depends_on[i] == -1 && fix_to_pool[i] == -1) { int pool = ThreadRand() % nPools; int origPool = pool; while(maxCount && poolCounts[pool]>=maxCount) { pool++; /* not very random but it'll do for now */ if(pool==nPools) pool=0; if(pool==origPool) { fprintf(stderr, "randomise_pools ERROR: maxCount too small, can't fit\n"); abort(); } } pools[i]=pool; poolCounts[pool]++; } for(i=0; i<np; i++) if(primerMove_depends_on[i]>-1) /* DON'T update poolCounts here (it's in pairs so moveTooLopsided doesn't have to account for this one) */ pools[i]=pools[primerMove_depends_on[i]]; memset(bContrib,0,np*nPools*sizeof(ULL)); for(i=0; i<np; i++) badnessContrib(i,scores,np,nPools,pools,bContrib); } static int* initMoves(int *numMoves,int np,int nPools,const int *primerMove_depends_on,const int *fix_to_pool) { if(nPools <= 1) return NULL; int *moves=malloc(np*(nPools-1)*sizeof(int)); if(moves) { int *movesP = moves, i; for(i=0; 
i<np*(nPools-1); i++) { int primer = primerOfMove(i,nPools); if(primerMove_depends_on[primer]==-1 && fix_to_pool[primer]==-1) *movesP++ = i; } *numMoves = movesP-moves; moves = memTrim(moves,movesP); } return moves; } #if Has_128bit typedef bit128 ThreadMask; #else typedef ULL ThreadMask; #endif static void poolsplit_thread(const int* shared_moves,AllPrimers ap,int nPools,int numMoves,const int* primerMove_depends_on,const int* fix_to_pool,const int* scores,time_t limitTime,int *bestPools,const float* table, int* bestPools_init_yet,ULL* gBadLast,long *totalIterations,time_t *lastOutputTime,int *overlaps,int* just_printed_counts,ThreadMask* threads_needing_to_reset_iter,int maxCount) { /* This is the inner part of split_into_pools. Multiple instances may be called in parallel. */ int iter = 0, willContinue=1; int *moves = (int*)shared_moves; ULL *bContrib = malloc(ap.np*nPools*sizeof(ULL)); int *poolCounts=malloc(nPools*sizeof(int)); int *pools = NULL; if(memFail(bContrib,poolCounts,_memFail)) willContinue = 0; else { pools = malloc(ap.np*sizeof(int)); if(memFail(pools,_memFail)) willContinue = 0; else if(USE_QSORT) { /* moves must be per-thread */ moves=malloc(numMoves*sizeof(int)); if(memFail(moves,_memFail)) willContinue = 0; else wrapped_memcpy(moves,shared_moves,numMoves*sizeof(int)); } } ThreadMask myMask = ((ThreadMask)1) << omp_get_thread_num(); /* for threads_needing_to_reset_iter */ if (!myMask) { /* what, somebody's running us on >128 cores ?? (or >64 32-bit cores) */ /* (versions below v1.16 would hit this after 32 cores and not detect it) */ #if defined(_OPENMP) #pragma omp critical #endif fprintf(stderr,"Can't run thread number %d because ThreadMask type has only %d bits\n",omp_get_thread_num(),(int)sizeof(ThreadMask)*8); /* If you hit this, I suggest you either find a wider ThreadMask type or else we'd better make it an array. 
Haven't done it so far because I've tested only on a 4-core machine and I doubt the chances of being run on many more cores than that are particularly high in 2016 (future might be different) */ willContinue = 0; } int max_iterations = 10000000 /* TODO: customise? profile? (but low priority as we have an interrupt mechanism) */ / (omp_get_num_threads() > 10 ? 10 : omp_get_num_threads()); /* TODO: customise this "10" as well? (it's maxMoves / minMoves) */ while(willContinue) { randomise_pools(ap.np,primerMove_depends_on,fix_to_pool,scores,nPools,pools,bContrib,poolCounts,maxCount); for(; ; iter++) { #if USE_QSORT #if PARALLELIZE_POOLSPLIT && defined(_OPENMP) #pragma omp critical #endif { qsort_pools = pools; qsort_nPools = nPools; qsort_bContrib = bContrib; qsort_poolCounts = poolCounts; qsort_maxCount = maxCount; qsort(moves,numMoves,sizeof(int),betterMoves1st); } int bestMove = moves[0]; #else int bestMove = findBestMove(moves,numMoves,nPools,pools,bContrib,poolCounts,maxCount); #endif if(*threads_needing_to_reset_iter & myMask) { #if PARALLELIZE_POOLSPLIT && defined(_OPENMP) #pragma omp critical #endif { *threads_needing_to_reset_iter &= ~myMask; *totalIterations += iter; } iter = 0; } int timesUp = stop_state || (limitTime && time(NULL) >= limitTime); if(timesUp || !valueOfMove(bestMove,nPools,pools,bContrib,poolCounts,maxCount)) { /* looks like we're at a local maxima */ willContinue = !timesUp; ULL gBad = globalBadness(scores,ap.np,pools); int keep = !*bestPools_init_yet || gBad < *gBadLast; if (keep) #if defined(_OPENMP) #pragma omp critical #endif if ((keep = !*bestPools_init_yet || gBad < *gBadLast) != 0) { *bestPools_init_yet = 1; wrapped_memcpy(bestPools,pools,ap.np*sizeof(int)); *gBadLast = gBad; *totalIterations += iter; } if(gBad < (ULL)1<<48) willContinue=0; // everything down to score 0 - can't very much improve on that (except for reducing # pools or size difference) if (keep) { iter = 0; if(time(NULL)-*lastOutputTime > 2) { int should_print_counts 
= 0; #if PARALLELIZE_POOLSPLIT && defined(_OPENMP) #pragma omp critical #endif if(time(NULL)-*lastOutputTime > 2) { *lastOutputTime = time(NULL); *threads_needing_to_reset_iter = ~0; should_print_counts = 1; } if(should_print_counts) { *overlaps=table?dGprintPooledCounts(ap,pools,scores,stderr) : printPooledCounts(ap,pools,scores); printNumInEachPool(poolCounts,nPools); if(!willContinue) { *just_printed_counts = 1; break; } fprintf(stderr,"Local maxima found after %" QUOT "ld moves\nTrying to better it... (press Control-C to stop)\n",*totalIterations+iter); /* TODO: what about the 'iter' values of other threads? (or just don't count them yet) */ fflush(stderr); /* in case of broken Windows/WINE etc (see comments in user.c) */ } } } else { /* this maxima doesn't beat the best we've seen */ if(iter>max_iterations && !stop_state) { fputs("Too many moves without improvement: giving up\n",stderr); willContinue=0; /* and stop other threads: */ stop_state = s_tooManyIters; } } if(!willContinue) break; if(keep) { /* already found a good local maxima, so just take a few random steps away from it... */ int randomMoves = 5+ThreadRand()%5, i; for(i=0; i<randomMoves; i++) { int moveToMake=ThreadRand()%numMoves; if(maxCount) while(poolCounts[poolOfMove(moves[moveToMake],nPools,pools)]==maxCount) if(++moveToMake==numMoves) moveToMake=0; make_a_move(moves[moveToMake],ap.np,scores,primerMove_depends_on,nPools,pools,bContrib,poolCounts,maxCount); } continue; /* don't do the additional make_a_move below (we'd have to repeat the maxCount condition) */ } else { /* local maximae getting worse... get me out of here! */ break; } } #if USE_QSORT int i = 0; while(!(ThreadRand()%5) && i<numMoves-1 && valueOfMove(moves[i+1],nPools,pools,bContrib,poolCounts,maxCount)) ++i; /* sometimes don't pick the best one, just in case (TODO: can we write code to "get the top N items" w/out a complete sort?) 
*/ bestMove = moves[i]; #endif make_a_move(bestMove,ap.np,scores,primerMove_depends_on,nPools,pools,bContrib,poolCounts,maxCount); } } if(bContrib) free(bContrib); if(pools) free(pools); free(poolCounts); #if PARALLELIZE_POOLSPLIT && defined(_OPENMP) #pragma omp critical #endif *totalIterations += iter; } PS_cache PS_precalc(AllPrimers ap,const float *table,const char *overlappingAmplicons,const int *primerNoToAmpliconNo,int nAmplicons) { PS_cache r; addTags(ap); r.scores = table ? dGtriangle(ap,table) : triangle(ap); removeTags(ap); r.primerMove_depends_on = merge_scores_of_stuckTogether_primers(ap,r.scores); r.fix_to_pool = pre_fix_primers_to_pools(ap); if(memFail(r.scores,r.primerMove_depends_on,r.fix_to_pool,_memFail)) r.scores = NULL; else { saturate_scores_of_overlapping_primers(r.scores,overlappingAmplicons,primerNoToAmpliconNo,nAmplicons,ap.np); r.fix_min_pools = 2; int i; for(i=0; i<ap.np; i++) if(r.fix_to_pool[i]>=r.fix_min_pools) r.fix_min_pools=r.fix_to_pool[i]+1; } return r; } void PS_free(PS_cache c) { if(c.scores) { free(c.scores); free(c.primerMove_depends_on); free(c.fix_to_pool); } } int* split_into_pools(AllPrimers ap,int nPools,int timeLimit,PS_cache cache,int seedless,const float *table,int maxCount) { int *scores = cache.scores; if(!scores) return NULL; int *primerMove_depends_on = cache.primerMove_depends_on; int *fix_to_pool = cache.fix_to_pool; { if(nPools<cache.fix_min_pools) { fprintf(stderr,"ERROR: @%d:primers need at least %d pools, but only got %d\n",cache.fix_min_pools,cache.fix_min_pools,nPools); return NULL; } } if(maxCount) { int denom=0,i; for(i=0; i<ap.np; i++) if(primerMove_depends_on[i]!=-1) denom++; maxCount=maxCount*denom/ap.np; if(!maxCount) maxCount=1; } /* pairs */ int numMoves=0,*shared_moves=initMoves(&numMoves,ap.np,nPools,primerMove_depends_on,fix_to_pool); /* =0 to stop warnings on old compilers */ if(memFail(shared_moves,_memFail)) return NULL; if(!numMoves) { fputs("Can't move anything!\n",stderr); 
free(shared_moves); return NULL; } int *bestPools = malloc(ap.np*sizeof(int)); if(memFail(shared_moves,bestPools,_memFail)) return NULL; time_t start = time(NULL); srand(seedless ? 1 : start); int bestPools_init_yet = 0; ULL gBadLast=0; /* latter =0 to stop warnings on old compilers */ time_t lastOutputTime = (time_t)0; /* so 1st maxima gets output no matter what (might be needed if break after) */ time_t limitTime = (time_t)0; if(timeLimit) limitTime = time(NULL) + timeLimit*60; /* (timeLimit is in minutes) */ int just_printed_counts = 0, overlaps = 0; stop_state = s_KeepGoing; signal(SIGINT, intHandler); if(omp_get_max_threads() > 1) { if(seedless) { omp_set_num_threads(1); fputs("NOT parallelising the pool trials, as you asked for predictability.\n",stderr); } else fprintf(stderr,"Parallelising pool trials: %d threads\n",omp_get_max_threads()); } fprintf(stderr,"OK, here goes... (press Control-C to stop%s)\n",timeLimit?" early":""); fflush(stderr); long totalIterations = 0; ThreadMask threads_needing_to_reset_iter = 0; #if PARALLELIZE_POOLSPLIT && defined(_OPENMP) #pragma omp parallel #endif poolsplit_thread(shared_moves,ap,nPools,numMoves,primerMove_depends_on,fix_to_pool,scores,limitTime,bestPools,table, &bestPools_init_yet,&gBadLast,&totalIterations,&lastOutputTime,&overlaps,&just_printed_counts,&threads_needing_to_reset_iter,maxCount); signal(SIGINT, SIG_DFL); if(!just_printed_counts) { fputs("... looks like this is the best I can do:\n",stderr); overlaps = table ? 
dGprintPooledCounts(ap,bestPools,scores,stderr) : printPooledCounts(ap,bestPools,scores); int *counts=numInEachPool(bestPools,ap.np,nPools,primerMove_depends_on); if(counts) { printNumInEachPool(counts,nPools); free(counts); } } long numSecs = (long)(time(NULL)-start); if(!numSecs) numSecs=1; /* so division doesn't crash */ fprintf(stderr,"%" QUOT "ld moves",totalIterations); prnSeconds(numSecs); fprintf(stderr," = %" QUOT "ld/sec\n",totalIterations/numSecs); if(bestPools && overlaps) { if(stop_state == s_tooManyIters) fprintf(stderr,"WARNING: There are still overlaps in these pools,\neven after this number of moves.\nYou might need more pools.\n"); else fprintf(stderr,"WARNING: There are still overlaps in these pools.\nMaybe you should have let it run longer\nto see if these overlaps can be eliminated.\n"); } fflush(stderr); free(shared_moves); return bestPools; } int suggest_num_pools(AllPrimers ap,PS_cache cache,const float *table) { /* Apply a simple threshold-based allocation just for suggesting a number of pools */ int threshold = table ? 14 : 7; /* dG -7 or score 7. TODO: customise? 
but this function is for when the user is not sure, so perhaps we'd best hard-code the threshold */ int nPools = ap.np; /* worst case is none of the primers are paired (unpaired primers could hang randomise_pools before v1.42 because this line said ap.np/2) */ int *scores = cache.scores; if(!scores) return 0; int *primerMove_depends_on = cache.primerMove_depends_on; int *fix_to_pool = cache.fix_to_pool; ULL *bContrib = malloc(ap.np*nPools*sizeof(ULL)); int *poolCounts=malloc(nPools*sizeof(int)); int *pools = malloc(ap.np*sizeof(int)); if(memFail(bContrib,poolCounts,pools,_memFail)) return 0; randomise_pools(ap.np,primerMove_depends_on,fix_to_pool,scores,nPools,pools,bContrib,poolCounts,1); /* puts 0 or 1 set in each pool (after the fixed ones) */ int suggest_nPools = 1; int primer; for (primer=0; primer<ap.np; primer++) if (primerMove_depends_on[primer]==-1) { if (fix_to_pool[primer]==-1) { int destPool; for (destPool=0; destPool < suggest_nPools; destPool++) if(maxScoreOfBadness(bContrib[primerAndPool_to_contribOffset(primer,destPool,nPools)]) <= threshold) break; /* find first pool it will 'fit' in */ if (destPool == suggest_nPools) suggest_nPools++; if (pools[primer] != destPool) make_a_move(primerAndDest_to_moveNo(primer,destPool,nPools,pools),ap.np,scores,primerMove_depends_on,nPools,pools,bContrib,poolCounts,ap.np); } else if (fix_to_pool[primer] >= suggest_nPools) { /* must have at least as many for the fixed-pool primers (and fix_to_pool starts numbering at 0, so +1 of course) */ suggest_nPools = fix_to_pool[primer] + 1; } } free(bContrib); free(pools); free(poolCounts); return suggest_nPools; }
lis_matrix_ilu.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <stdlib.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif #include <string.h> #include <stdarg.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" #undef __FUNC__ #define __FUNC__ "lis_matrix_ilu_create" LIS_INT lis_matrix_ilu_create(LIS_INT n, LIS_INT bs, LIS_MATRIX_ILU *A) { LIS_INT i; LIS_INT *nnz; LIS_INT **index; LIS_DEBUG_FUNC_IN; *A = NULL; nnz = NULL; index = NULL; *A = (LIS_MATRIX_ILU)lis_malloc( sizeof(struct LIS_MATRIX_ILU_STRUCT),"lis_matrix_ilu_create::A" ); if( NULL==*A ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_ILU_STRUCT)); return LIS_OUT_OF_MEMORY; } memset(*A,0,sizeof(struct LIS_MATRIX_ILU_STRUCT)); nnz = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_ilu_create::nnz" ); if( nnz==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } index = (LIS_INT **)lis_malloc( n*sizeof(LIS_INT *),"lis_matrix_ilu_create::index" ); if( index==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT *)); return LIS_OUT_OF_MEMORY; } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { nnz[i] = 0; index[i] = NULL; } (*A)->n = n; (*A)->bs = bs; (*A)->nnz = nnz; (*A)->index = index; (*A)->nnz_ma = NULL; (*A)->value = NULL; (*A)->values = NULL; (*A)->bsz = NULL; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_ilu_setCR" LIS_INT lis_matrix_ilu_setCR(LIS_MATRIX_ILU A) { LIS_INT n; LIS_SCALAR **value; LIS_DEBUG_FUNC_IN; n = A->n; value = (LIS_SCALAR **)lis_malloc( n*sizeof(LIS_SCALAR *),"lis_matrix_ilu_setCR::value" ); if( value==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } A->value = value; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_ilu_setVR" LIS_INT lis_matrix_ilu_setVR(LIS_MATRIX_ILU A) { LIS_INT n; LIS_INT *bsz; LIS_SCALAR 
***values; LIS_DEBUG_FUNC_IN; n = A->n; bsz = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_ilu_setVR::bsz" ); if( bsz==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } values = (LIS_SCALAR ***)lis_malloc( n*sizeof(LIS_SCALAR **),"lis_matrix_ilu_setVR::values" ); if( values==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_SCALAR *)); return LIS_OUT_OF_MEMORY; } A->bsz = bsz; A->values = values; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_ilu_destroy" LIS_INT lis_matrix_ilu_destroy(LIS_MATRIX_ILU A) { LIS_INT i,j; LIS_DEBUG_FUNC_IN; if( lis_is_malloc(A) ) { if( A->bsz ) { for(i=0;i<A->n;i++) { free(A->index[i]); for(j=0;j<A->nnz[i];j++) { free(A->values[i][j]); } if( A->nnz[i]>0 ) free(A->values[i]); } lis_free2(5,A->bsz,A->nnz,A->index,A->values,A->nnz_ma); } else { for(i=0;i<A->n;i++) { if( A->nnz[i]>0 ) { free(A->index[i]); free(A->value[i]); } } lis_free2(4,A->nnz,A->index,A->value,A->nnz_ma); } lis_free(A); } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_ilu_create" LIS_INT lis_matrix_ilu_premalloc(LIS_INT nnzrow, LIS_MATRIX_ILU A) { LIS_INT i,n; LIS_INT *nnz_ma; LIS_DEBUG_FUNC_IN; n = A->n; nnz_ma = (LIS_INT *)lis_malloc( n*sizeof(LIS_INT),"lis_matrix_ilu_premalloc::nnz_ma" ); if( nnz_ma==NULL ) { LIS_SETERR_MEM(n*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { nnz_ma[i] = nnzrow; A->index[i] = (LIS_INT *)malloc( nnzrow*sizeof(LIS_INT) ); A->value[i] = (LIS_SCALAR *)malloc( nnzrow*sizeof(LIS_SCALAR) ); } for(i=0;i<n;i++) { if( A->index[i]==NULL ) { LIS_SETERR_MEM(nnzrow*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } if( A->value[i]==NULL ) { LIS_SETERR_MEM(nnzrow*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } } A->nnz_ma = nnz_ma; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_ilu_realloc" LIS_INT lis_matrix_ilu_realloc(LIS_INT row, LIS_INT nnz, 
LIS_MATRIX_ILU A) { LIS_DEBUG_FUNC_IN; A->index[row] = (LIS_INT *)realloc(A->index[row],nnz*sizeof(LIS_INT)); if( A->index[row]==NULL ) { LIS_SETERR_MEM(nnz*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } A->value[row] = (LIS_SCALAR *)realloc(A->value[row],nnz*sizeof(LIS_SCALAR)); if( A->value[row]==NULL ) { LIS_SETERR_MEM(nnz*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matvect_ilu" LIS_INT lis_matvect_ilu(LIS_MATRIX A, LIS_MATRIX_ILU LU, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,jj,n; LIS_SCALAR t,*x,*y; LIS_QUAD_DECLAR; #ifdef USE_QUAD_PRECISION LIS_INT j0,j1; LIS_QUAD_PD tt; #endif LIS_DEBUG_FUNC_IN; n = LU->n; x = X->value; y = Y->value; #ifdef USE_QUAD_PRECISION if( X->precision==LIS_PRECISION_DEFAULT ) #endif { #ifdef USE_MPI LIS_MATVEC_SENDRECV; #endif #ifdef _OPENMP #pragma omp parallel for private(i,j,jj,t) #endif for(i=0;i<n;i++) { t = 0.0; for(j=0;j<LU->nnz[i];j++) { jj = LU->index[i][j]; t += LU->value[i][j] * X->value[jj]; } Y->value[i] = t; } } #ifdef USE_QUAD_PRECISION else { #ifdef USE_MPI lis_send_recv_mp(A->commtable,X); #endif #ifndef USE_FMA2_SSE2 #ifndef USE_SSE2 #pragma omp parallel private(i,j,jj,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,jj,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif for(i=0;i<n;i++) { Y->value[i] = Y->value_lo[i] = 0.0; for(j=0;j<LU->nnz[i];j++) { jj = LU->index[i][j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(Y->value[i],Y->value_lo[i],Y->value[i],Y->value_lo[i],X->value[jj],X->value_lo[jj],LU->value[i][j]); #else LIS_QUAD_FMAD_SSE2(Y->value[i],Y->value_lo[i],Y->value[i],Y->value_lo[i],X->value[jj],X->value_lo[jj],LU->value[i][j]); #endif } } #else #ifdef _OPENMP #ifndef USE_SSE2 #pragma omp parallel for private(i,j,j0,j1,tt,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel for private(i,j,j0,j1,tt,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif #endif for(i=0;i<n;i++) { 
tt.hi[0] = tt.hi[1] = tt.lo[0] = tt.lo[1] = 0.0; for(j=0;j<LU->nnz[i]-1;j+=2) { j0 = LU->index[i][j]; j1 = LU->index[i][j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_LDSD(tt.hi[0],tt.lo[0],tt.hi[0],tt.lo[0],X->value[j0],X->value_lo[j0],X->value[j1],X->value_lo[j1],LU->value[i][j]); #endif } #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(Y->value[i],Y->value_lo[i],tt.hi[0],tt.lo[0],tt.hi[1],tt.lo[1]); #endif for(;j<LU->nnz[i];j++) { j0 = LU->index[i][j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(Y->value[i],Y->value_lo[i],Y->value[i],Y->value_lo[i],X->value[j0],X->value_lo[j0],LU->value[i][j]); #endif } } #endif } #endif LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matvec_ilu" LIS_INT lis_matvec_ilu(LIS_MATRIX A, LIS_MATRIX_ILU LU, LIS_VECTOR X, LIS_VECTOR Y) { LIS_INT i,j,jj,n,np; LIS_SCALAR *x,*y; #ifdef _OPENMP LIS_INT nprocs,k; LIS_SCALAR t,*w; #endif #ifdef USE_QUAD_PRECISION LIS_INT j0,j1; #ifdef _OPENMP LIS_SCALAR *ww,*wwl; #endif #endif LIS_QUAD_DECLAR; LIS_DEBUG_FUNC_IN; np = A->np; n = LU->n; x = X->value; y = Y->value; #ifdef USE_QUAD_PRECISION if( X->precision==LIS_PRECISION_DEFAULT ) #endif { #ifdef USE_MPI LIS_MATVEC_SENDRECV; #endif #ifdef _OPENMP nprocs = omp_get_max_threads(); w = (LIS_SCALAR *)lis_malloc( nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_crs::w" ); #pragma omp parallel private(i,j,k,jj,t) { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &w[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0;i<n;i++) { for(j=0;j<LU->nnz[i];j++) { jj = k*np + LU->index[i][j]; w[jj] += LU->value[i][j] * X->value[i]; } } #pragma omp for for(i=0;i<np;i++) { t = 0.0; for(j=0;j<nprocs;j++) { t += w[j*np+i]; } Y->value[i] = t; } } lis_free(w); #else for(i=0;i<np;i++) { Y->value[i] = 0.0; } for(i=0;i<n;i++) { for(j=0;j<LU->nnz[i];j++) { jj = LU->index[i][j]; Y->value[jj] += LU->value[i][j] * X->value[i]; } } #endif } #ifdef USE_QUAD_PRECISION else { #ifdef USE_MPI lis_send_recv_mp(A->commtable,X); #endif #ifdef _OPENMP 
#ifndef USE_FMA2_SSE2 nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR),"lis_matvect_crs_mp::ww" ); wwl = &ww[nprocs*np]; #ifndef USE_SSE2 #pragma omp parallel private(i,j,jj,k,p1,p2,tq,bhi,blo,chi,clo,sh,sl,th,tl,eh,el) #else #pragma omp parallel private(i,j,jj,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) #endif { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0;i<n;i++) { for(j=0;j<LU->nnz[i];j++) { jj = k*np + LU->index[i][j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(ww[jj],wwl[jj],ww[jj],wwl[jj],X->value[i],X->value_lo[i],LU->value[i][j]); #else LIS_QUAD_FMAD_SSE2(ww[jj],wwl[jj],ww[jj],wwl[jj],X->value[i],X->value_lo[i],LU->value[i][j]); #endif } } #pragma omp for for(i=0;i<np;i++) { Y->value[i] = Y->value_lo[i] = 0.0; for(j=0;j<nprocs;j++) { #ifndef USE_SSE2 LIS_QUAD_ADD(Y->value[i],Y->value_lo[i],Y->value[i],Y->value_lo[i],ww[j*np+i],wwl[j*np+i]); #else LIS_QUAD_ADD_SSE2(Y->value[i],Y->value_lo[i],Y->value[i],Y->value_lo[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #else nprocs = omp_get_max_threads(); ww = (LIS_SCALAR *)lis_malloc( 2*nprocs*np*sizeof(LIS_SCALAR), "lis_matvect_crs_mp2::ww" ); wwl = &ww[nprocs*np]; #pragma omp parallel private(i,j,j0,j1,k,bh,ch,sh,wh,th,bl,cl,sl,wl,tl,p1,p2,t0,t1,t2,eh) { k = omp_get_thread_num(); #pragma omp for for(j=0;j<nprocs;j++) { memset( &ww[j*np], 0, np*sizeof(LIS_SCALAR) ); memset( &wwl[j*np], 0, np*sizeof(LIS_SCALAR) ); } #pragma omp for for(i=0; i<n; i++) { for(j=0;j<LU->nnz[i]-1;j+=2) { j0 = k*np + LU->index[i][j]; j1 = k*np + LU->index[i][j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(ww[j0],wwl[j0],ww[j1],wwl[j1],ww[j0],wwl[j0],ww[j1],wwl[j1],X->value[i],X->value_lo[i],X->value[i],X->value_lo[i],LU->value[i][j]); #endif } for(;j<LU->nnz[i];j++) { j0 = LU->index[i][j]; #ifdef USE_SSE2 
LIS_QUAD_FMAD_SSE2(ww[j0],wwl[j0],ww[j0],wwl[j0],X->value[i],X->value_lo[i],LU->value[i][j]); #endif } } #pragma omp for for(i=0;i<np;i++) { Y->value[i] = Y->value_lo[i] = 0.0; for(j=0;j<nprocs;j++) { #ifdef USE_SSE2 LIS_QUAD_ADD_SSE2(Y->value[i],Y->value_lo[i],Y->value[i],Y->value_lo[i],ww[j*np+i],wwl[j*np+i]); #endif } } } lis_free(ww); #endif #else #ifndef USE_FMA2_SSE2 for(i=0;i<np;i++) { Y->value[i] = 0.0; Y->value_lo[i] = 0.0; } for(i=0;i<n;i++) { for(j=0;j<LU->nnz[i];j++) { jj = LU->index[i][j]; #ifndef USE_SSE2 LIS_QUAD_FMAD(Y->value[jj],Y->value_lo[jj],Y->value[jj],Y->value_lo[jj],X->value[i],X->value_lo[i],LU->value[i][j]); #else LIS_QUAD_FMAD_SSE2(Y->value[jj],Y->value_lo[jj],Y->value[jj],Y->value_lo[jj],X->value[i],X->value_lo[i],LU->value[i][j]); #endif } } #else for(i=0; i<np; i++) { Y->value[i] = 0.0; Y->value_lo[i] = 0.0; } for(i=0; i<n; i++) { for(j=0;j<LU->nnz[i]-1;j+=2) { j0 = LU->index[i][j]; j1 = LU->index[i][j+1]; #ifdef USE_SSE2 LIS_QUAD_FMAD2_SSE2_STSD(Y->value[j0],Y->value_lo[j0],Y->value[j1],Y->value_lo[j1],Y->value[j0],Y->value_lo[j0],Y->value[j1],Y->value_lo[j1],X->value[i],X->value_lo[i],X->value[i],X->value_lo[i],LU->value[i][j]); #endif } for(;j<LU->nnz[i];j++) { j0 = LU->index[i][j]; #ifdef USE_SSE2 LIS_QUAD_FMAD_SSE2(Y->value[j0],Y->value_lo[j0],Y->value[j0],Y->value_lo[j0],X->value[i],X->value_lo[i],LU->value[i][j]); #endif } } #endif #endif } #endif LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; }
epir_selector_factory.c
#include "epir.h"

/**
 * Shared initializer for a selector-factory context.
 *
 * Copies the 32-byte key, records the pool capacities, allocates one cipher
 * pool per plaintext (0 and 1), and initializes the mutex guarding the pools.
 * `idx[m]` is the index of the last valid cipher in pool `m` (-1 == empty).
 *
 * Returns 0 on success, -1 on allocation failure, or the pthread error code
 * if the mutex cannot be created.  On every failure path all memory acquired
 * here is released again (the original leaked ciphers[0] / both buffers).
 */
static inline int epir_selector_factory_ctx_init_(
	epir_selector_factory_ctx *ctx, const bool is_fast,
	const unsigned char *key, const uint32_t capacity_zero, const uint32_t capacity_one) {
	ctx->is_fast = is_fast;
	memcpy(ctx->key, key, 32);
	ctx->capacities[0] = capacity_zero;
	ctx->capacities[1] = capacity_one;
	ctx->ciphers[0] = malloc(sizeof(unsigned char) * EPIR_CIPHER_SIZE * capacity_zero);
	if(ctx->ciphers[0] == NULL) return -1;
	ctx->ciphers[1] = malloc(sizeof(unsigned char) * EPIR_CIPHER_SIZE * capacity_one);
	if(ctx->ciphers[1] == NULL) {
		/* Fix: do not leak the first pool when the second allocation fails. */
		free(ctx->ciphers[0]);
		ctx->ciphers[0] = NULL;
		return -1;
	}
	ctx->idx[0] = ctx->idx[1] = -1;
	int ret;
	if((ret = pthread_mutex_init(&ctx->mutex, NULL)) != 0) {
		/* Fix: release both pools if the mutex cannot be created. */
		free(ctx->ciphers[0]);
		free(ctx->ciphers[1]);
		ctx->ciphers[0] = ctx->ciphers[1] = NULL;
		return ret;
	}
	return 0;
}

/** Initialize a context that encrypts with the public key (normal mode). */
int epir_selector_factory_ctx_init(
	epir_selector_factory_ctx *ctx,
	const unsigned char *pubkey, const uint32_t capacity_zero, const uint32_t capacity_one) {
	return epir_selector_factory_ctx_init_(ctx, false, pubkey, capacity_zero, capacity_one);
}

/** Initialize a context that encrypts with the private key (fast mode). */
int epir_selector_factory_ctx_init_fast(
	epir_selector_factory_ctx *ctx,
	const unsigned char *privkey, const uint32_t capacity_zero, const uint32_t capacity_one) {
	return epir_selector_factory_ctx_init_(ctx, true, privkey, capacity_zero, capacity_one);
}

/**
 * Release the cipher pools and destroy the mutex.
 * Returns 0 on success or the pthread_mutex_destroy() error code.
 */
int epir_selector_factory_ctx_destroy(epir_selector_factory_ctx *ctx) {
	free(ctx->ciphers[0]);
	free(ctx->ciphers[1]);
	/* Guard against accidental double-free if destroy is called twice. */
	ctx->ciphers[0] = ctx->ciphers[1] = NULL;
	int ret;
	if((ret = pthread_mutex_destroy(&ctx->mutex)) != 0) return ret;
	return 0;
}

/**
 * Synchronously top up both cipher pools to their configured capacities.
 * Encryption runs in parallel (OpenMP); the pool bookkeeping is serialized
 * by ctx->mutex.
 *
 * Returns 0 on success, or a small nonzero code describing the first class
 * of failure observed (1 = lock failed, 2 = pool overflow, 3 = unlock failed).
 * NOTE(review): `ret` is written by several threads without atomics; this is
 * benign here because it is only ever set to a nonzero value — confirm this
 * is the intended error-reporting granularity.
 */
int epir_selector_factory_fill_sync(epir_selector_factory_ctx *ctx) {
	epir_ecelgamal_encrypt_fn *encrypt = ctx->is_fast ?
		epir_ecelgamal_encrypt_fast : epir_ecelgamal_encrypt;
	int ret = 0;
	for(size_t msg=0; msg<2; msg++) {
		int32_t needs = ctx->capacities[msg] - ctx->idx[msg] - 1;
		#pragma omp parallel for
		for(int32_t i=0; i<needs; i++) {
			unsigned char cipher[EPIR_CIPHER_SIZE];
			/* Encrypt outside the lock — only the pool update is serialized. */
			encrypt(cipher, ctx->key, msg, NULL);
			if(pthread_mutex_lock(&ctx->mutex) != 0) {
				ret = 1;
				continue;
			}
			const int32_t idx = ++ctx->idx[msg];
			if(idx >= (int32_t)ctx->capacities[msg]) {
				/* Fix: the original `continue`d here while still holding the
				 * mutex, deadlocking every other worker.  Roll back the
				 * speculative increment and unlock before bailing out. */
				ctx->idx[msg]--;
				pthread_mutex_unlock(&ctx->mutex);
				ret = 2;
				continue;
			}
			memcpy(&ctx->ciphers[msg][idx * EPIR_CIPHER_SIZE], cipher, EPIR_CIPHER_SIZE);
			if(pthread_mutex_unlock(&ctx->mutex) != 0) {
				ret = 3;
				continue;
			}
		}
	}
	return ret;
}

/* pthread entry point: runs a synchronous fill on the given context. */
static void *epir_selector_factory_thread(void *ctx_) {
	epir_selector_factory_ctx *ctx = ctx_;
	epir_selector_factory_fill_sync(ctx);
	return NULL;
}

/**
 * Start an asynchronous fill on ctx->thread.
 * Returns 0 on success or the pthread_create() error code.
 */
int epir_selector_factory_fill(epir_selector_factory_ctx *ctx) {
	int ret;
	if((ret = pthread_create(&ctx->thread, NULL, epir_selector_factory_thread, ctx)) != 0) return ret;
	return 0;
}

/**
 * Build a selector for `idx` by first writing the choice bits into `ciphers`
 * and then replacing each choice byte with a precomputed cipher popped from
 * the matching pool.
 *
 * Returns 0 on success, a pthread error code on lock/unlock failure, or -1
 * if a pool is exhausted.  The mutex is always released before returning
 * (the original returned -1 with the mutex still held).
 */
int epir_selector_factory_create_selector(
	unsigned char *ciphers, epir_selector_factory_ctx *ctx,
	const uint64_t *index_counts, const uint8_t n_indexes, const uint64_t idx) {
	uint64_t n_ciphers = epir_selector_ciphers_count(index_counts, n_indexes);
	epir_selector_create_choice(ciphers, EPIR_CIPHER_SIZE, index_counts, n_indexes, idx);
	int ret;
	if((ret = pthread_mutex_lock(&ctx->mutex)) != 0) return ret;
	for(size_t i=0; i<n_ciphers; i++) {
		uint8_t choice = ciphers[i * EPIR_CIPHER_SIZE];
		if(ctx->idx[choice] < 0) {
			/* Fix: unlock before reporting pool exhaustion. */
			pthread_mutex_unlock(&ctx->mutex);
			return -1;
		}
		memcpy(&ciphers[i * EPIR_CIPHER_SIZE],
			&ctx->ciphers[choice][ctx->idx[choice] * EPIR_CIPHER_SIZE], EPIR_CIPHER_SIZE);
		ctx->idx[choice]--;
	}
	if((ret = pthread_mutex_unlock(&ctx->mutex)) != 0) return ret;
	return 0;
}
9_data-env3.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

/*
 * OpenMP data-environment demo: because of private(x), every thread gets its
 * own copy of x inside the parallel region; the outer value (100) is not
 * seen there, and the copy is only defined once the thread assigns it.
 */
int main(int argc, char** argv)
{
	int x = 100;

	omp_set_num_threads(20);

	#pragma omp parallel private(x)
	{
		const int tid = omp_get_thread_num();
		x = tid;
		printf("Sou a thread %d, meu valor de x é %d\n", tid, x);
	}

	return 0;
}
rose_Stress-1.c
//#include <float.h>
//#include <math.h>
#define MIN(a, b) ( (a < b) ? a : b)
#define MAX(a, b) ( (a > b) ? a : b)
#include <omp.h>
typedef double real8;

/*
 * Zero the new deviatoric stress components of every zone whose accumulated
 * plastic strain exceeds the failure threshold, and clamp that zone's strain
 * to 1.01x the threshold so it stays flagged as failed.
 *
 * Improvement over the generated original: the gather index is computed once
 * per iteration (`index = zoneset[i]`) and then actually used, instead of
 * re-evaluating zoneset[i] on every access.  Behavior is unchanged.
 */
void StressCheckEpsFail(real8 *newSxx, real8 *newSyy, real8 *newSzz,
                        real8 *newTxy, real8 *newTxz, real8 *newTyz,
                        real8 *eps, real8 eps_failure_model,
                        const int *zoneset, int length)
{
  int i;
  int index;
#pragma omp parallel for private (index,i) firstprivate (eps_failure_model,length)
  for (i = 0; i <= length - 1; i += 1) {
    index = zoneset[i];
    if (eps[index] > eps_failure_model) {
      newSxx[i] = 0.0;
      newSyy[i] = 0.0;
      newSzz[i] = 0.0;
      newTxy[i] = 0.0;
      newTxz[i] = 0.0;
      newTyz[i] = 0.0;
      eps[index] = eps_failure_model * 1.01;
    }
  }
}

/*
 * Accumulate stress-strain work increments.
 *
 * For each listed zone, deltz[zone] gains the trapezoidal work of the average
 * of old and new stresses against the strain rates d*, scaled by
 * 0.25*deltaTime*(vnewc+vc); delts[i] gains the corresponding old-stress-only
 * term.  szz is reconstructed from the traceless-deviator identity
 * szz = -sxx - syy (no szz array is passed in).
 *
 * Same index-hoisting cleanup as above; the floating-point expression
 * structure is preserved verbatim so results are bit-identical.
 */
void StressStrainWork(real8 *deltz, real8 *delts,
                      const real8 *newSxx, const real8 *newSyy, const real8 *newSzz,
                      const real8 *newTxy, const real8 *newTxz, const real8 *newTyz,
                      const real8 *sxx, const real8 *syy,
                      const real8 *txy, const real8 *txz, const real8 *tyz,
                      const real8 *dxx, const real8 *dyy, const real8 *dzz,
                      const real8 *dxy, const real8 *dxz, const real8 *dyz,
                      real8 deltaTime, const int *zoneset,
                      const real8 *vc, const real8 *vnewc, int length)
{
  int i;
  int index;
  real8 quarterDelta = 0.25 * deltaTime;
  real8 szz;
#pragma omp parallel for private (index,szz,i) firstprivate (length,quarterDelta)
  for (i = 0; i <= length - 1; i += 1) {
    index = zoneset[i];
    /* Deviatoric closure: the zz component is minus the sum of xx and yy. */
    szz = -sxx[index] - syy[index];
    deltz[index] += quarterDelta * (vnewc[i] + vc[i]) *
      (dxx[index] * (sxx[index] + newSxx[i]) +
       dyy[index] * (syy[index] + newSyy[i]) +
       dzz[index] * (szz + newSzz[i]) +
       2. * dxy[index] * (txy[index] + newTxy[i]) +
       2. * dxz[index] * (txz[index] + newTxz[i]) +
       2. * dyz[index] * (tyz[index] + newTyz[i]));
    delts[i] += quarterDelta * (vnewc[i] + vc[i]) *
      (dxx[index] * sxx[index] +
       dyy[index] * syy[index] +
       dzz[index] * szz +
       2. * dxy[index] * txy[index] +
       2. * dxz[index] * txz[index] +
       2. * dyz[index] * tyz[index]);
  }
}

/*
 * Split the work increment deltz into a reversible heating part (deltrh) and
 * the remainder (deltzh = deltz - deltrh), then rescale deltrh by the shear
 * modulus derivative over the averaged modulus.
 *
 * The shearMod[nz] > 0 branch uses the new compliance 0.5/shearMod; the
 * fallback branch uses only the old compliance 0.5*shearRatio.  deltaTime is
 * unused and explicitly voided (the original used a self-assignment, which
 * trips -Wself-assign).
 */
void StressStrainHeat(const real8 *deltz, real8 *deltzh, real8 *deltrh,
                      const real8 *shearMod, const real8 *shearRatio, const real8 *shearDer,
                      const real8 *newSxx, const real8 *newSyy, const real8 *newSzz,
                      const real8 *newTxy, const real8 *newTxz, const real8 *newTyz,
                      const real8 *sxx, const real8 *syy,
                      const real8 *txy, const real8 *txz, const real8 *tyz,
                      real8 deltaTime, const int *zoneset,
                      const real8 *vc, const real8 *vnewc, int length)
{
  real8 shearr;
  real8 sheari;
  real8 avgMod;
  int nz;
  int i;
  /* Quiet the compiler - unused argument */
  (void) deltaTime;
#pragma omp parallel for private (shearr,sheari,avgMod,nz,i) firstprivate (length)
  for (i = 0; i <= length - 1; i += 1) {
    nz = zoneset[i];
    shearr = 0.5 * shearRatio[i];
    if (shearMod[nz] > 0.) {
      sheari = 0.5 / shearMod[nz];
      deltrh[nz] = .25 * (vnewc[i] + vc[i]) *
        ((newSxx[i] * sheari - sxx[nz] * shearr) * (sxx[nz] + newSxx[i]) +
         (newSyy[i] * sheari - syy[nz] * shearr) * (syy[nz] + newSyy[i]) +
         (newSzz[i] * sheari + (syy[nz] + sxx[nz]) * shearr) *
           (newSzz[i] - sxx[nz] - syy[nz]) +
         2. * (newTxy[i] * sheari - txy[nz] * shearr) * (txy[nz] + newTxy[i]) +
         2. * (newTxz[i] * sheari - txz[nz] * shearr) * (txz[nz] + newTxz[i]) +
         2. * (newTyz[i] * sheari - tyz[nz] * shearr) * (tyz[nz] + newTyz[i]));
    } else {
      /* No current shear modulus: fall back to the old-compliance form. */
      deltrh[nz] = - .25 * (vnewc[i] + vc[i]) *
        (sxx[nz] * (sxx[nz] + newSxx[i]) +
         syy[nz] * (syy[nz] + newSyy[i]) -
         (syy[nz] + sxx[nz]) * (newSzz[i] - sxx[nz] - syy[nz]) +
         2. * txy[nz] * (txy[nz] + newTxy[i]) +
         2. * txz[nz] * (txz[nz] + newTxz[i]) +
         2. * tyz[nz] * (tyz[nz] + newTyz[i])) * shearr;
    }
    deltzh[nz] = deltz[nz] - deltrh[nz];
    avgMod = 0.5 * shearMod[nz];
    if (shearRatio[i] > 0.0)
      avgMod = avgMod + 0.5 / shearRatio[i];
    if (avgMod > 0.0)
      deltrh[nz] = shearDer[i] * deltrh[nz] / avgMod;
    else
      deltrh[nz] = 0.0;
  }
}
undirected_edge.h
#pragma once #include <gms/common/types.h> #include <vector> #include <cassert> namespace GMS::LinkPrediction { /** * Represents an undirected edge as a std::pair, with the invariant first <= second. */ class UndirectedEdge : public std::pair<NodeId, NodeId> { public: UndirectedEdge() : std::pair<NodeId, NodeId>(0, 0) {} UndirectedEdge(NodeId u, NodeId v) : std::pair<NodeId, NodeId>(u, v) { assert(u <= v); } }; // NOTE: Currently only used for debug assertions. template <class SGraph> int64_t count_undirected_edges(const SGraph &graph) { int64_t count = 0; int64_t num_nodes = graph.num_nodes(); int64_t self_cycles = 0; #pragma omp parallel for reduction(+: count, self_cycles) for (NodeId u = 0; u < num_nodes; ++u) { for (NodeId v : graph.out_neigh(u)) { if (u < v) { ++count; assert(graph.out_neigh(v).contains(u)); } else if (u == v) { ++self_cycles; } } } return count + self_cycles / 2; } }
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. 
% % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ? 
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); 
return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char ) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t 
number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 
0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case 
MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image,ExceptionInfo *exception) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. 
*/ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) *has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if 
(LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if (LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}

/*
  ReversePSDString() reverses the `length' bytes at `p' in place (via XOR
  swap) unless the image is MSB-endian, in which case the buffer is left
  untouched.
*/
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for(--q; p < q; ++p, --q)
  {
    *p = *p ^ *q,
    *q = *p ^ *q,
    *p = *p ^ *q;
  }
}

/*
  SetPSDPixel() stores one decoded sample into the pixel at `q'.  `type' is
  the PSD channel type: 0/1/2 set red/green/blue, 3 sets black (CMYK) or
  alpha, 4 sets alpha, -1 sets alpha, and -2/-3/-4 fall into the
  red/green/blue cases.  For PseudoClass images the sample is stored as a
  colormap index (packet_size selects char vs. short scaling); for non-index
  channels the colormap entry's alpha is updated instead.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q,
  ExceptionInfo *exception)
{
  if (image->storage_class == PseudoClass)
    {
      PixelInfo
        *color;

      if (type == 0)
        {
          if (packet_size == 1)
            SetPixelIndex(image,ScaleQuantumToChar(pixel),q);
          else
            SetPixelIndex(image,ScaleQuantumToShort(pixel),q);
        }
      color=image->colormap+(ssize_t) ConstrainColormapIndex(image,
        (ssize_t) GetPixelIndex(image,q),exception);
      /* Index channel of a multi-channel image: the index is all we set. */
      if ((type == 0) && (channels > 1))
        return;
      else
        color->alpha=(MagickRealType) pixel;
      SetPixelViaPixelInfo(image,color,q);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(image,pixel,q);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(image,pixel,q);
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(image,pixel,q);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(image,pixel,q);
      break;
    }
    case 3:
    {
      if (image->colorspace == CMYKColorspace)
        SetPixelBlack(image,pixel,q);
      else
        if (image->alpha_trait != UndefinedPixelTrait)
          SetPixelAlpha(image,pixel,q);
      break;
    }
    case 4:
    {
      /*
        A 5th channel of an RGB-compatible image with more than 3 channels
        is not treated as alpha here.
      */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->alpha_trait != UndefinedPixelTrait)
        SetPixelAlpha(image,pixel,q);
      break;
    }
  }
}

/*
  ReadPSDChannelPixels() decodes one row of raw channel samples from
  `pixels' into the image's authentic pixel cache at row `row'.  Samples are
  1 or 2 bytes wide, or floats otherwise (see GetPSDPacketSize).  Images
  with depth 1 are unpacked 8 pixels per byte: a set bit maps to 0 and a
  clear bit to QuantumRange.
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (Quantum *) NULL)
    return MagickFalse;
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          unsigned short
            nibble;

          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,exception);
        q+=GetPixelChannels(image);
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /* Expand up to 8 packed bits from this byte. */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit = 0; bit < (ssize_t) number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char)
            pixel) & (0x01 << (7-bit))) != 0 ?
0 : QuantumRange,q,exception);
          q+=GetPixelChannels(image);
          x++;
        }
        /* The loop above already advanced x; compensate for the x++. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  ReadPSDChannelRaw() reads an uncompressed channel: one blob read of
  GetPSDRowSize() bytes per row, decoded via ReadPSDChannelPixels().
  Returns MagickFalse on a short read or decode failure.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  ReadPSDRLESizes() reads the per-scanline compressed byte counts that
  precede RLE channel data: 2-byte values for version-1 (PSD) files and
  4-byte values for version-2 (PSB) files.  Returns NULL when the
  allocation fails.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}

/*
  ReadPSDChannelRLE() reads an RLE (PackBits) compressed channel.  `sizes'
  holds one compressed byte count per row (see ReadPSDRLESizes); a scratch
  buffer sized to the largest row (rejected above row_size+2048) receives
  the compressed bytes, which DecodePSDPixels() expands before
  ReadPSDChannelPixels() stores them.  Returns MagickFalse on short reads or
  decode errors.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048)) /* arbitrary number */
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      NOTE(review): depth 1 passes the sentinel 123456 instead of the real
      depth; presumably DecodePSDPixels handles that value specially --
      DecodePSDPixels is not visible here, confirm against its definition.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ?
compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      /* Inflate until the output buffer is full or the stream ends. */
      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /* Undo the per-row horizontal delta prediction (16-bit samples). */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          /*
          else if (packet_size == 4)
             {
               TODO: Figure out what to do there.
             }
          */
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

/*
  ReadPSDChannel() reads one channel of a layer.  Channel types below -1 are
  layer-mask channels: unless it is a user-supplied layer mask (type -2)
  with acceptable flags -- and, when the mask is disabled (flag 0x02), the
  "psd:preserve-opacity-mask" option is set -- the channel bytes are
  skipped.  Otherwise a grayscale clone the size of the mask page is decoded
  in place of the layer image and attached as layer_info->mask.image.  After
  decoding (Raw/RLE/ZIP) the blob is repositioned to the end of the channel
  (its recorded size includes the 2-byte compression marker already read by
  the caller) so a failed or partial decode cannot desynchronize the stream.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) SetImageType(mask,GrayscaleType,exception);
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Always land at the end of this channel, whatever the decoder did. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask !=
layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->alpha_trait=BlendPixelTrait;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateCMYK(layer_info->image,exception);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}

/*
  CheckPSDChannels() verifies that a layer's channel list covers the
  channels the mode requires: red, plus green/blue when min_channels >= 3,
  plus black when min_channels >= 4.  Mask channels (type < -1) are ignored;
  type -1 counts as alpha, which is accepted only when the layer carries at
  least min_channels+1 channels.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    /* Clear each required channel bit as its channel is found. */
    if (type == 0)
      channel_type&=~RedChannel;
    else if (type == 1)
      channel_type&=~GreenChannel;
    else if (type == 2)
      channel_type&=~BlueChannel;
    else if (type == 3)
      channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  AttachPSDLayers() compacts the layer list (dropping entries whose image
  could not be allocated), links the remaining layer images into the
  image->next/previous chain, copies each layer's page geometry, and frees
  the LayerInfo array.
*/
static void AttachPSDLayers(Image *image,LayerInfo *layer_info,
  ssize_t number_layers)
{
  register ssize_t
    i;

  ssize_t
    j;

  for (i=0; i < number_layers; i++)
  {
    if (layer_info[i].image == (Image *) NULL)
      {
        for (j=i; j < number_layers - 1; j++)
          layer_info[j] = layer_info[j+1];
        number_layers--;
        i--;
      }
  }
  if (number_layers == 0)
    {
      layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
      return;
    }
  for (i=0; i < number_layers; i++)
  {
    if (i > 0)
      layer_info[i].image->previous=layer_info[i-1].image;
    if (i < (number_layers-1))
      layer_info[i].image->next=layer_info[i+1].image;
    layer_info[i].image->page=layer_info[i].page;
  }
  image->next=layer_info[0].image;
  layer_info[0].image->previous=image;
  layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
}

static inline MagickBooleanType
PSDSkipImage(const ImageInfo *image_info,
  const size_t index)
{
  /*
    Returns MagickTrue when `index' falls outside the requested scene range
    [scene, scene+number_scenes-1]; with no scene range set, nothing is
    skipped.
  */
  if (image_info->number_scenes == 0)
    return(MagickFalse);
  if (index < image_info->scene)
    return(MagickTrue);
  if (index > image_info->scene+image_info->number_scenes-1)
    return(MagickTrue);
  return(MagickFalse);
}

/*
  ReadPSDLayersInternal() parses the "layer and mask information" section.
  When `size' is zero the section length is re-read, accepting an optional
  8BIM/Lr16 or 8BIM/Lr32 wrapper.  A negative layer count means the first
  alpha channel of the merged result holds transparency, so the image gains
  an alpha trait (that is all that is needed when `skip_layers' is set).
  For every layer record it reads the page rectangle, the channel list
  (validated with CheckPSDChannels), blend key, opacity, clipping and flags,
  the optional mask / blending-ranges / name sub-sections and any trailing
  additional info, then clones a sub-image per non-empty layer and decodes
  its channels with ReadPSDLayer() -- unless image ping mode or the scene
  range says otherwise, in which case the channel bytes are discarded.  On
  success the decoded layers are attached to `image' via AttachPSDLayers().
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      if (count == 4)
        ReversePSDString(image,type,(size_t) count);
      if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          count=ReadBlob(image,4,(unsigned char *) type);
          if (count == 4)
            ReversePSDString(image,type,4);
          if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) ||
              (LocaleNCompare(type,"Lr32",4) == 0)))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  if (size == 0)
    return(MagickTrue);
  layer_info=(LayerInfo *) NULL;
  number_layers=(ssize_t) ReadBlobSignedShort(image);
  if (number_layers < 0)
    {
      /*
        The first alpha channel in the merged result contains the
        transparency data for the merged result.
      */
      number_layers=MagickAbsoluteValue(number_layers);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " negative layer count corrected for");
      image->alpha_trait=BlendPixelTrait;
    }
  /* We only need to know if the image has an alpha channel */
  if (skip_layers != MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " image contains %.20g layers",(double) number_layers);
  if (number_layers == 0)
    ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
      image->filename);
  layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
    sizeof(*layer_info));
  if (layer_info == (LayerInfo *) NULL)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " allocation of LayerInfo failed");
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info));
  for (i=0; i < number_layers; i++)
  {
    ssize_t
      top,
      left,
      bottom,
      right;

    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading layer #%.20g",(double) i+1);
    /* Layer rectangle: top, left, bottom, right. */
    top=(ssize_t) ReadBlobSignedLong(image);
    left=(ssize_t) ReadBlobSignedLong(image);
    bottom=(ssize_t) ReadBlobSignedLong(image);
    right=(ssize_t) ReadBlobSignedLong(image);
    if ((right < left) || (bottom < top))
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    layer_info[i].page.y=top;
    layer_info[i].page.x=left;
    layer_info[i].page.width=(size_t) (right-left);
    layer_info[i].page.height=(size_t) (bottom-top);
    layer_info[i].channels=ReadBlobShort(image);
    if (layer_info[i].channels > MaxPSDChannels)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
          image->filename);
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
        (double) layer_info[i].page.x,(double) layer_info[i].page.y,
        (double) layer_info[i].page.height,(double)
        layer_info[i].page.width,(double) layer_info[i].channels);
    /* Per-channel type and byte count. */
    for (j=0; j < (ssize_t) layer_info[i].channels; j++)
    {
      layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
      if ((layer_info[i].channel_info[j].type < -4) ||
          (layer_info[i].channel_info[j].type > 4))
        {
          layer_info=DestroyLayerInfo(layer_info,number_layers);
          ThrowBinaryException(CorruptImageError,"NoSuchImageChannel",
            image->filename);
        }
      layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
        image);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
          (double) layer_info[i].channel_info[j].type,
          (double) layer_info[i].channel_info[j].size);
    }
    if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) type);
    if (count == 4)
      ReversePSDString(image,type,4);
    if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer type was %.4s instead of 8BIM", type);
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
    if (count != 4)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
          image->filename);
      }
    ReversePSDString(image,layer_info[i].blendkey,4);
    layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
      ReadBlobByte(image));
    layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
    layer_info[i].flags=(unsigned char) ReadBlobByte(image);
    layer_info[i].visible=!(layer_info[i].flags & 0x02);
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
        layer_info[i].blendkey,(double) layer_info[i].opacity,
        layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
        layer_info[i].visible ? "true" : "false");
    (void) ReadBlobByte(image); /* filler */
    size=ReadBlobLong(image);
    if (size != 0)
      {
        MagickSizeType
          combined_length,
          length;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer contains additional info");
        length=ReadBlobLong(image);
        combined_length=length+4;
        if (length != 0)
          {
            /*
              Layer mask info.
            */
            layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image);
            layer_info[i].mask.page.height=(size_t)
              (ReadBlobSignedLong(image)-layer_info[i].mask.page.y);
            layer_info[i].mask.page.width=(size_t) (
              ReadBlobSignedLong(image)-layer_info[i].mask.page.x);
            layer_info[i].mask.background=(unsigned char) ReadBlobByte(
              image);
            layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
            if (!(layer_info[i].mask.flags & 0x01))
              {
                /* Make the mask position relative to the layer page. */
                layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                  layer_info[i].page.y;
                layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                  layer_info[i].page.x;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                (double) layer_info[i].mask.page.x,(double)
                layer_info[i].mask.page.y,(double)
                layer_info[i].mask.page.width,(double)
                layer_info[i].mask.page.height,(double) ((MagickOffsetType)
                length)-18);
            /*
              Skip over the rest of the layer mask information.
            */
            if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) ==
                MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=ReadBlobLong(image);
        combined_length+=length+4;
        if (length != 0)
          {
            /*
              Layer blending ranges info.
            */
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer blending ranges: length=%.20g",(double)
                ((MagickOffsetType) length));
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        /*
          Layer name (Pascal string, padded to a multiple of 4 bytes).
        */
        length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
        combined_length+=length+1;
        if (length > 0)
          (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
        layer_info[i].name[length]='\0';
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer name: %s",layer_info[i].name);
        if ((length % 4) != 0)
          {
            length=4-(length % 4);
            combined_length+=length;
            /* Skip over the padding of the layer name */
            if (DiscardBlobBytes(image,length) == MagickFalse)
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "UnexpectedEndOfFile",image->filename);
              }
          }
        length=(MagickSizeType) size-combined_length;
        if (length > 0)
          {
            unsigned char
              *info;

            if (length > GetBlobSize(image))
              {
                layer_info=DestroyLayerInfo(layer_info,number_layers);
                ThrowBinaryException(CorruptImageError,
                  "InsufficientImageDataInFile",image->filename);
              }
            /* Preserve the remaining additional layer info verbatim. */
            layer_info[i].info=AcquireStringInfo((const size_t) length);
            info=GetStringInfoDatum(layer_info[i].info);
            (void) ReadBlob(image,(const size_t) length,info);
          }
      }
  }
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0))
      {
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " layer data is empty");
        if (layer_info[i].info != (StringInfo *) NULL)
          layer_info[i].info=DestroyStringInfo(layer_info[i].info);
        continue;
      }
    /*
      Allocate layered image.
    */
    layer_info[i].image=CloneImage(image,layer_info[i].page.width,
      layer_info[i].page.height,MagickFalse,exception);
    if (layer_info[i].image == (Image *) NULL)
      {
        layer_info=DestroyLayerInfo(layer_info,number_layers);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " allocation of image for layer %.20g failed",(double) i);
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      }
    if (layer_info[i].info != (StringInfo *) NULL)
      {
        (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
          layer_info[i].info,exception);
        layer_info[i].info=DestroyStringInfo(layer_info[i].info);
      }
  }
  if (image_info->ping != MagickFalse)
    {
      AttachPSDLayers(image,layer_info,number_layers);
      return(MagickTrue);
    }
  status=MagickTrue;
  for (i=0; i < number_layers; i++)
  {
    if ((layer_info[i].image == (Image *) NULL) ||
        (PSDSkipImage(image_info,i) != MagickFalse))
      {
        /* Not decoding this layer: discard its channel data. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          if (DiscardBlobBytes(image,(MagickSizeType)
              layer_info[i].channel_info[j].size) == MagickFalse)
            {
              layer_info=DestroyLayerInfo(layer_info,number_layers);
              ThrowBinaryException(CorruptImageError,
                "UnexpectedEndOfFile",image->filename);
            }
        }
        continue;
      }
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for layer %.20g",(double) i);
    status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
      exception);
    if (status == MagickFalse)
      break;
    status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
      (MagickSizeType) number_layers);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    AttachPSDLayers(image,layer_info,number_layers);
  else
    layer_info=DestroyLayerInfo(layer_info,number_layers);
  return(status);
}

ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo
*psd_info,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  /* When the coder policy denies PSD read rights, succeed without reading. */
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickTrue);
  return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse,
    exception));
}

/*
  ReadPSDMergedImage() reads the flattened composite that follows the layer
  section: a 2-byte compression marker (only Raw and RLE are accepted here),
  for RLE a table of rows*channels per-scanline byte counts, then each
  channel in turn (channel 1 of a 2-channel file is treated as the
  transparency channel, type -1).  CMYK data is negated and the alpha blend
  corrected afterwards.  Does nothing when a scene other than 0 was
  requested.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image *image,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  if ((image_info->number_scenes != 0) && (image_info->scene != 0))
    return(MagickTrue);
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateCMYK(image,exception);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
  return(status);
}

static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo
/*
  ReadPSDImage() body (signature begins on the previous line): reads and
  validates the PSD/PSB header, the color mode data (colormap), the image
  resource blocks, the layer/mask section, and the merged composite.
*/
*exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    imageListLength;

  ssize_t
    count;

  StringInfo
    *profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels < 1)
    ThrowReaderException(CorruptImageError,"MissingImageChannel");
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* version 1 (PSD) caps dimensions at 30000; only PSB may exceed that */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) &&
      (psd_info.depth != 16) && (psd_info.depth != 32))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  if (EOFBlob(image) != MagickFalse)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  status=ResetImagePixels(image,exception);
  if (status == MagickFalse)
    return(DestroyImageList(image));
  /* min_channels is the lower bound validated against the header below */
  psd_info.min_channels=3;
  if (psd_info.mode == LabMode)
    (void) SetImageColorspace(image,LabColorspace,exception);
  if (psd_info.mode == CMYKMode)
    {
      psd_info.min_channels=4;
      (void) SetImageColorspace(image,CMYKColorspace,exception);
    }
  else
    if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
        (psd_info.mode == DuotoneMode))
      {
        if (psd_info.depth != 32)
          {
            status=AcquireImageColormap(image,(size_t) (psd_info.depth < 16 ?
              256 : 65536),exception);
            if (status == MagickFalse)
              ThrowReaderException(ResourceLimitError,
                "MemoryAllocationFailed");
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " Image colormap allocated");
          }
        psd_info.min_channels=1;
        (void) SetImageColorspace(image,GRAYColorspace,exception);
      }
  if (psd_info.channels < psd_info.min_channels)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if ((psd_info.mode == IndexedMode) && (length < 3))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32))
        {
          /*
            Duotone image data; the format of this data is undocumented.
            32 bits per pixel; the colormap is ignored.
          */
          (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap.
          */
          number_colors=(size_t) length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors,exception) ==
              MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(
              (unsigned char) ReadBlobByte(image));
          image->alpha_trait=UndefinedPixelTrait;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  profile=(StringInfo *) NULL;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      if (length > GetBlobSize(image))
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      profile=ParseImageResourceBlocks(image,blocks,(size_t) length,
        &has_merged_image,exception);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers,
          exception) != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      (void) SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (EOFBlob(image) != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
    }
  if (image_info->ping != MagickFalse)
    {
      if (profile != (StringInfo *) NULL)
        profile=DestroyStringInfo(profile);
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  imageListLength=GetImageListLength(image);
  if ((has_merged_image != MagickFalse) || (imageListLength == 1))
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* no usable composite and no layers read yet: retry the layer section */
  if ((has_merged_image == MagickFalse) && (imageListLength == 1) &&
      (length != 0))
    {
      (void) SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse,
        exception);
      if (status != MagickTrue)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      if (imageListLength == 1)
        {
          if (profile != (StringInfo *) NULL)
            profile=DestroyStringInfo(profile);
          ThrowReaderException(CorruptImageError,
            "InsufficientImageDataInFile");
        }
      /* synthesize the composite by flattening the layers we did read */
      image->background_color.alpha=(MagickRealType) TransparentAlpha;
      image->background_color.alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(image,exception);
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  if (profile != (StringInfo *) NULL)
    {
      Image
        *next;

      i=0;
      next=image;
      while (next != (Image *) NULL)
      {
        if (PSDSkipImage(image_info,i++) == MagickFalse)
          (void) SetImageProfile(next,GetStringInfoName(profile),profile,
            exception);
        next=next->next;
      }
      profile=DestroyStringInfo(profile);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r P S D I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* PSB: the large-document (version 2) variant of the format */
  entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  entry->flags|=CoderEncoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Write an offset field: 16-bit for version 1 (PSD), 32-bit for PSB. */
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/* Seek back to `offset', patch the offset field written there, and
   restore the current blob position. */
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/* Write a size field: 32-bit for version 1 (PSD), 64-bit for PSB. */
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobLong(image,(unsigned int) size));
  return(WriteBlobLongLong(image,size));
}

/* Seek back to `offset', patch the size field written there, and restore
   the current blob position.  (Continues on the next line.) */
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void)
  SeekBlob(image,offset,SEEK_SET);
  result=SetPSDSize(psd_info,image,size);
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  Compress `length' bytes of pixel data with PSD Packbits RLE into
  compact_pixels; returns the number of compressed bytes written.
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels,
  ExceptionInfo *exception)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}

/*
  Write the 2-byte compression marker and, for RLE, reserve one zeroed
  offset slot per row per channel (patched later with real byte counts).
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const CompressionType compression,
  const ssize_t channels)
{
  size_t
    length;

  ssize_t
    i,
    y;

  if (compression == RLECompression)
    {
      length=(size_t) WriteBlobShort(image,RLE);
      for (i=0; i < channels; i++)
        for (y=0; y < (ssize_t) next_image->rows; y++)
          length+=SetPSDOffset(psd_info,image,0);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  else if (compression == ZipCompression)
    length=(size_t) WriteBlobShort(image,ZipWithoutPrediction);
#endif
  else
    length=(size_t) WriteBlobShort(image,Raw);
  return(length);
}

/*
  Write one channel of next_image, encoded per `compression'; returns the
  number of bytes written (0 on failure).  Continues on following lines.
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  const CompressionType compression,ExceptionInfo *exception)
{
  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const Quantum
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  ssize_t
    y;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* a separate channel carries its own 2-byte compression marker */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
    MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=(unsigned char *) GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  /* Zip compression: set up a deflate stream with an output buffer. */
  if (compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      memset(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          compressed_pixels=(unsigned char *) RelinquishMagickMemory(
            compressed_pixels);
          return(0);
        }
    }
#endif
  /* Export and write each scanline with the selected compression. */
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,exception);
    /* 1-bit monochrome data is stored inverted */
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels,
          exception);
        count+=WriteBlob(image,length,compact_pixels);
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;
        do
        {
          stream.avail_out=(uInt) CHUNK;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) CHUNK-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (compression ==
      ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/* Allocate a worst-case Packbits output buffer for one scanline. */
static unsigned char *AcquireCompactPixels(const Image *image,
  ExceptionInfo *exception)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
    }
  return(compact_pixels);
}

/*
  Write all channels of next_image (index or gray or RGB/CMYK, plus alpha
  and an optional opacity mask); returns the total bytes written, 0 on
  failure.  Continues on following lines.
*/
static size_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate,
  ExceptionInfo *exception)
{
  CompressionType
    compression;

  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    count,
    length,
    offset_length;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  compression=next_image->compression;
  if (image_info->compression != UndefinedCompression)
    compression=image_info->compression;
  if (compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image,exception);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      if (next_image->storage_class != PseudoClass)
        {
          if (IsImageGray(next_image) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ?
              4 : 3);
          if (next_image->alpha_trait != UndefinedPixelTrait)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,compression,
        (ssize_t) channels);
      /* per-channel span of the RLE offset table (2 or 4 bytes per row) */
      offset_length=(next_image->rows*(psd_info->version == 1 ?
        2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate,compression,
        exception);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsImageGray(next_image) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* PSD stores CMYK inverted; negate before and after writing */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateCMYK(next_image,exception);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate,compression,
                exception);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->alpha_trait != UndefinedPixelTrait)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate,compression,
            exception);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* restore the CMYK pixels negated before the channel writes */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateCMYK(next_image,exception);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* optionally write the layer's opacity mask from the registry */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            exception);
          if (mask != (Image *) NULL)
            {
              if (compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask,exception);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue,compression,
                exception);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}

/*
  Write a Pascal string (length byte followed by the text), zero-padded so
  the total field length is a multiple of `padding'.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ?
    255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/* Emit the 8BIM 0x03ED resolution resource block (16-byte payload);
   resolutions are written as 16.16 fixed point. */
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      x_resolution=2.54*65536.0*image->resolution.x+0.5;
      y_resolution=2.54*65536.0*image->resolution.y+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->resolution.x+0.5;
      y_resolution=65536.0*image->resolution.y+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16);  /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units);  /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units);  /* height unit */
}

/* Write one channel-info record: 2-byte channel id plus a zeroed size
   field (patched later via WritePSDSize). */
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    count;

  count=(size_t) WriteBlobShort(image,(const unsigned short) channel);
  count+=SetPSDSize(psd_info,image,0);
  return(count);
}

/* Strip the ICC profile resource (8BIM id 0x040F) from a resource block
   in place.  Continues on the next line. */
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if
       (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    if (id == 0x0000040f)
      {
        ssize_t
          quantum;

        /* resource payload is padded to an even size; 12 bytes of header */
        quantum=PSDQuantum(count)+12;
        if ((quantum >= 12) && (quantum < (ssize_t) length))
          {
            if ((q+quantum < (datum+length-16)))
              (void) memmove(q,q+quantum,length-quantum-(q-datum));
            SetStringInfoLength(bim_profile,length-quantum);
          }
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/* Strip the resolution resource (8BIM id 0x03ED) from a resource block
   in place. */
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    ssize_t
      cnt;

    q=(unsigned char *) p;
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      return;
    p=PushLongPixel(MSBEndian,p,&long_sans);
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);
    p=PushLongPixel(MSBEndian,p,&count);
    cnt=PSDQuantum(count);
    if (cnt < 0)
      return;
    if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) &&
        ((ssize_t) length-(cnt+12)-(q-datum)) > 0)
      {
        (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum));
        SetStringInfoLength(bim_profile,length-(cnt+12));
        break;
      }
    p+=count;
    if ((count & 0x01) != 0)
      p++;
  }
}

/*
  Return the "psd:additional-info" profile, optionally filtered down to the
  whitelisted layer keys (per the "psd:additional-info" image option:
  "all", "selective", or anything else to drop the profile).
  Continues on the next line.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define PSDKeySize 5
#define PSDAllowedLength 36

  char
    key[PSDKeySize];

  /*
    Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/
  */
  const char
    allowed[PSDAllowedLength][PSDKeySize] =
    {
      "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA",
      "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr",
      "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf",
      "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc",
      "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
    },
    *option;

  const StringInfo
    *info;

  MagickBooleanType
    found;

  register size_t
    i;

  size_t
    remaining_length,
    length;

  StringInfo
    *profile;

  unsigned char
    *p;

  unsigned int
    size;

  info=GetImageProfile(image,"psd:additional-info");
  if (info == (const StringInfo *) NULL)
    return((const StringInfo *) NULL);
  option=GetImageOption(image_info,"psd:additional-info");
  if (LocaleCompare(option,"all") == 0)
    return(info);
  if (LocaleCompare(option,"selective") != 0)
    {
      profile=RemoveImageProfile(image,"psd:additional-info");
      return(DestroyStringInfo(profile));
    }
  /* "selective": keep only whitelisted keys, compacting the buffer */
  length=GetStringInfoLength(info);
  p=GetStringInfoDatum(info);
  remaining_length=length;
  length=0;
  while (remaining_length >= 12)
  {
    /* skip over signature */
    p+=4;
    key[0]=(char) (*p++);
    key[1]=(char) (*p++);
    key[2]=(char) (*p++);
    key[3]=(char) (*p++);
    key[4]='\0';
    size=(unsigned int) (*p++) << 24;
    size|=(unsigned int) (*p++) << 16;
    size|=(unsigned int) (*p++) << 8;
    size|=(unsigned int) (*p++);
    size=size & 0xffffffff;
    remaining_length-=12;
    if ((size_t) size > remaining_length)
      return((const StringInfo *) NULL);
    found=MagickFalse;
    for (i=0; i < PSDAllowedLength; i++)
    {
      if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
        continue;
      found=MagickTrue;
      break;
    }
    remaining_length-=(size_t) size;
    if (found == MagickFalse)
      {
        if (remaining_length > 0)
          p=(unsigned char *) memmove(p-12,p+size,remaining_length);
        continue;
      }
    length+=(size_t) size+12;
    p+=size;
  }
  profile=RemoveImageProfile(image,"psd:additional-info");
  if (length == 0)
    return(DestroyStringInfo(profile));
  SetStringInfoLength(profile,(const size_t) length);
  (void) SetImageProfile(image,"psd:additional-info",info,exception);
  return(profile);
}

/*
  Write the layer-info section: layer count, per-layer records (bounds,
  channel infos, blend mode, opacity, mask, name, additional info), then
  the per-layer channel data.  Continues on following lines.
*/
static MagickBooleanType WritePSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size,
  ExceptionInfo *exception)
{
  char
    layer_name[MagickPathExtent];

  const char
    *property;

  const StringInfo
*info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 
4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/
  /*
    Write the pixel data for each layer.  layer_size_offsets[] holds the blob
    offsets recorded when the per-channel length fields were emitted, so
    WritePSDChannels can backpatch the actual channel byte counts.
  */
  next_image=base_image;
  layer_index=0;
  while (next_image != NULL)
  {
    length=WritePSDChannels(psd_info,image_info,image,next_image,
      layer_size_offsets[layer_index++],MagickTrue,exception);
    if (length == 0)
      {
        status=MagickFalse;
        break;
      }
    size+=length;
    next_image=GetNextImageInList(next_image);
  }
  /*
    Write the total size; PSD pads the layer-info section to an even length.
  */
  if (layers_size != (size_t*) NULL)
    *layers_size=size;
  if ((size/2) != ((size+1)/2))
    rounded_size=size+1;
  else
    rounded_size=size;
  (void) WritePSDSize(psd_info,image,rounded_size,size_offset);
  layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
    layer_size_offsets);
  /*
    Remove the opacity mask from the registry.
  */
  next_image=base_image;
  while (next_image != (Image *) NULL)
  {
    property=GetImageArtifact(next_image,"psd:opacity-mask");
    if (property != (const char *) NULL)
      (void) DeleteImageRegistry(property);
    next_image=GetNextImageInList(next_image);
  }
  return(status);
}

/*
  Write the layer records for an image list, honoring the coder security
  policy: when writing PSD content is not authorized, the call returns
  MagickTrue without emitting any layer data (best-effort no-op).
*/
ModuleExport MagickBooleanType WritePSDLayers(Image * image,
  const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=WritePolicyRights;
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickTrue);
  return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL,
    exception);
}

/*
  Write an image (list) in Adobe Photoshop PSD/PSB format: file header,
  optional colormap, image resource blocks (8BIM/ICC), the layer-info
  section (via WritePSDLayersInternal), and finally the composite image.
  Switches to version 2 (PSB) for dimensions over 30000 or an explicit
  "PSB" magick.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  const StringInfo
    *icc_profile;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  size_t
    length,
    num_channels,
    packet_size;

  StringInfo
    *bim_profile;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  /* bytes per pixel: 3 (or 6 for >8-bit) plus 1 (or 2) for alpha */
  packet_size=(size_t) (image->depth > 8 ? 6 : 3);
  if (image->alpha_trait != UndefinedPixelTrait)
    packet_size+=image->depth > 8 ? 2 : 1;
  psd_info.version=1;
  if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
      (image->columns > 30000) || (image->rows > 30000))
    psd_info.version=2;  /* PSB (large document) variant */
  (void) WriteBlob(image,4,(const unsigned char *) "8BPS");
  (void) WriteBlobMSBShort(image,psd_info.version);  /* version */
  for (i=1; i <= 6; i++)
    (void) WriteBlobByte(image, 0);  /* 6 bytes of reserved */
  /* When the image has a color profile it won't be converted to gray scale */
  if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) &&
      (SetImageGray(image,exception) != MagickFalse))
    num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
  else
    if ((image_info->type != TrueColorType) && (image_info->type !=
        TrueColorAlphaType) && (image->storage_class == PseudoClass))
      num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL);
    else
      {
        if (image->storage_class == PseudoClass)
          (void) SetImageStorageClass(image,DirectClass,exception);
        if (image->colorspace != CMYKColorspace)
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL);
        else
          num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL);
      }
  (void) WriteBlobMSBShort(image,(unsigned short) num_channels);
  (void) WriteBlobMSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobMSBLong(image,(unsigned int) image->columns);
  if (IsImageGray(image) != MagickFalse)
    {
      MagickBooleanType
        monochrome;

      /*
        Write depth & mode.
      */
      monochrome=IsImageMonochrome(image) && (image->depth == 1) ?
        MagickTrue : MagickFalse;
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
      (void) WriteBlobMSBShort(image,(unsigned short)
        (monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
    }
  else
    {
      (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
        PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
      if (((image_info->colorspace != UndefinedColorspace) ||
           (image->colorspace != CMYKColorspace)) &&
          (image_info->colorspace != CMYKColorspace))
        {
          /* anything non-CMYK is written as (indexed) RGB */
          (void) TransformImageColorspace(image,sRGBColorspace,exception);
          (void) WriteBlobMSBShort(image,(unsigned short)
            (image->storage_class == PseudoClass ? IndexedMode : RGBMode));
        }
      else
        {
          if (image->colorspace != CMYKColorspace)
            (void) TransformImageColorspace(image,CMYKColorspace,exception);
          (void) WriteBlobMSBShort(image,CMYKMode);
        }
    }
  if ((IsImageGray(image) != MagickFalse) ||
      (image->storage_class == DirectClass) || (image->colors > 256))
    (void) WriteBlobMSBLong(image,0);  /* no colormap section */
  else
    {
      /*
        Write PSD raster colormap: 256 red, 256 green, 256 blue bytes,
        zero-padded past image->colors.
      */
      (void) WriteBlobMSBLong(image,768);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].red)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].green)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
      for (i=0; i < (ssize_t) image->colors; i++)
        (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum(
          image->colormap[i].blue)));
      for ( ; i < 256; i++)
        (void) WriteBlobByte(image,0);
    }
  /*
    Image resource block.
  */
  length=28; /* 0x03EB */
  bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
  icc_profile=GetImageProfile(image,"icc");
  if (bim_profile != (StringInfo *) NULL)
    {
      /* clone so the stripped copy does not modify the image's profile */
      bim_profile=CloneStringInfo(bim_profile);
      if (icc_profile != (StringInfo *) NULL)
        RemoveICCProfileFromResourceBlock(bim_profile);
      RemoveResolutionFromResourceBlock(bim_profile);
      length+=PSDQuantum(GetStringInfoLength(bim_profile));
    }
  if (icc_profile != (const StringInfo *) NULL)
    length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
  (void) WriteBlobMSBLong(image,(unsigned int) length);
  WriteResolutionResourceBlock(image);
  if (bim_profile != (StringInfo *) NULL)
    {
      (void) WriteBlob(image,GetStringInfoLength(bim_profile),
        GetStringInfoDatum(bim_profile));
      bim_profile=DestroyStringInfo(bim_profile);
    }
  if (icc_profile != (StringInfo *) NULL)
    {
      /* resource id 0x040F: ICC profile; pad to even length */
      (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
      (void) WriteBlobMSBShort(image,0x0000040F);
      (void) WriteBlobMSBShort(image,0);
      (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
        icc_profile));
      (void) WriteBlob(image,GetStringInfoLength(icc_profile),
        GetStringInfoDatum(icc_profile));
      if ((ssize_t) GetStringInfoLength(icc_profile) !=
          PSDQuantum(GetStringInfoLength(icc_profile)))
        (void) WriteBlobByte(image,0);
    }
  if (status != MagickFalse)
    {
      MagickOffsetType
        size_offset;

      size_t
        size;

      /* reserve the layer-info size field, write layers, then backpatch */
      size_offset=TellBlob(image);
      (void) SetPSDSize(&psd_info,image,0);
      status=WritePSDLayersInternal(image,image_info,&psd_info,&size,
        exception);
      size_offset+=WritePSDSize(&psd_info,image,size+
        (psd_info.version == 1 ? 8 : 12),size_offset);
    }
  (void) WriteBlobMSBLong(image,0);  /* user mask data */
  /*
    Write composite image.
  */
  if (status != MagickFalse)
    {
      CompressionType
        compression;

      compression=image->compression;
      if (image->compression == ZipCompression)
        image->compression=RLECompression;  /* no Zip for the composite */
      if (image_info->compression != UndefinedCompression)
        image->compression=image_info->compression;
      if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse,
          exception) == 0)
        status=MagickFalse;
      image->compression=compression;  /* restore caller's setting */
    }
  (void) CloseBlob(image);
  return(status);
}
/* ==== par_coarsen.c ==== */
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * *****************************************************************************/ /* following should be in a header file */ #include "_hypre_parcsr_ls.h" /*==========================================================================*/ /*==========================================================================*/ /** Selects a coarse "grid" based on the graph of a matrix. Notes: \begin{itemize} \item The underlying matrix storage scheme is a hypre_ParCSR matrix. \item The routine returns the following: \begin{itemize} \item S - a ParCSR matrix representing the "strength matrix". This is used in the "build interpolation" routine. \item CF\_marker - an array indicating both C-pts (value = 1) and F-pts (value = -1) \end{itemize} \item We define the following temporary storage: \begin{itemize} \item measure\_array - an array containing the "measures" for each of the fine-grid points \item graph\_array - an array containing the list of points in the "current subgraph" being considered in the coarsening process. \end{itemize} \item The graph of the "strength matrix" for A is a subgraph of the graph of A, but requires nonsymmetric storage even if A is symmetric. This is because of the directional nature of the "strengh of dependence" notion (see below). Since we are using nonsymmetric storage for A right now, this is not a problem. 
If we ever add the ability to store A symmetrically, then we could store the strength graph as floats instead of doubles to save space. \item This routine currently "compresses" the strength matrix. We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. \end{itemize} Terminology: \begin{itemize} \item Ruge's terminology: A point is "strongly connected to" $j$, or "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$. \item Here, we retain some of this terminology, but with a more generalized notion of "strength". We also retain the "natural" graph notation for representing the directed graph of a matrix. That is, the nonzero entry $a_ij$ is represented as: i --> j. In the strength matrix, S, the entry $s_ij$ is also graphically denoted as above, and means both of the following: \begin{itemize} \item $i$ "depends on" $j$ with "strength" $s_ij$ \item $j$ "influences" $i$ with "strength" $s_ij$ \end{itemize} \end{itemize} {\bf Input files:} _hypre_parcsr_ls.h @return Error code. 
@param A [IN] coefficient matrix
@param strength_threshold [IN] threshold parameter used to define strength
@param S_ptr [OUT] strength matrix
@param CF_marker_ptr [OUT] array indicating C/F points
@see */
/*--------------------------------------------------------------------------*/

#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2

/*
 * CLJP coarsening.  Selects C- and F-points from the strength matrix S by
 * repeatedly choosing a parallel independent set of maximal-measure points
 * as C-points and applying Ruge's heuristics to demote their neighbors.
 *
 * S        - strength matrix (entries are sign-flipped in place during the
 *            algorithm and restored before returning)
 * A        - coefficient matrix; its comm package is used when S has none
 * CF_init  - 1: *CF_marker_ptr holds an initial partial marking to refine;
 *            2: use the "augmented" independent-set initialization
 * debug_flag - 3 prints per-phase wall-clock timings
 * CF_marker_ptr - on exit, per-row marker (C_PT, F_PT, SF_PT, ...)
 *
 * Returns hypre_error_flag.
 */
HYPRE_Int
hypre_BoomerAMGCoarsen( hypre_ParCSRMatrix *S,
                        hypre_ParCSRMatrix *A,
                        HYPRE_Int           CF_init,
                        HYPRE_Int           debug_flag,
                        HYPRE_Int         **CF_marker_ptr)
{
   MPI_Comm                comm          = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg      = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *S_diag        = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int              *S_diag_i      = hypre_CSRMatrixI(S_diag);
   HYPRE_Int              *S_diag_j      = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix        *S_offd        = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *S_offd_i      = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j      = NULL;
   HYPRE_BigInt           *col_map_offd  = hypre_ParCSRMatrixColMapOffd(S);
   HYPRE_Int               num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_BigInt            col_1         = hypre_ParCSRMatrixFirstColDiag(S);
   HYPRE_BigInt            col_n         = col_1 + (HYPRE_BigInt)hypre_CSRMatrixNumCols(S_diag);
   HYPRE_Int               num_cols_offd = 0;
   hypre_CSRMatrix        *S_ext;
   HYPRE_Int              *S_ext_i       = NULL;
   HYPRE_BigInt           *S_ext_j       = NULL;
   HYPRE_Int               num_sends     = 0;
   HYPRE_Int              *int_buf_data;
   HYPRE_Real             *buf_data;
   HYPRE_Int              *CF_marker;
   HYPRE_Int              *CF_marker_offd;
   HYPRE_Real             *measure_array;
   HYPRE_Int              *graph_array;
   HYPRE_Int              *graph_array_offd;
   HYPRE_Int               graph_size;
   HYPRE_BigInt            big_graph_size;
   HYPRE_Int               graph_offd_size;
   HYPRE_BigInt            global_graph_size;
   HYPRE_Int               i, j, k, kc, jS, kS, ig, elmt;
   HYPRE_Int               index, start, my_id, num_procs, jrow, cnt;
   HYPRE_Int               use_commpkg_A = 0;
   HYPRE_Int               break_var = 1;
   HYPRE_Real              wall_time;
   HYPRE_Int               iter = 0;
   HYPRE_BigInt            big_k;

#if 0 /* debugging */
   char  filename[256];
   FILE *fp;
   HYPRE_Int   iter = 0;
#endif

   /*--------------------------------------------------------------
    * Compute a ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   S_ext = NULL;
   if (debug_flag == 3) wall_time = time_getWallclockSeconds();

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   /* fall back to A's comm package (creating it if needed) when S has none */
   if (!comm_pkg)
   {
      use_commpkg_A = 1;
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);
   buf_data = hypre_CTAlloc(HYPRE_Real,
                            hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                            HYPRE_MEMORY_HOST);

   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);

   S_diag_j = hypre_CSRMatrixJ(S_diag);

   if (num_cols_offd)
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }

   /*----------------------------------------------------------
    * Compute the measures
    *
    * The measures are currently given by the column sums of S.
    * Hence, measure_array[i] is the number of influences
    * of variable i.
    *
    * The measures are augmented by a random number
    * between 0 and 1.
    *----------------------------------------------------------*/

   measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd,
                                 HYPRE_MEMORY_HOST);

   /* off-diagonal column sums first, so they can be sent to their owners
      while the diagonal sums are accumulated */
   for (i=0; i < S_offd_i[num_variables]; i++)
   {
      measure_array[num_variables + S_offd_j[i]] += 1.0;
   }
   if (num_procs > 1)
      comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
                        &measure_array[num_variables], buf_data);

   for (i=0; i < S_diag_i[num_variables]; i++)
   {
      measure_array[S_diag_j[i]] += 1.0;
   }

   if (num_procs > 1)
      hypre_ParCSRCommHandleDestroy(comm_handle);

   /* fold the received off-process contributions into the local measures */
   index = 0;
   for (i=0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
            += buf_data[index++];
   }

   for (i=num_variables; i < num_variables+num_cols_offd; i++)
   {
      measure_array[i] = 0;
   }

   /* this augments the measures */
   if (CF_init == 2)
      hypre_BoomerAMGIndepSetInit(S, measure_array, 1);
   else
      hypre_BoomerAMGIndepSetInit(S, measure_array, 0);

   /*---------------------------------------------------
    * Initialize the graph array
    * graph_array contains interior points in elements 0 ... num_variables-1
    * followed by boundary values
    *---------------------------------------------------*/

   graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   if (num_cols_offd)
      graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   else
      graph_array_offd = NULL;

   /* initialize measure array and graph array */

   for (ig = 0; ig < num_cols_offd; ig++)
      graph_array_offd[ig] = ig;

   /*---------------------------------------------------
    * Initialize the C/F marker array
    * C/F marker array contains interior points in elements 0 ...
    * num_variables-1 followed by boundary values
    *---------------------------------------------------*/

   graph_offd_size = num_cols_offd;

   if (CF_init==1)
   {
      /* refine a caller-supplied partial marking */
      CF_marker = *CF_marker_ptr;
      cnt = 0;
      for (i=0; i < num_variables; i++)
      {
         if ( (S_offd_i[i+1]-S_offd_i[i]) > 0 || CF_marker[i] == -1)
         {
            CF_marker[i] = 0;
         }
         if ( CF_marker[i] == Z_PT)
         {
            if (measure_array[i] >= 1.0 || (S_diag_i[i+1]-S_diag_i[i]) > 0)
            {
               CF_marker[i] = 0;
               graph_array[cnt++] = i;
            }
            else
            {
               CF_marker[i] = F_PT;
            }
         }
         else if (CF_marker[i] == SF_PT)
            measure_array[i] = 0;
         else
            graph_array[cnt++] = i;
      }
   }
   else
   {
      CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
      cnt = 0;
      for (i=0; i < num_variables; i++)
      {
         CF_marker[i] = 0;
         /* rows with no strong connections become special fine points */
         if ( (S_diag_i[i+1]-S_diag_i[i]) == 0
              && (S_offd_i[i+1]-S_offd_i[i]) == 0)
         {
            CF_marker[i] = SF_PT;
            measure_array[i] = 0;
         }
         else
            graph_array[cnt++] = i;
      }
   }
   graph_size = cnt;
   if (num_cols_offd)
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   else
      CF_marker_offd = NULL;
   for (i=0; i < num_cols_offd; i++)
      CF_marker_offd[i] = 0;

   /*---------------------------------------------------
    * Loop until all points are either fine or coarse.
    *---------------------------------------------------*/

   if (num_procs > 1)
   {
      if (use_commpkg_A)
         S_ext      = hypre_ParCSRMatrixExtractBExt(S,A,0);
      else
         S_ext      = hypre_ParCSRMatrixExtractBExt(S,S,0);
      S_ext_i    = hypre_CSRMatrixI(S_ext);
      S_ext_j    = hypre_CSRMatrixBigJ(S_ext);
   }

   /* compress S_ext and convert column numbers:
      local columns -> local index, ghost columns -> -(offd index)-1 */
   index = 0;
   for (i=0; i < num_cols_offd; i++)
   {
      for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
      {
         big_k = S_ext_j[j];
         if (big_k >= col_1 && big_k < col_n)
         {
            S_ext_j[index++] = big_k - col_1;
         }
         else
         {
            kc = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd);
            if (kc > -1) S_ext_j[index++] = (HYPRE_BigInt)(-kc-1);
         }
      }
      S_ext_i[i] = index;
   }
   for (i = num_cols_offd; i > 0; i--)
      S_ext_i[i] = S_ext_i[i-1];
   if (num_procs > 1) S_ext_i[0] = 0;

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Initialize CLJP phase = %f\n", my_id, wall_time);
   }

   while (1)
   {
      /*------------------------------------------------
       * Exchange boundary data, i.e. get measures and S_ext_data
       *------------------------------------------------*/

      if (num_procs > 1)
         comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
                           &measure_array[num_variables], buf_data);

      if (num_procs > 1)
         hypre_ParCSRCommHandleDestroy(comm_handle);

      index = 0;
      for (i=0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]
               += buf_data[index++];
      }

      /*------------------------------------------------
       * Set F-pts and update subgraph
       *------------------------------------------------*/

      if (iter || (CF_init != 1))
      {
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];

            if ( (CF_marker[i] != C_PT) && (measure_array[i] < 1) )
            {
               /* set to be an F-pt */
               CF_marker[i] = F_PT;

               /* make sure all dependencies have been accounted for */
               for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
               {
                  if (S_diag_j[jS] > -1)
                  {
                     CF_marker[i] = 0;
                  }
               }
               for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
               {
                  if (S_offd_j[jS] > -1)
                  {
                     CF_marker[i] = 0;
                  }
               }
            }
            if (CF_marker[i])
            {
               measure_array[i] = 0;

               /* take point out of the subgraph (swap with last entry) */
               graph_size--;
               graph_array[ig] = graph_array[graph_size];
               graph_array[graph_size] = i;
               ig--;
            }
         }
      }

      /*------------------------------------------------
       * Exchange boundary data, i.e. get measures
       *------------------------------------------------*/

      if (debug_flag == 3) wall_time = time_getWallclockSeconds();

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
            buf_data[index++] = measure_array[jrow];
         }
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data,
                           &measure_array[num_variables]);

         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      /*------------------------------------------------
       * Debugging:
       *
       * Uncomment the sections of code labeled
       * "debugging" to generate several files that
       * can be visualized using the `coarsen.m'
       * matlab routine.
       *------------------------------------------------*/

#if 0 /* debugging */
      /* print out measures */
      hypre_sprintf(filename, "coarsen.out.measures.%04d", iter);
      fp = fopen(filename, "w");
      for (i = 0; i < num_variables; i++)
      {
         hypre_fprintf(fp, "%f\n", measure_array[i]);
      }
      fclose(fp);

      /* print out strength matrix */
      hypre_sprintf(filename, "coarsen.out.strength.%04d", iter);
      hypre_CSRMatrixPrint(S, filename);

      /* print out C/F marker */
      hypre_sprintf(filename, "coarsen.out.CF.%04d", iter);
      fp = fopen(filename, "w");
      for (i = 0; i < num_variables; i++)
      {
         hypre_fprintf(fp, "%d\n", CF_marker[i]);
      }
      fclose(fp);

      iter++;
#endif

      /*------------------------------------------------
       * Test for convergence
       *------------------------------------------------*/

      big_graph_size = (HYPRE_BigInt) graph_size;
      hypre_MPI_Allreduce(&big_graph_size,&global_graph_size,1,HYPRE_MPI_BIG_INT,hypre_MPI_SUM,comm);

      if (global_graph_size == 0)
         break;

      /*------------------------------------------------
       * Pick an independent set of points with
       * maximal measure.
       *------------------------------------------------*/
      if (iter || (CF_init != 1))
      {
         hypre_BoomerAMGIndepSet(S, measure_array, graph_array,
                                 graph_size,
                                 graph_array_offd, graph_offd_size,
                                 CF_marker, CF_marker_offd);
         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg,
                              CF_marker_offd, int_buf_data);

            hypre_ParCSRCommHandleDestroy(comm_handle);
         }

         /* if a neighbor claimed a boundary point, drop our claim on it */
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1);j++)
            {
               elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
               if (!int_buf_data[index++] && CF_marker[elmt] > 0)
               {
                  CF_marker[elmt] = 0;
               }
            }
         }
      }

      iter++;
      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
            int_buf_data[index++] = CF_marker[elmt];
         }
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                           CF_marker_offd);

         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      for (ig = 0; ig < graph_offd_size; ig++)
      {
         i = graph_array_offd[ig];

         if (CF_marker_offd[i] < 0)
         {
            /* take point out of the subgraph */
            graph_offd_size--;
            graph_array_offd[ig] = graph_array_offd[graph_offd_size];
            graph_array_offd[graph_offd_size] = i;
            ig--;
         }
      }
      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         hypre_printf("Proc = %d iter %d comm. and subgraph update = %f\n", my_id, iter, wall_time);
      }

      /*------------------------------------------------
       * Set C_pts and apply heuristics.
       *------------------------------------------------*/

      for (i=num_variables; i < num_variables+num_cols_offd; i++)
      {
         measure_array[i] = 0;
      }
      if (debug_flag == 3) wall_time = time_getWallclockSeconds();

      for (ig = 0; ig < graph_size; ig++)
      {
         i = graph_array[ig];

         /*---------------------------------------------
          * Heuristic: C-pts don't interpolate from
          * neighbors that influence them.
          *---------------------------------------------*/

         if (CF_marker[i] > 0)
         {
            /* set to be a C-pt */
            CF_marker[i] = C_PT;

            for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
            {
               j = S_diag_j[jS];
               if (j > -1)
               {
                  /* "remove" edge from S (sign-flip encoding) */
                  S_diag_j[jS] = -S_diag_j[jS]-1;

                  /* decrement measures of unmarked neighbors */
                  if (!CF_marker[j])
                  {
                     measure_array[j]--;
                  }
               }
            }
            for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
            {
               j = S_offd_j[jS];
               if (j > -1)
               {
                  /* "remove" edge from S */
                  S_offd_j[jS] = -S_offd_j[jS]-1;

                  /* decrement measures of unmarked neighbors */
                  if (!CF_marker_offd[j])
                  {
                     measure_array[j+num_variables]--;
                  }
               }
            }
         }
         else
         {
            /* marked dependencies */
            for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
            {
               j = S_diag_j[jS];
               if (j < 0) j = -j-1;

               if (CF_marker[j] > 0)
               {
                  if (S_diag_j[jS] > -1)
                  {
                     /* "remove" edge from S */
                     S_diag_j[jS] = -S_diag_j[jS]-1;
                  }

                  /* IMPORTANT: consider all dependencies */
                  /* temporarily modify CF_marker */
                  CF_marker[j] = COMMON_C_PT;
               }
               else if (CF_marker[j] == SF_PT)
               {
                  if (S_diag_j[jS] > -1)
                  {
                     /* "remove" edge from S */
                     S_diag_j[jS] = -S_diag_j[jS]-1;
                  }
               }
            }
            for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
            {
               j = S_offd_j[jS];
               if (j < 0) j = -j-1;

               if (CF_marker_offd[j] > 0)
               {
                  if (S_offd_j[jS] > -1)
                  {
                     /* "remove" edge from S */
                     S_offd_j[jS] = -S_offd_j[jS]-1;
                  }

                  /* IMPORTANT: consider all dependencies */
                  /* temporarily modify CF_marker */
                  CF_marker_offd[j] = COMMON_C_PT;
               }
               else if (CF_marker_offd[j] == SF_PT)
               {
                  if (S_offd_j[jS] > -1)
                  {
                     /* "remove" edge from S */
                     S_offd_j[jS] = -S_offd_j[jS]-1;
                  }
               }
            }

            /* unmarked dependencies */
            for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
            {
               if (S_diag_j[jS] > -1)
               {
                  j = S_diag_j[jS];
                  break_var = 1;
                  /* check for common C-pt */
                  for (kS = S_diag_i[j]; kS < S_diag_i[j+1]; kS++)
                  {
                     k = S_diag_j[kS];
                     if (k < 0) k = -k-1;

                     /* IMPORTANT: consider all dependencies */
                     if (CF_marker[k] == COMMON_C_PT)
                     {
                        /* "remove" edge from S and update measure*/
                        S_diag_j[jS] = -S_diag_j[jS]-1;
                        measure_array[j]--;
                        break_var = 0;
                        break;
                     }
                  }
                  if (break_var)
                  {
                     for (kS = S_offd_i[j]; kS < S_offd_i[j+1]; kS++)
                     {
                        k = S_offd_j[kS];
                        if (k < 0) k = -k-1;

                        /* IMPORTANT: consider all dependencies */
                        if ( CF_marker_offd[k] == COMMON_C_PT)
                        {
                           /* "remove" edge from S and update measure*/
                           S_diag_j[jS] = -S_diag_j[jS]-1;
                           measure_array[j]--;
                           break;
                        }
                     }
                  }
               }
            }
            for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
            {
               if (S_offd_j[jS] > -1)
               {
                  j = S_offd_j[jS];

                  /* check for common C-pt */
                  for (kS = S_ext_i[j]; kS < S_ext_i[j+1]; kS++)
                  {
                     k = (HYPRE_Int)S_ext_j[kS];
                     if (k >= 0)
                     {
                        /* IMPORTANT: consider all dependencies */
                        if (CF_marker[k] == COMMON_C_PT)
                        {
                           /* "remove" edge from S and update measure*/
                           S_offd_j[jS] = -S_offd_j[jS]-1;
                           measure_array[j+num_variables]--;
                           break;
                        }
                     }
                     else
                     {
                        kc = -k-1;
                        if (kc > -1 && CF_marker_offd[kc] == COMMON_C_PT)
                        {
                           /* "remove" edge from S and update measure*/
                           S_offd_j[jS] = -S_offd_j[jS]-1;
                           measure_array[j+num_variables]--;
                           break;
                        }
                     }
                  }
               }
            }
         }

         /* reset CF_marker */
         for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
         {
            j = S_diag_j[jS];
            if (j < 0) j = -j-1;

            if (CF_marker[j] == COMMON_C_PT)
            {
               CF_marker[j] = C_PT;
            }
         }
         for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
         {
            j = S_offd_j[jS];
            if (j < 0) j = -j-1;

            if (CF_marker_offd[j] == COMMON_C_PT)
            {
               CF_marker_offd[j] = C_PT;
            }
         }
      }
      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         hypre_printf("Proc = %d CLJP phase = %f graph_size = %d nc_offd = %d\n",
                      my_id, wall_time, graph_size, num_cols_offd);
      }
   }

   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/

   /* Reset S_matrix: undo the sign-flip edge removals */
   for (i=0; i < S_diag_i[num_variables]; i++)
   {
      if (S_diag_j[i] < 0)
         S_diag_j[i] = -S_diag_j[i]-1;
   }
   for (i=0; i < S_offd_i[num_variables]; i++)
   {
      if (S_offd_j[i] < 0)
         S_offd_j[i] = -S_offd_j[i]-1;
   }
   /*for (i=0; i < num_variables; i++)
      if (CF_marker[i] == SF_PT) CF_marker[i] = F_PT;*/

   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   if (num_cols_offd) hypre_TFree(graph_array_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);

   *CF_marker_ptr = CF_marker;

   return hypre_error_flag;
}

/*==========================================================================
 * Ruge's coarsening algorithm
 *==========================================================================*/

#define C_PT 1
#define F_PT -1
#define Z_PT -2
#define SF_PT -3 /* special fine points */
#define SC_PT 3 /* special coarse points */
#define UNDECIDED 0

/**************************************************************
 *
 * Ruge Coarsening routine
 *
 **************************************************************/
HYPRE_Int
hypre_BoomerAMGCoarsenRuge( hypre_ParCSRMatrix *S,
                            hypre_ParCSRMatrix *A,
                            HYPRE_Int           measure_type,
                            HYPRE_Int           coarsen_type,
                            HYPRE_Int           debug_flag,
                            HYPRE_Int         **CF_marker_ptr)
{
   MPI_Comm                comm          = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg      = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *S_diag        = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix        *S_offd        = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *S_i           = hypre_CSRMatrixI(S_diag);
   HYPRE_Int              *S_j           = hypre_CSRMatrixJ(S_diag);
   HYPRE_Int              *S_offd_i      = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j      = NULL;
   HYPRE_Int               num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int               num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   HYPRE_BigInt           *col_map_offd  = hypre_ParCSRMatrixColMapOffd(S);
   hypre_CSRMatrix        *S_ext = NULL;
   HYPRE_Int              *S_ext_i = NULL;
HYPRE_BigInt *S_ext_j = NULL; hypre_CSRMatrix *ST; HYPRE_Int *ST_i; HYPRE_Int *ST_j; HYPRE_Int *CF_marker; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int ci_tilde = -1; HYPRE_Int ci_tilde_mark = -1; HYPRE_Int ci_tilde_offd = -1; HYPRE_Int ci_tilde_offd_mark = -1; HYPRE_Int *measure_array; HYPRE_Int *graph_array; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *ci_array = NULL; HYPRE_BigInt big_k; HYPRE_Int i, j, k, jS; HYPRE_Int ji, jj, jk, jm, index; HYPRE_Int set_empty = 1; HYPRE_Int C_i_nonempty = 0; //HYPRE_Int num_nonzeros; HYPRE_Int num_procs, my_id; HYPRE_Int num_sends = 0; HYPRE_BigInt first_col; HYPRE_Int start; HYPRE_BigInt col_0, col_n; hypre_LinkList LoL_head; hypre_LinkList LoL_tail; HYPRE_Int *lists, *where; HYPRE_Int measure, new_meas; HYPRE_Int meas_type = 0; HYPRE_Int agg_2 = 0; HYPRE_Int num_left, elmt; HYPRE_Int nabor, nabor_two; HYPRE_Int use_commpkg_A = 0; HYPRE_Int break_var = 0; HYPRE_Int f_pnt = F_PT; HYPRE_Real wall_time; if (coarsen_type < 0) coarsen_type = -coarsen_type; if (measure_type == 1 || measure_type == 4) meas_type = 1; if (measure_type == 4 || measure_type == 3) agg_2 = 1; /*------------------------------------------------------- * Initialize the C/F marker, LoL_head, LoL_tail arrays *-------------------------------------------------------*/ LoL_head = NULL; LoL_tail = NULL; lists = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); where = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); #if 0 /* debugging */ char filename[256]; FILE *fp; HYPRE_Int iter = 0; #endif /*-------------------------------------------------------------- * Compute a CSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. 
*----------------------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); first_col = hypre_ParCSRMatrixFirstColDiag(S); col_0 = first_col-1; col_n = col_0+(HYPRE_BigInt)num_variables; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); if (!comm_pkg) { use_commpkg_A = 1; comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_cols_offd) S_offd_j = hypre_CSRMatrixJ(S_offd); jS = S_i[num_variables]; ST = hypre_CSRMatrixCreate(num_variables, num_variables, jS); ST_i = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST); ST_j = hypre_CTAlloc(HYPRE_Int, jS, HYPRE_MEMORY_HOST); hypre_CSRMatrixI(ST) = ST_i; hypre_CSRMatrixJ(ST) = ST_j; /*---------------------------------------------------------- * generate transpose of S, ST *----------------------------------------------------------*/ for (i=0; i <= num_variables; i++) ST_i[i] = 0; for (i=0; i < jS; i++) { ST_i[S_j[i]+1]++; } for (i=0; i < num_variables; i++) { ST_i[i+1] += ST_i[i]; } for (i=0; i < num_variables; i++) { for (j=S_i[i]; j < S_i[i+1]; j++) { index = S_j[j]; ST_j[ST_i[index]] = i; ST_i[index]++; } } for (i = num_variables; i > 0; i--) { ST_i[i] = ST_i[i-1]; } ST_i[0] = 0; /*---------------------------------------------------------- * Compute the measures * * The measures are given by the row sums of ST. * Hence, measure_array[i] is the number of influences * of variable i. 
* correct actual measures through adding influences from * neighbor processors *----------------------------------------------------------*/ measure_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); for (i = 0; i < num_variables; i++) { measure_array[i] = ST_i[i+1]-ST_i[i]; } /* special case for Falgout coarsening */ if (coarsen_type == 6) { f_pnt = Z_PT; coarsen_type = 1; } if (coarsen_type == 10) { f_pnt = Z_PT; coarsen_type = 11; } if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1) { if (use_commpkg_A) S_ext = hypre_ParCSRMatrixExtractBExt(S,A,0); else S_ext = hypre_ParCSRMatrixExtractBExt(S,S,0); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); HYPRE_Int num_nonzeros = S_ext_i[num_cols_offd]; /*first_col = hypre_ParCSRMatrixFirstColDiag(S); col_0 = first_col-1; col_n = col_0+num_variables; */ if (meas_type) { for (i=0; i < num_nonzeros; i++) { index = (HYPRE_Int)(S_ext_j[i] - first_col); if (index > -1 && index < num_variables) measure_array[index]++; } } } /*--------------------------------------------------- * Loop until all points are either fine or coarse. 
*---------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); /* first coarsening phase */ /************************************************************* * * Initialize the lists * *************************************************************/ CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); num_left = 0; for (j = 0; j < num_variables; j++) { if ((S_i[j+1]-S_i[j])== 0 && (S_offd_i[j+1]-S_offd_i[j]) == 0) { CF_marker[j] = SF_PT; if (agg_2) CF_marker[j] = SC_PT; measure_array[j] = 0; } else { CF_marker[j] = UNDECIDED; num_left++; } } for (j = 0; j < num_variables; j++) { measure = measure_array[j]; if (CF_marker[j] != SF_PT && CF_marker[j] != SC_PT) { if (measure > 0) { hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, j, lists, where); } else { if (measure < 0) hypre_error_w_msg(HYPRE_ERROR_GENERIC,"negative measure!\n"); /*if (measure < 0) hypre_printf("negative measure!\n");*/ CF_marker[j] = f_pnt; for (k = S_i[j]; k < S_i[j+1]; k++) { nabor = S_j[k]; if (CF_marker[nabor] != SF_PT && CF_marker[nabor] != SC_PT) { if (nabor < j) { new_meas = measure_array[nabor]; if (new_meas > 0) hypre_remove_point(&LoL_head, &LoL_tail, new_meas, nabor, lists, where); new_meas = ++(measure_array[nabor]); hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor, lists, where); } else { new_meas = ++(measure_array[nabor]); } } } --num_left; } } } /**************************************************************** * * Main loop of Ruge-Stueben first coloring pass. 
* * WHILE there are still points to classify DO: * 1) find first point, i, on list with max_measure * make i a C-point, remove it from the lists * 2) For each point, j, in S_i^T, * a) Set j to be an F-point * b) For each point, k, in S_j * move k to the list in LoL with measure one * greater than it occupies (creating new LoL * entry if necessary) * 3) For each point, j, in S_i, * move j to the list in LoL with measure one * smaller than it occupies (creating new LoL * entry if necessary) * ****************************************************************/ while (num_left > 0) { index = LoL_head -> head; CF_marker[index] = C_PT; measure = measure_array[index]; measure_array[index] = 0; --num_left; hypre_remove_point(&LoL_head, &LoL_tail, measure, index, lists, where); for (j = ST_i[index]; j < ST_i[index+1]; j++) { nabor = ST_j[j]; if (CF_marker[nabor] == UNDECIDED) { CF_marker[nabor] = F_PT; measure = measure_array[nabor]; hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where); --num_left; for (k = S_i[nabor]; k < S_i[nabor+1]; k++) { nabor_two = S_j[k]; if (CF_marker[nabor_two] == UNDECIDED) { measure = measure_array[nabor_two]; hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor_two, lists, where); new_meas = ++(measure_array[nabor_two]); hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where); } } } } for (j = S_i[index]; j < S_i[index+1]; j++) { nabor = S_j[j]; if (CF_marker[nabor] == UNDECIDED) { measure = measure_array[nabor]; hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where); measure_array[nabor] = --measure; if (measure > 0) hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, nabor, lists, where); else { CF_marker[nabor] = F_PT; --num_left; for (k = S_i[nabor]; k < S_i[nabor+1]; k++) { nabor_two = S_j[k]; if (CF_marker[nabor_two] == UNDECIDED) { new_meas = measure_array[nabor_two]; hypre_remove_point(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where); new_meas = 
++(measure_array[nabor_two]); hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where); } } } } } } hypre_TFree(measure_array, HYPRE_MEMORY_HOST); hypre_CSRMatrixDestroy(ST); if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Coarsen 1st pass = %f\n", my_id, wall_time); } hypre_TFree(lists, HYPRE_MEMORY_HOST); hypre_TFree(where, HYPRE_MEMORY_HOST); hypre_TFree(LoL_head, HYPRE_MEMORY_HOST); hypre_TFree(LoL_tail, HYPRE_MEMORY_HOST); for (i=0; i < num_variables; i++) if (CF_marker[i] == SC_PT) CF_marker[i] = C_PT; if (coarsen_type == 11) { *CF_marker_ptr = CF_marker; if (meas_type && num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return 0; } /* second pass, check fine points for coarse neighbors for coarsen_type = 2, the second pass includes off-processore boundary points */ /*--------------------------------------------------- * Initialize the graph array *---------------------------------------------------*/ graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); for (i = 0; i < num_variables; i++) { graph_array[i] = -1; } if (debug_flag == 3) wall_time = time_getWallclockSeconds(); if (coarsen_type == 2) { /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } ci_array = hypre_CTAlloc(HYPRE_Int, 
num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) ci_array[i] = -1; for (i=0; i < num_variables; i++) { if (ci_tilde_mark != i) ci_tilde = -1; if (ci_tilde_offd_mark != i) ci_tilde_offd = -1; if (CF_marker[i] == -1) { break_var = 1; for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] > 0) graph_array[j] = i; } for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] > 0) ci_array[j] = i; } for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] == -1) { set_empty = 1; for (jj = S_i[j]; jj < S_i[j+1]; jj++) { index = S_j[jj]; if (graph_array[index] == i) { set_empty = 0; break; } } if (set_empty) { for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++) { index = S_offd_j[jj]; if (ci_array[index] == i) { set_empty = 0; break; } } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break_var = 0; break; } else { ci_tilde = j; ci_tilde_mark = i; CF_marker[j] = 1; C_i_nonempty = 1; i--; break_var = 0; break; } } } } if (break_var) { for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] == -1) { set_empty = 1; for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++) { big_k = S_ext_j[jj]; if (big_k > col_0 && big_k < col_n) /* index interior */ { if (graph_array[(HYPRE_Int)(big_k-first_col)] == i) { set_empty = 0; break; } } else { jk = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd); if (jk != -1) { if (ci_array[jk] == i) { set_empty = 0; break; } } } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break; } else { ci_tilde_offd = j; ci_tilde_offd_mark = i; CF_marker_offd[j] = 1; C_i_nonempty = 1; i--; break; } } } } 
} } } } else { for (i=0; i < num_variables; i++) { if (ci_tilde_mark != i) ci_tilde = -1; if (CF_marker[i] == -1) { for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] > 0) graph_array[j] = i; } for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] == -1) { set_empty = 1; for (jj = S_i[j]; jj < S_i[j+1]; jj++) { index = S_j[jj]; if (graph_array[index] == i) { set_empty = 0; break; } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } C_i_nonempty = 0; break; } else { ci_tilde = j; ci_tilde_mark = i; CF_marker[j] = 1; C_i_nonempty = 1; i--; break; } } } } } } } if (debug_flag == 3 && coarsen_type != 2) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Coarsen 2nd pass = %f\n", my_id, wall_time); } /* third pass, check boundary fine points for coarse neighbors */ if (coarsen_type == 3 || coarsen_type == 4) { if (debug_flag == 3) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) ci_array[i] = -1; } if (coarsen_type > 1 && coarsen_type < 5) { for (i=0; i < num_variables; i++) graph_array[i] = -1; for (i=0; i < 
num_cols_offd; i++) { if (ci_tilde_mark != i) ci_tilde = -1; if (ci_tilde_offd_mark != i) ci_tilde_offd = -1; if (CF_marker_offd[i] == -1) { for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++) { big_k = S_ext_j[ji]; if (big_k > col_0 && big_k < col_n) { j = (HYPRE_Int)(big_k - first_col); if (CF_marker[j] > 0) graph_array[j] = i; } else { jj = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd); if (jj != -1 && CF_marker_offd[jj] > 0) ci_array[jj] = i; } } for (ji = S_ext_i[i]; ji < S_ext_i[i+1]; ji++) { big_k = S_ext_j[ji]; if (big_k > col_0 && big_k < col_n) { j = (HYPRE_Int)(big_k - first_col); if ( CF_marker[j] == -1) { set_empty = 1; for (jj = S_i[j]; jj < S_i[j+1]; jj++) { index = S_j[jj]; if (graph_array[index] == i) { set_empty = 0; break; } } for (jj = S_offd_i[j]; jj < S_offd_i[j+1]; jj++) { index = S_offd_j[jj]; if (ci_array[index] == i) { set_empty = 0; break; } } if (set_empty) { if (C_i_nonempty) { CF_marker_offd[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break; } else { ci_tilde = j; ci_tilde_mark = i; CF_marker[j] = 1; C_i_nonempty = 1; i--; break; } } } } else { jm = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd); if (jm != -1 && CF_marker_offd[jm] == -1) { set_empty = 1; for (jj = S_ext_i[jm]; jj < S_ext_i[jm+1]; jj++) { big_k = S_ext_j[jj]; if (big_k > col_0 && big_k < col_n) { if (graph_array[(HYPRE_Int)(big_k-first_col)] == i) { set_empty = 0; break; } } else { jk = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd); if (jk != -1) { if (ci_array[jk] == i) { set_empty = 0; break; } } } } if (set_empty) { if (C_i_nonempty) { CF_marker_offd[i] = 1; if (ci_tilde > -1) { CF_marker[ci_tilde] = -1; ci_tilde = -1; } if (ci_tilde_offd > -1) { CF_marker_offd[ci_tilde_offd] = -1; ci_tilde_offd = -1; } C_i_nonempty = 0; break; } else { ci_tilde_offd = jm; ci_tilde_offd_mark = i; CF_marker_offd[jm] = 1; C_i_nonempty 
= 1; i--; break; } } } } } } } /*------------------------------------------------ * Send boundary data for CF_marker back *------------------------------------------------*/ if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); } /* only CF_marker entries from larger procs are accepted if coarsen_type = 4 coarse points are not overwritten */ index = 0; if (coarsen_type != 4) { for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id) { for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] = int_buf_data[index++]; } else { index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start; } } } else { for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); if (hypre_ParCSRCommPkgSendProc(comm_pkg,i) > my_id) { for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j); if (CF_marker[elmt] != 1) CF_marker[elmt] = int_buf_data[index]; index++; } } else { index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1) - start; } } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; if (coarsen_type == 4) hypre_printf("Proc = %d Coarsen 3rd pass = %f\n", my_id, wall_time); if (coarsen_type == 3) hypre_printf("Proc = %d Coarsen 3rd pass = %f\n", my_id, wall_time); if (coarsen_type == 2) hypre_printf("Proc = %d Coarsen 2nd pass = %f\n", my_id, wall_time); } } if (coarsen_type == 5) { /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ if (debug_flag == 3) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); int_buf_data = hypre_CTAlloc(HYPRE_Int, 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_offd; i++) ci_array[i] = -1; for (i=0; i < num_variables; i++) graph_array[i] = -1; for (i=0; i < num_variables; i++) { if (CF_marker[i] == -1 && (S_offd_i[i+1]-S_offd_i[i]) > 0) { break_var = 1; for (ji = S_i[i]; ji < S_i[i+1]; ji++) { j = S_j[ji]; if (CF_marker[j] > 0) graph_array[j] = i; } for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] > 0) ci_array[j] = i; } for (ji = S_offd_i[i]; ji < S_offd_i[i+1]; ji++) { j = S_offd_j[ji]; if (CF_marker_offd[j] == -1) { set_empty = 1; for (jj = S_ext_i[j]; jj < S_ext_i[j+1]; jj++) { big_k = S_ext_j[jj]; if (big_k > col_0 && big_k < col_n) /* index interior */ { if (graph_array[(HYPRE_Int)(big_k-first_col)] == i) { set_empty = 0; break; } } else { jk = hypre_BigBinarySearch(col_map_offd,big_k,num_cols_offd); if (jk != -1) { if (ci_array[jk] == i) { set_empty = 0; break; } } } } if (set_empty) { if (C_i_nonempty) { CF_marker[i] = -2; C_i_nonempty = 0; break; } else { C_i_nonempty = 1; i--; break; } } } } } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Coarsen special points = %f\n", my_id, wall_time); } } /*--------------------------------------------------- * Clean up and return *---------------------------------------------------*/ /*if (coarsen_type != 1) { */ hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); 
   hypre_TFree(ci_array, HYPRE_MEMORY_HOST);
   /*} */
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);

   /* S_ext was only extracted when boundary data was required (see the
      matching condition where hypre_ParCSRMatrixExtractBExt is called) */
   if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1)
      hypre_CSRMatrixDestroy(S_ext);

   /* hand ownership of the C/F marker array back to the caller */
   *CF_marker_ptr = CF_marker;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenFalgout
 *
 * Falgout coarsening: a Ruge pass (coarsen_type 6, which marks boundary
 * F-points as Z_PT) followed by a CLJP pass over the remaining points.
 * On output *CF_marker_ptr holds the C/F splitting. Returns the summed
 * error codes of the two passes.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenFalgout( hypre_ParCSRMatrix    *S,
                               hypre_ParCSRMatrix    *A,
                               HYPRE_Int              measure_type,
                               HYPRE_Int              debug_flag,
                               HYPRE_Int            **CF_marker_ptr)
{
   HYPRE_Int ierr = 0;

   /*-------------------------------------------------------
    * Perform Ruge coarsening followed by CLJP coarsening
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 6, debug_flag,
                                       CF_marker_ptr);

   ierr += hypre_BoomerAMGCoarsen (S, A, 1, debug_flag,
                                   CF_marker_ptr);

   return (ierr);
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenHMIS
 *
 * HMIS coarsening: a Ruge pass (coarsen_type 10) followed by a PMIS pass
 * (CF_init = 1, i.e. PMIS reuses the marker produced by the Ruge pass).
 * On output *CF_marker_ptr holds the C/F splitting.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenHMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              measure_type,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
   HYPRE_Int ierr = 0;

   /*-------------------------------------------------------
    * Perform Ruge coarsening followed by PMIS coarsening
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 10, debug_flag,
                                       CF_marker_ptr);

   ierr += hypre_BoomerAMGCoarsenPMIS (S, A, 1, debug_flag,
                                       CF_marker_ptr);

   return (ierr);
}

/*--------------------------------------------------------------------------*/

/* C/F marker values used by the PMIS routine below */
#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2

/* begin HANS added */
/**************************************************************
 *
 * Modified Independent Set Coarsening routine
 *    (don't worry about strong F-F connections
 *     without a common C point)
 *
 **************************************************************/
/*
 * CF_init semantics as used by the code below:
 *   1        : *CF_marker_ptr already holds an initial marker (HMIS path);
 *              it is reused and refined rather than allocated here
 *   2 or 4   : independent-set initialization is called with flag 1
 *   3 or 4   : isolated points (no strong connections) become C points
 *              instead of SF points
 */
HYPRE_Int
hypre_BoomerAMGCoarsenPMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              CF_init,
                            HYPRE_Int              debug_flag,
                            HYPRE_Int            **CF_marker_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] -= hypre_MPI_Wtime();
#endif
   MPI_Comm                  comm          = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg      *comm_pkg      = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle   *comm_handle;

   /* local (diag) and off-processor (offd) parts of the strength matrix */
   hypre_CSRMatrix          *S_diag        = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int                *S_diag_i      = hypre_CSRMatrixI(S_diag);
   HYPRE_Int                *S_diag_j      = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix          *S_offd        = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int                *S_offd_i      = hypre_CSRMatrixI(S_offd);
   HYPRE_Int                *S_offd_j;

   HYPRE_Int                 num_variables = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int                 num_cols_offd = 0;
   /* hypre_CSRMatrix       *S_ext;
   HYPRE_Int                *S_ext_i;
   HYPRE_Int                *S_ext_j; */

   HYPRE_Int                 num_sends = 0;
   HYPRE_Int                *int_buf_data;   /* send buffer for CF markers */
   HYPRE_Real               *buf_data;       /* send buffer for measures */

   HYPRE_Int                *CF_marker;      /* C/F marker for local points */
   HYPRE_Int                *CF_marker_offd; /* C/F marker for external points */

   /* measures for local points (0..num_variables-1) and external points
      (num_variables..num_variables+num_cols_offd-1), in one array */
   HYPRE_Real               *measure_array;
   HYPRE_Int                *graph_array;       /* undecided local points */
   HYPRE_Int                *graph_array_offd;  /* undecided external points */
   HYPRE_Int                 graph_size;
   HYPRE_BigInt              big_graph_size;
   HYPRE_Int                 graph_offd_size;
   HYPRE_BigInt              global_graph_size;

   HYPRE_Int                 i, j, jj, jS, ig;
   HYPRE_Int                 index, start, my_id, num_procs, jrow, cnt, elmt;

   HYPRE_Int                 ierr = 0;
   HYPRE_Real                wall_time;
   HYPRE_Int                 iter = 0;

   HYPRE_Int                *prefix_sum_workspace;

#if 0 /* debugging */
   char  filename[256];
   FILE *fp;
   HYPRE_Int   iter = 0;
#endif

/*******************************************************************************
  BEFORE THE INDEPENDENT SET COARSENING LOOP:
   measure_array: calculate the measures, and communicate them
     (this array contains measures for both local and external nodes)
   CF_marker, CF_marker_offd: initialize CF_marker
     (separate arrays for local and external; 0=unassigned, negative=F point, positive=C point)
*******************************************************************************/

   /*--------------------------------------------------------------
    * Use the ParCSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: S_data is not used; instead, only strong columns are retained
    * in S_j, which can then be used like S_data
    *----------------------------------------------------------------*/

   /*S_ext = NULL; */
   if (debug_flag == 3) wall_time = time_getWallclockSeconds();

   hypre_MPI_Comm_size(comm,&num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);

   /* fall back to A's communication package if S has none; build one on
      demand as a last resort */
   if (!comm_pkg)
   {
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   /* send buffers sized to the total number of entries this rank sends */
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                   num_sends), HYPRE_MEMORY_HOST);
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                   num_sends), HYPRE_MEMORY_HOST);

   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);

   S_diag_j = hypre_CSRMatrixJ(S_diag);

   if (num_cols_offd)
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }

   /*----------------------------------------------------------
    * Compute the measures
    *
    * The measures are currently given by the column sums of S.
    * Hence, measure_array[i] is the number of influences
    * of variable i.
    *
    * The measures are augmented by a random number
    * between 0 and 1.
    *----------------------------------------------------------*/

   measure_array = hypre_CTAlloc(HYPRE_Real, num_variables+num_cols_offd, HYPRE_MEMORY_HOST);

   /* first calculate the local part of the sums for the external nodes */
#ifdef HYPRE_USING_OPENMP
   /* integer scratch array so the column counts can be accumulated with
      omp atomic; copied into the HYPRE_Real measure_array afterwards */
   HYPRE_Int *measure_array_temp = hypre_CTAlloc(HYPRE_Int, num_variables+num_cols_offd, HYPRE_MEMORY_HOST);

#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < S_offd_i[num_variables]; i++)
   {
#pragma omp atomic
      measure_array_temp[num_variables + S_offd_j[i]]++;
   }

#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < num_cols_offd; i++)
   {
      measure_array[i + num_variables] = measure_array_temp[i + num_variables];
   }
#else
   for (i=0; i < S_offd_i[num_variables]; i++)
   {
      measure_array[num_variables + S_offd_j[i]] += 1.0;
   }
#endif // HYPRE_USING_OPENMP

   /* now send those locally calculated values for the external nodes to the neighboring processors */
   /* nonblocking: the transpose-direction exchange (type 2) is overlapped
      with the local-node computation below and finished afterwards */
   if (num_procs > 1)
      comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg,
                        &measure_array[num_variables], buf_data);

   /* calculate the local part for the local nodes */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < S_diag_i[num_variables]; i++)
   {
#pragma omp atomic
      measure_array_temp[S_diag_j[i]]++;
   }

#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
   for (i=0; i < num_variables; i++)
   {
      measure_array[i] = measure_array_temp[i];
   }

   hypre_TFree(measure_array_temp, HYPRE_MEMORY_HOST);
#else
   for (i=0; i < S_diag_i[num_variables]; i++)
   {
      measure_array[S_diag_j[i]] += 1.0;
   }
#endif // HYPRE_USING_OPENMP

   /* finish the communication */
   if (num_procs > 1)
      hypre_ParCSRCommHandleDestroy(comm_handle);

   /* now add the externally calculated part of the local nodes to the local nodes */
   index = 0;
   for (i=0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)] +=
                  buf_data[index++];
   }

   /* set the measures of the external nodes to zero */
   for (i=num_variables; i < num_variables+num_cols_offd; i++)
   {
      measure_array[i] = 0;
   }

   /* this augments the measures with a random number between 0 and 1 */
   /* (only for the local part) */
   /* this augments the measures */
   if (CF_init == 2 || CF_init == 4)
      hypre_BoomerAMGIndepSetInit(S, measure_array, 1);
   else
      hypre_BoomerAMGIndepSetInit(S, measure_array, 0);

   /*---------------------------------------------------
    * Initialize the graph arrays, and CF_marker arrays
    *---------------------------------------------------*/

   /* first the off-diagonal part of the graph array */
   if (num_cols_offd)
      graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   else
      graph_array_offd = NULL;

   /* initially every external point is undecided */
   for (ig = 0; ig < num_cols_offd; ig++)
      graph_array_offd[ig] = ig;

   graph_offd_size = num_cols_offd;

   /* now the local part of the graph array, and the local CF_marker array */
   graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

   if (CF_init==1)
   {
      /* reuse the marker produced by a previous pass (e.g. HMIS' Ruge pass)
         and re-open boundary / F points for the PMIS sweep */
      CF_marker = *CF_marker_ptr;
      cnt = 0;
      for (i=0; i < num_variables; i++)
      {
         if ( (S_offd_i[i+1]-S_offd_i[i]) > 0 || CF_marker[i] == -1)
         {
            CF_marker[i] = 0;
         }
         if ( CF_marker[i] == Z_PT)
         {
            if (measure_array[i] >= 1.0 || (S_diag_i[i+1]-S_diag_i[i]) > 0)
            {
               CF_marker[i] = 0;
               graph_array[cnt++] = i;
            }
            else
            {
               CF_marker[i] = F_PT;
            }
         }
         else if (CF_marker[i] == SF_PT)
            measure_array[i] = 0;
         else
            graph_array[cnt++] = i;
      }
   }
   else
   {
      CF_marker = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
      cnt = 0;
      for (i=0; i < num_variables; i++)
      {
         CF_marker[i] = 0;
         if ( (S_diag_i[i+1]-S_diag_i[i]) == 0
               && (S_offd_i[i+1]-S_offd_i[i]) == 0)
         {
            CF_marker[i] = SF_PT; /* an isolated fine grid */
            if (CF_init == 3 || CF_init == 4) CF_marker[i] = C_PT;
            measure_array[i] = 0;
         }
         else
            graph_array[cnt++] = i;
      }
   }
   graph_size = cnt;

   /* now the off-diagonal part of CF_marker */
   if (num_cols_offd)
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
   else
      CF_marker_offd = NULL;

   for (i=0; i < num_cols_offd; i++)
      CF_marker_offd[i] = 0;

   /*------------------------------------------------
    * Communicate the local measures, which are complete,
      to the external nodes
    *------------------------------------------------*/
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
      {
         jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
         buf_data[index++] = measure_array[jrow];
      }
   }

   if (num_procs > 1)
   {
      comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data,
                        &measure_array[num_variables]);

      hypre_ParCSRCommHandleDestroy(comm_handle);
   }

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d    Initialize CLJP phase = %f\n",
                   my_id, wall_time);
   }

   /* double-buffered graph arrays: each sweep compacts the still-undecided
      points into *2, then the pointers are swapped */
   HYPRE_Int *graph_array2 = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   HYPRE_Int *graph_array_offd2 = NULL;
   if (num_cols_offd)
      graph_array_offd2 = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

/*******************************************************************************
  THE INDEPENDENT SET COARSENING LOOP:
*******************************************************************************/

   /*---------------------------------------------------
    * Loop until all points are either fine or coarse.
    *---------------------------------------------------*/

   while (1)
   {
      big_graph_size = (HYPRE_BigInt) graph_size;

      /* stop the coarsening if nothing left to be coarsened */
      hypre_MPI_Allreduce(&big_graph_size,&global_graph_size,1,HYPRE_MPI_BIG_INT,hypre_MPI_SUM,comm);

      if (global_graph_size == 0)
         break;

      /*     hypre_printf("\n");
             hypre_printf("*** MIS iteration %d\n",iter);
             hypre_printf("graph_size remaining %d\n",graph_size);*/

      /*------------------------------------------------
       * Pick an independent set of points with
       * maximal measure.
         At the end, CF_marker is complete, but still needs to be
         communicated to CF_marker_offd
       *------------------------------------------------*/
      /* on the very first sweep with CF_init set, the initial marker is
         kept as the tentative independent set */
      if (!CF_init || iter)
      {
         /*hypre_BoomerAMGIndepSet(S, measure_array, graph_array,
                                 graph_size,
                                 graph_array_offd, graph_offd_size,
                                 CF_marker, CF_marker_offd);*/

         /* tentatively mark every undecided point with measure > 1 */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];
            if (measure_array[i] > 1)
            {
               CF_marker[i] = 1;
            }
         }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_offd_size; ig++)
         {
            i = graph_array_offd[ig];
            if (measure_array[i+num_variables] > 1)
            {
               CF_marker_offd[i] = 1;
            }
         }

         /*-------------------------------------------------------
          * Remove nodes from the initial independent set
          *  (of two strongly-coupled candidates, the one with the
          *   smaller measure is dropped)
          *-------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE
#endif
         for (ig = 0; ig < graph_size; ig++)
         {
            i = graph_array[ig];
            if (measure_array[i] > 1)
            {
               for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
               {
                  j = S_diag_j[jS];
                  if (measure_array[j] > 1)
                  {
                     if (measure_array[i] > measure_array[j])
                        CF_marker[j] = 0;
                     else if (measure_array[j] > measure_array[i])
                        CF_marker[i] = 0;
                  }
               } /* for each local neighbor j of i */
               for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
               {
                  jj = S_offd_j[jS];
                  j = num_variables+jj;
                  if (measure_array[j] > 1)
                  {
                     if (measure_array[i] > measure_array[j])
                        CF_marker_offd[jj] = 0;
                     else if (measure_array[j] > measure_array[i])
                        CF_marker[i] = 0;
                  }
               }
            } /* for each node with measure > 1 */
         } /* for each node i */

         /*------------------------------------------------
          * Exchange boundary data for CF_marker: send external
            points to internal points
          *------------------------------------------------*/
         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg,
                              CF_marker_offd, int_buf_data);

            hypre_ParCSRCommHandleDestroy(comm_handle);
         }
         /* reconcile: a boundary point stays in the set only if no
            neighboring rank deselected it */
         index = 0;
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            {
               elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j);
               if (!int_buf_data[index] && CF_marker[elmt] > 0)
               {
                  CF_marker[elmt] = 0;
                  index++;
               }
               else
               {
                  int_buf_data[index++] = CF_marker[elmt];
               }
            }
         }

         if (num_procs > 1)
         {
            comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg,
                              int_buf_data, CF_marker_offd);

            hypre_ParCSRCommHandleDestroy(comm_handle);
         }
      }

      iter++;

      /*------------------------------------------------
       * Set C-pts and F-pts.
       *------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE
#endif
      for (ig = 0; ig < graph_size; ig++)
      {
         i = graph_array[ig];

         /*---------------------------------------------
          * If the measure of i is smaller than 1, then
          * make i an F point (because it does not influence
          * any other point)
          *---------------------------------------------*/

         if(measure_array[i]<1.) CF_marker[i]= F_PT;

         /*---------------------------------------------
          * First treat the case where point i is in the
          * independent set: make i a C point,
          *---------------------------------------------*/

         if (CF_marker[i] > 0) CF_marker[i] = C_PT;

         /*---------------------------------------------
          * Now treat the case where point i is not in the
          * independent set: loop over
          * all the points j that influence equation i; if
          * j is a C point, then make i an F point.
          *---------------------------------------------*/

         else
         {
            /* first the local part */
            for (jS = S_diag_i[i]; jS < S_diag_i[i+1]; jS++)
            {
               /* j is the column number, or the local number of the point influencing i */
               j = S_diag_j[jS];
               if (CF_marker[j] > 0) /* j is a C-point */
                  CF_marker[i] = F_PT;
            }
            /* now the external part */
            for (jS = S_offd_i[i]; jS < S_offd_i[i+1]; jS++)
            {
               j = S_offd_j[jS];
               if (CF_marker_offd[j] > 0) /* j is a C-point */
                  CF_marker[i] = F_PT;
            }
         } /* end else */
      } /* end first loop over graph */

      /* now communicate CF_marker to CF_marker_offd, to make
         sure that new external F points are known on this processor */

      /*------------------------------------------------
       * Exchange boundary data for CF_marker: send internal
         points to external points
       *------------------------------------------------*/
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                           CF_marker_offd);

         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      /*------------------------------------------------
       * Update subgraph: compact the still-undecided points
       * into graph_array2 / graph_array_offd2 in parallel
       *------------------------------------------------*/
      /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/
      prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2*(hypre_NumThreads() + 1), HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ig,i)
#endif
      {
         HYPRE_Int private_graph_size_cnt = 0;
         HYPRE_Int private_graph_offd_size_cnt = 0;

         HYPRE_Int ig_begin, ig_end;
         hypre_GetSimpleThreadPartition(&ig_begin, &ig_end, graph_size);

         HYPRE_Int ig_offd_begin, ig_offd_end;
         hypre_GetSimpleThreadPartition(&ig_offd_begin, &ig_offd_end, graph_offd_size);

         /* first pass: count survivors per thread */
         for (ig = ig_begin; ig < ig_end; ig++)
         {
            i = graph_array[ig];
            if (CF_marker[i]!=0) /* C or F point */
            {
               /* the independent set
                  subroutine needs measure 0 for removed nodes */
               measure_array[i] = 0;
            }
            else
            {
               private_graph_size_cnt++;
            }
         }

         for (ig = ig_offd_begin; ig < ig_offd_end; ig++)
         {
            i = graph_array_offd[ig];
            if (CF_marker_offd[i]!=0) /* C or F point */
            {
               /* the independent set
                  subroutine needs measure 0 for removed nodes */
               measure_array[i + num_variables] = 0;
            }
            else
            {
               private_graph_offd_size_cnt++;
            }
         }

         /* turn per-thread counts into write offsets; also yields the new
            totals in graph_size / graph_offd_size */
         hypre_prefix_sum_pair(&private_graph_size_cnt, &graph_size, &private_graph_offd_size_cnt, &graph_offd_size, prefix_sum_workspace);

         /* second pass: scatter the undecided points to their new slots */
         for (ig = ig_begin; ig < ig_end; ig++)
         {
            i = graph_array[ig];
            if (CF_marker[i]==0)
            {
               graph_array2[private_graph_size_cnt++] = i;
            }
         }

         for (ig = ig_offd_begin; ig < ig_offd_end; ig++)
         {
            i = graph_array_offd[ig];
            if (CF_marker_offd[i]==0)
            {
               graph_array_offd2[private_graph_offd_size_cnt++] = i;
            }
         }
      } /* omp parallel */

      /* swap the double buffers */
      HYPRE_Int *temp = graph_array;
      graph_array = graph_array2;
      graph_array2 = temp;

      temp = graph_array_offd;
      graph_array_offd = graph_array_offd2;
      graph_array_offd2 = temp;

      hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   } /* end while */

   /*   hypre_printf("*** MIS iteration %d\n",iter);
        hypre_printf("graph_size remaining %d\n",graph_size);
        hypre_printf("num_cols_offd %d\n",num_cols_offd);
        for (i=0;i<num_variables;i++)
        {
           if(CF_marker[i]==1)
              hypre_printf("node %d CF %d\n",i,CF_marker[i]);
        }*/

   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/

   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array2, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array_offd2, HYPRE_MEMORY_HOST);
   if (num_cols_offd) hypre_TFree(graph_array_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   /*if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);*/

   /* hand ownership of the marker back to the caller */
   *CF_marker_ptr = CF_marker;

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS]
      += hypre_MPI_Wtime();
#endif

   return (ierr);
}
GB_binop__lxor_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_01__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_03__lxor_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_int64)
// A*D function (colscale):         GB (_AxD__lxor_int64)
// D*A function (rowscale):         GB (_DxB__lxor_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_int64)
// C=scalar+B                       GB (_bind1st__lxor_int64)
// C=scalar+B'                      GB (_bind1st_tran__lxor_int64)
// C=A+scalar                       GB (_bind2nd__lxor_int64)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_int64)

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))

// The macros below configure the generic templates (#include'd into each
// function body) for the LXOR operator on int64_t operands.  The kernel
// bodies themselves live in the GB_*_template.c files.

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: logical xor of the "truthiness" of x and y
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_INT64 || GxB_NO_LXOR_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LXOR is not in that set, so this variant is not generated for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // not reached; emitted by the generator after the block above
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__lxor_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (LXOR is commutative, so GB_BINOP_FLIP is 0 here.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // ifndef GBCOMPACT
IJVector_parcsr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * IJVector_Par interface
 *
 *****************************************************************************/

#include "_hypre_IJ_mv.h"
#include "../HYPRE.h"

/******************************************************************************
 *
 * hypre_IJVectorCreatePar
 *
 * Creates the underlying ParVector object (if needed) and stores a pointer
 * to it as the hypre_IJVector object.  The IJ partitioning (which may start
 * at a nonzero global first row) is shifted so the ParVector sees a
 * zero-based partitioning.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorCreatePar(hypre_IJVector *vector, HYPRE_BigInt *IJpartitioning)
{
   MPI_Comm      comm = hypre_IJVectorComm(vector);
   HYPRE_Int     nprocs, num_procs, j;
   HYPRE_BigInt  n_global, *par_partitioning, first_row;

   /* query the communicator size (kept for parity with the original code) */
   hypre_MPI_Comm_size(comm, &num_procs);
   nprocs = num_procs; (void) nprocs;

   first_row = hypre_IJVectorGlobalFirstRow(vector);
   n_global  = hypre_IJVectorGlobalNumRows(vector);

   /* ParVector owns this two-entry partitioning array */
   par_partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   /* Shift to zero-based partitioning for the ParVector object */
   j = 0;
   par_partitioning[j] = IJpartitioning[j] - first_row;
   j = 1;
   par_partitioning[j] = IJpartitioning[j] - first_row;

   hypre_IJVectorObject(vector) =
      hypre_ParVectorCreate(comm, n_global, (HYPRE_BigInt *) par_partitioning);

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorDestroyPar
 *
 * Frees the ParVector local storage of an IJVectorPar.
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorDestroyPar(hypre_IJVector *vector)
{
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   return hypre_ParVectorDestroy(par_vector);
}

/******************************************************************************
*
 * hypre_IJVectorInitializePar
 *
 * initializes ParVector of IJVectorPar
 *
 *****************************************************************************/

/* Thin wrapper: initialize in the memory location recorded on the IJVector. */
HYPRE_Int
hypre_IJVectorInitializePar(hypre_IJVector *vector)
{
   return hypre_IJVectorInitializePar_v2(vector, hypre_IJVectorMemoryLocation(vector));
}

/* Initialize the ParVector (and its auxiliary off-proc stash) in the given
 * memory location.  The auxiliary vector lives on host unless the execution
 * policy for memory_location selects the device. */
HYPRE_Int
hypre_IJVectorInitializePar_v2(hypre_IJVector *vector, HYPRE_MemoryLocation memory_location)
{
   hypre_ParVector    *par_vector   = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector   = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   HYPRE_BigInt       *partitioning = hypre_ParVectorPartitioning(par_vector);
   hypre_Vector       *local_vector = hypre_ParVectorLocalVector(par_vector);
   HYPRE_Int           print_level  = hypre_IJVectorPrintLevel(vector);

   HYPRE_Int my_id;
   MPI_Comm  comm = hypre_IJVectorComm(vector);

   hypre_MPI_Comm_rank(comm, &my_id);

   /* auxiliary data follows the execution policy, not the raw location */
   HYPRE_MemoryLocation memory_location_aux =
      hypre_GetExecPolicy1(memory_location) == HYPRE_EXEC_HOST ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE;

   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("No ParVector partitioning for initialization -- ");
         hypre_printf("hypre_IJVectorInitializePar\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* local size is this rank's slice of the (zero-based) partitioning */
   hypre_VectorSize(local_vector) = (HYPRE_Int)(partitioning[1] - partitioning[0]);

   hypre_ParVectorInitialize_v2(par_vector, memory_location);

   /* lazily create the translator used to stash off-processor set/add calls */
   if (!aux_vector)
   {
      hypre_AuxParVectorCreate(&aux_vector);
      hypre_IJVectorTranslator(vector) = aux_vector;
   }
   hypre_AuxParVectorInitialize_v2(aux_vector, memory_location_aux);

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorSetMaxOffProcElmtsPar
 *
 * Sets the expected number of off-processor elements, pre-sizing the
 * auxiliary stash (created here if it does not exist yet).
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorSetMaxOffProcElmtsPar(hypre_IJVector *vector,
                                    HYPRE_Int max_off_proc_elmts)
{
   hypre_AuxParVector *aux_vector;

   aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   if (!aux_vector)
   {
      hypre_AuxParVectorCreate(&aux_vector);
      hypre_IJVectorTranslator(vector) = aux_vector;
   }
   hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* on device builds, also record the user-supplied count */
   hypre_AuxParVectorUsrOffProcElmts(aux_vector) = max_off_proc_elmts;
#endif
   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorDistributePar
 *
 * takes an IJVector generated for one processor and distributes it
 * across many processors according to vec_starts,
 * if vec_starts is NULL, it distributes them evenly?
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorDistributePar(hypre_IJVector *vector,
                            const HYPRE_Int *vec_starts)
{
   hypre_ParVector *old_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_ParVector *par_vector;
   HYPRE_Int        print_level = hypre_IJVectorPrintLevel(vector);

   if (!old_vector)
   {
      if (print_level)
      {
         hypre_printf("old_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   par_vector = hypre_VectorToParVector(hypre_ParVectorComm(old_vector),
                                        hypre_ParVectorLocalVector(old_vector),
                                        (HYPRE_BigInt *)vec_starts);
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorDistributePar\n");
         hypre_printf("**** Vector storage is unallocated ****\n");
      }
      hypre_error_in_arg(1);
   }

   /* replace the single-processor vector with the distributed one */
   hypre_ParVectorDestroy(old_vector);
   hypre_IJVectorObject(vector) = par_vector;

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorZeroValuesPar
 *
 * zeroes all local components of an IJVectorPar
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorZeroValuesPar(hypre_IJVector *vector)
{
   HYPRE_Int        my_id;
   HYPRE_BigInt     vec_start, vec_stop;
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   MPI_Comm         comm = hypre_IJVectorComm(vector);
   HYPRE_BigInt    *partitioning;
   hypre_Vector    *local_vector;
   HYPRE_Int        print_level = hypre_IJVectorPrintLevel(vector);

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   partitioning = hypre_ParVectorPartitioning(par_vector);
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("partitioning == NULL -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** Vector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* ParVector partitioning is zero-based: [vec_start, vec_stop) */
   vec_start = partitioning[0];
   vec_stop  = partitioning[1];

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorZeroValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_assert(hypre_VectorSize(local_vector) == (HYPRE_Int)(vec_stop - vec_start));

   hypre_SeqVectorSetConstantValues(local_vector, 0.0);

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorSetValuesPar
 *
 * sets a potentially noncontiguous set of components of an IJVectorPar
 *
*****************************************************************************/

HYPRE_Int
hypre_IJVectorSetValuesPar(hypre_IJVector       *vector,
                           HYPRE_Int             num_values,
                           const HYPRE_BigInt   *indices,
                           const HYPRE_Complex  *values)
{
   HYPRE_Int        my_id;
   HYPRE_Int        j, k;
   HYPRE_BigInt     i, vec_start, vec_stop;
   HYPRE_Complex   *data;
   HYPRE_Int        print_level = hypre_IJVectorPrintLevel(vector);

   HYPRE_BigInt    *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   MPI_Comm         comm = hypre_IJVectorComm(vector);
   hypre_Vector    *local_vector;

   /* If no components are to be set, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* IJ partitioning is inclusive here: [vec_start, vec_stop] */
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1]-1;

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorSetValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Determine whether indices points to local indices only; off-processor
      indices are silently skipped here (unlike AddToValues, which stashes
      them).  If indices == NULL, assume that num_values components are to be
      set in a block starting at vec_start.
      NOTE: If indices == NULL off proc values are ignored!!! */

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i >= vec_start && i <= vec_stop)
         {
            /* global index -> local offset */
            k = (HYPRE_Int)(i - vec_start);
            data[k] = values[j];
         }
      }
   }
   else
   {
      if (num_values > (HYPRE_Int)(vec_stop - vec_start) + 1)
      {
         if (print_level)
         {
            hypre_printf("Warning! Indices beyond local range not identified!\n ");
            hypre_printf("Off processor values have been ignored!\n");
         }
         num_values = (HYPRE_Int)(vec_stop - vec_start) +1;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         data[j] = values[j];
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorAddToValuesPar
 *
 * adds to a potentially noncontiguous set of IJVectorPar components
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorAddToValuesPar(hypre_IJVector       *vector,
                             HYPRE_Int             num_values,
                             const HYPRE_BigInt   *indices,
                             const HYPRE_Complex  *values)
{
   HYPRE_Int        my_id;
   /* NOTE(review): i, vec_start, vec_stop are HYPRE_Int here, while
      SetValuesPar declares them HYPRE_BigInt and indices is HYPRE_BigInt*.
      In mixed-int builds this looks like it could truncate large global
      indices -- confirm against upstream hypre. */
   HYPRE_Int        i, j, vec_start, vec_stop;
   HYPRE_Complex   *data;
   HYPRE_Int        print_level = hypre_IJVectorPrintLevel(vector);

   HYPRE_BigInt       *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector    *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   MPI_Comm            comm = hypre_IJVectorComm(vector);
   hypre_Vector       *local_vector;

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1) return 0;

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1]-1;

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorAddToValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   data = hypre_VectorData(local_vector);

   if (indices)
   {
      HYPRE_Int current_num_elmts
         = hypre_AuxParVectorCurrentOffProcElmts(aux_vector);
      HYPRE_Int max_off_proc_elmts
         = hypre_AuxParVectorMaxOffProcElmts(aux_vector);
      HYPRE_BigInt *off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
      HYPRE_Complex *off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
      HYPRE_Int k;

      for (j = 0; j < num_values; j++)
      {
         i = indices[j];
         if (i < vec_start || i > vec_stop)
         {
            /* if elements outside processor boundaries, store in off
               processor stash (flushed later by Assemble) */
            if (!max_off_proc_elmts)
            {
               /* first off-proc element: allocate an initial stash */
               max_off_proc_elmts = 100;
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector)
                  = hypre_CTAlloc(HYPRE_BigInt, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParVectorOffProcData(aux_vector)
                  = hypre_CTAlloc(HYPRE_Complex, max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_i = hypre_AuxParVectorOffProcI(aux_vector);
               off_proc_data = hypre_AuxParVectorOffProcData(aux_vector);
            }
            else if (current_num_elmts + 1 > max_off_proc_elmts)
            {
               /* stash full: grow it (by a small fixed increment) */
               max_off_proc_elmts += 10;
               off_proc_i = hypre_TReAlloc(off_proc_i, HYPRE_BigInt,
                                           max_off_proc_elmts, HYPRE_MEMORY_HOST);
               off_proc_data = hypre_TReAlloc(off_proc_data, HYPRE_Complex,
                                              max_off_proc_elmts, HYPRE_MEMORY_HOST);
               hypre_AuxParVectorMaxOffProcElmts(aux_vector) = max_off_proc_elmts;
               hypre_AuxParVectorOffProcI(aux_vector) = off_proc_i;
               hypre_AuxParVectorOffProcData(aux_vector) = off_proc_data;
            }
            off_proc_i[current_num_elmts] = i;
            off_proc_data[current_num_elmts++] = values[j];
            hypre_AuxParVectorCurrentOffProcElmts(aux_vector)=current_num_elmts;
         }
         else /* local values are added to the vector */
         {
            k = (HYPRE_Int)(i - vec_start);
            data[k] += values[j];
         }
      }
   }
   else
   {
      if (num_values > (HYPRE_Int)(vec_stop - vec_start) + 1)
      {
         if (print_level)
         {
            hypre_printf("Warning! Indices beyond local range not identified!\n ");
            hypre_printf("Off processor values have been ignored!\n");
         }
         num_values = (HYPRE_Int)(vec_stop - vec_start) +1;
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         data[j] += values[j];
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorAssemblePar
 *
 * currently tests existence of of ParVector object and its partitioning,
 * then flushes any stashed off-processor contributions (a collective call:
 * every rank participates in the Allreduce below).
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorAssemblePar(hypre_IJVector *vector)
{
   HYPRE_BigInt       *IJpartitioning = hypre_IJVectorPartitioning(vector);
   hypre_ParVector    *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   hypre_AuxParVector *aux_vector = (hypre_AuxParVector*) hypre_IJVectorTranslator(vector);
   HYPRE_BigInt       *partitioning;
   MPI_Comm            comm = hypre_IJVectorComm(vector);
   HYPRE_Int           print_level = hypre_IJVectorPrintLevel(vector);

   /* NOTE(review): these sanity checks set the error flag but fall through
      (no early return) -- presumably so the collective below still runs on
      every rank; confirm intent. */
   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
   }
   partitioning = hypre_ParVectorPartitioning(par_vector);
   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
   }
   if (!partitioning)
   {
      if (print_level)
      {
         hypre_printf("partitioning == NULL -- ");
         hypre_printf("hypre_IJVectorAssemblePar\n");
         hypre_printf("**** ParVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
   }

   if (aux_vector)
   {
      HYPRE_Int off_proc_elmts, current_num_elmts;
      HYPRE_Int max_off_proc_elmts;
      HYPRE_BigInt *off_proc_i;
      HYPRE_Complex *off_proc_data;
      current_num_elmts = hypre_AuxParVectorCurrentOffProcElmts(aux_vector);
      /* find out (globally) whether any rank has stashed off-proc entries */
      hypre_MPI_Allreduce(&current_num_elmts,&off_proc_elmts,1,HYPRE_MPI_INT,
                          hypre_MPI_SUM,comm);
      if (off_proc_elmts)
      {
         max_off_proc_elmts=hypre_AuxParVectorMaxOffProcElmts(aux_vector);
         off_proc_i=hypre_AuxParVectorOffProcI(aux_vector);
         off_proc_data=hypre_AuxParVectorOffProcData(aux_vector);
         hypre_IJVectorAssembleOffProcValsPar(vector, max_off_proc_elmts,
                                              current_num_elmts, HYPRE_MEMORY_HOST,
                                              off_proc_i, off_proc_data);
         /* stash has been communicated; release and reset it */
         hypre_TFree(hypre_AuxParVectorOffProcI(aux_vector), HYPRE_MEMORY_HOST);
         hypre_TFree(hypre_AuxParVectorOffProcData(aux_vector), HYPRE_MEMORY_HOST);
         hypre_AuxParVectorMaxOffProcElmts(aux_vector) = 0;
         hypre_AuxParVectorCurrentOffProcElmts(aux_vector) = 0;
      }
   }

   return hypre_error_flag;
}

/******************************************************************************
 *
 * hypre_IJVectorGetValuesPar
 *
 * get a potentially noncontiguous set of IJVectorPar components
 *
 *****************************************************************************/

HYPRE_Int
hypre_IJVectorGetValuesPar(hypre_IJVector      *vector,
                           HYPRE_Int            num_values,
                           const HYPRE_BigInt  *indices,
                           HYPRE_Complex       *values)
{
   HYPRE_Int        my_id;
   MPI_Comm         comm = hypre_IJVectorComm(vector);
   HYPRE_BigInt    *IJpartitioning = hypre_IJVectorPartitioning(vector);
   HYPRE_BigInt     vec_start;
   HYPRE_BigInt     vec_stop;
   /* global first row: subtracted inside GetValues2 to localize indices */
   HYPRE_BigInt     jmin = hypre_IJVectorGlobalFirstRow(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);
   HYPRE_Int        print_level = hypre_IJVectorPrintLevel(vector);

   /* If no components are to be retrieved, perform no checking and return */
   if (num_values < 1)
   {
      return 0;
   }

   hypre_MPI_Comm_rank(comm, &my_id);

   /* If par_vector == NULL or partitioning == NULL or local_vector == NULL
      let user know of catastrophe and exit */

   if (!par_vector)
   {
      if (print_level)
      {
         hypre_printf("par_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector storage is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (!IJpartitioning)
   {
      if (print_level)
      {
         hypre_printf("IJpartitioning == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** IJVector partitioning is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_Vector *local_vector = hypre_ParVectorLocalVector(par_vector);
   if (!local_vector)
   {
      if (print_level)
      {
         hypre_printf("local_vector == NULL -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** Vector local data is either unallocated or orphaned ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* NOTE(review): vec_stop is IJpartitioning[1] here (exclusive bound),
      whereas Set/AddToValues use IJpartitioning[1]-1; it is only used for
      the ordering sanity check below, but confirm this is intended. */
   vec_start = IJpartitioning[0];
   vec_stop = IJpartitioning[1];

   if (vec_start > vec_stop)
   {
      if (print_level)
      {
         hypre_printf("vec_start > vec_stop -- ");
         hypre_printf("hypre_IJVectorGetValuesPar\n");
         hypre_printf("**** This vector partitioning should not occur ****\n");
      }
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParVectorGetValues2(par_vector, num_values, (HYPRE_BigInt *) indices, jmin, values);

   return hypre_error_flag;
}

/******************************************************************************
 * hypre_IJVectorAssembleOffProcValsPar
 *
 * This is for handling set and get values calls to off-proc. entries - it is
 * called from assemble.  There is an alternate version for when the assumed
 * partition is being used.
 *****************************************************************************/

/* Exchange buffered off-processor (row, value) pairs with the ranks that
 * actually own those rows, then add the received values into the local data.
 *
 * Two collective hypre_DataExchangeList rounds are used:
 *   1) contact the ASSUMED owners (from the assumed partition) with row
 *      ranges to discover the ACTUAL owner of every row;
 *   2) send the packed (count, row, value, ...) payload to the real owners.
 * Payload entries are packed into a raw byte buffer with a fixed stride of
 * max(sizeof(HYPRE_BigInt), sizeof(HYPRE_Complex)) so int and complex slots
 * line up regardless of their relative sizes.
 *
 * memory_location: where off_proc_i / off_proc_data live; device input is
 * staged through host copies and device results are applied at the end. */

HYPRE_Int
hypre_IJVectorAssembleOffProcValsPar( hypre_IJVector *vector,
                                      HYPRE_Int max_off_proc_elmts,
                                      HYPRE_Int current_num_elmts,
                                      HYPRE_MemoryLocation memory_location,
                                      HYPRE_BigInt *off_proc_i,
                                      HYPRE_Complex *off_proc_data)
{
   HYPRE_Int myid;
   HYPRE_BigInt global_first_row, global_num_rows;
   HYPRE_Int i, j, in, k;
   HYPRE_Int proc_id, last_proc, prev_id, tmp_id;
   HYPRE_Int max_response_size;
   HYPRE_Int ex_num_contacts = 0;
   HYPRE_BigInt range_start, range_end;
   HYPRE_Int storage;
   HYPRE_Int indx;
   HYPRE_BigInt row;
   HYPRE_Int num_ranges, row_count;
   HYPRE_Int num_recvs;
   HYPRE_Int counter;
   HYPRE_BigInt upper_bound;
   HYPRE_Int num_real_procs;

   HYPRE_BigInt *row_list=NULL;
   HYPRE_Int *a_proc_id=NULL, *orig_order=NULL;
   HYPRE_Int *real_proc_id = NULL, *us_real_proc_id = NULL;
   HYPRE_Int *ex_contact_procs = NULL, *ex_contact_vec_starts = NULL;
   HYPRE_Int *recv_starts=NULL;
   HYPRE_BigInt *response_buf = NULL;
   HYPRE_Int *response_buf_starts=NULL;
   HYPRE_Int *num_rows_per_proc = NULL;
   HYPRE_Int tmp_int;
   HYPRE_Int obj_size_bytes, big_int_size, complex_size;
   HYPRE_Int first_index;

   void *void_contact_buf = NULL;
   void *index_ptr;
   void *recv_data_ptr;

   HYPRE_Complex tmp_complex;
   HYPRE_BigInt *ex_contact_buf=NULL;
   HYPRE_Complex *vector_data;
   HYPRE_Complex value;

   hypre_DataExchangeResponse response_obj1, response_obj2;
   hypre_ProcListElements send_proc_obj;

   MPI_Comm comm = hypre_IJVectorComm(vector);
   hypre_ParVector *par_vector = (hypre_ParVector*) hypre_IJVectorObject(vector);

   hypre_IJAssumedPart *apart;

   hypre_MPI_Comm_rank(comm, &myid);
   global_num_rows = hypre_IJVectorGlobalNumRows(vector);
   global_first_row = hypre_IJVectorGlobalFirstRow(vector);

   /* Device input: stage through host copies; off_proc_i/off_proc_data are
      re-pointed at the host buffers (freed at the end of this routine). */
   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      HYPRE_BigInt *off_proc_i_h = hypre_TAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);
      HYPRE_Complex *off_proc_data_h = hypre_TAlloc(HYPRE_Complex, current_num_elmts, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_i_h, off_proc_i, HYPRE_BigInt, current_num_elmts,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_data_h, off_proc_data, HYPRE_Complex, current_num_elmts,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      off_proc_i = off_proc_i_h;
      off_proc_data = off_proc_data_h;
   }

   /* call hypre_IJVectorAddToValuesParCSR directly inside this function
    * with one chunk of data */
   HYPRE_Int off_proc_nelm_recv_cur = 0;
   HYPRE_Int off_proc_nelm_recv_max = 0;
   HYPRE_BigInt *off_proc_i_recv = NULL;
   HYPRE_Complex *off_proc_data_recv = NULL;
   HYPRE_BigInt *off_proc_i_recv_d = NULL;
   HYPRE_Complex *off_proc_data_recv_d = NULL;

   /* verify that we have created the assumed partition */
   if (hypre_IJVectorAssumedPart(vector) == NULL)
   {
      hypre_IJVectorCreateAssumedPartition(vector);
   }
   apart = (hypre_IJAssumedPart*) hypre_IJVectorAssumedPart(vector);

   /* get the assumed processor id for each row */
   a_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   orig_order = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   row_list = hypre_CTAlloc(HYPRE_BigInt, current_num_elmts, HYPRE_MEMORY_HOST);

   if (current_num_elmts > 0)
   {
      for (i=0; i < current_num_elmts; i++)
      {
         row = off_proc_i[i];
         row_list[i] = row;
         hypre_GetAssumedPartitionProcFromRow(comm, row, global_first_row,
                                              global_num_rows, &proc_id);
         a_proc_id[i] = proc_id;
         orig_order[i] = i;
      }

      /* now we need to find the actual order of each row - sort on row -
         this will result in proc ids sorted also... (orig_order remembers
         the pre-sort position so data can be read back in caller order) */
      hypre_BigQsortb2i(row_list, a_proc_id, orig_order, 0, current_num_elmts -1);

      /* calculate the number of contacts: one per distinct assumed proc
         (a_proc_id is nondecreasing after the sort) */
      ex_num_contacts = 1;
      last_proc = a_proc_id[0];
      for (i=1; i < current_num_elmts; i++)
      {
         if (a_proc_id[i] > last_proc)
         {
            ex_num_contacts++;
            last_proc = a_proc_id[i];
         }
      }
   }

   /* now we will go through a create a contact list - need to contact
      assumed processors and find out who the actual row owner is - we
      will contact with a range (2 numbers) */
   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, ex_num_contacts, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, ex_num_contacts+1, HYPRE_MEMORY_HOST);
   ex_contact_buf = hypre_CTAlloc(HYPRE_BigInt, ex_num_contacts*2, HYPRE_MEMORY_HOST);

   counter = 0;
   range_end = -1;
   for (i=0; i< current_num_elmts; i++)
   {
      if (row_list[i] > range_end)
      {
         /* assumed proc */
         proc_id = a_proc_id[i];

         /* end of prev. range */
         if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[i-1];

         /*start new range*/
         ex_contact_procs[counter] = proc_id;
         ex_contact_vec_starts[counter] = counter*2;
         ex_contact_buf[counter*2] = row_list[i];
         counter++;

         hypre_GetAssumedPartitionRowRange(comm, proc_id, global_first_row,
                                           global_num_rows, &range_start, &range_end);
      }
   }

   /*finish the starts*/
   ex_contact_vec_starts[counter] = counter*2;
   /*finish the last range*/
   if (counter > 0) ex_contact_buf[counter*2 - 1] = row_list[current_num_elmts - 1];

   /* create response object - can use same fill response as used in the
      commpkg routine */
   response_obj1.fill_response = hypre_RangeFillResponseIJDetermineRecvProcs;
   response_obj1.data1 = apart; /* this is necessary so we can fill responses*/
   response_obj1.data2 = NULL;

   max_response_size = 6;  /* 6 means we can fit 3 ranges*/

   /* exchange round 1: ask the assumed owners who really owns each range */
   hypre_DataExchangeList(ex_num_contacts, ex_contact_procs,
                          ex_contact_buf, ex_contact_vec_starts, sizeof(HYPRE_BigInt),
                          sizeof(HYPRE_BigInt), &response_obj1, max_response_size, 4,
                          comm, (void**) &response_buf, &response_buf_starts);

   /* now response_buf contains a proc_id followed by an upper bound for the
      range. */
   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);

   hypre_TFree(a_proc_id, HYPRE_MEMORY_HOST);
   a_proc_id = NULL;

   /*how many ranges were returned?*/
   num_ranges = response_buf_starts[ex_num_contacts];
   num_ranges = num_ranges/2;

   prev_id = -1;
   j = 0;
   counter = 0;
   num_real_procs = 0;

   /* loop through ranges - create a list of actual processor ids*/
   for (i=0; i<num_ranges; i++)
   {
      upper_bound = response_buf[i*2+1];
      counter = 0;
      tmp_id = (HYPRE_Int)response_buf[i*2];

      /* loop through row_list entries - counting how many are in the range */
      while (j < current_num_elmts && row_list[j] <= upper_bound)
      {
         real_proc_id[j] = tmp_id;
         j++;
         counter++;
      }
      if (counter > 0 && tmp_id != prev_id)
      {
         num_real_procs++;
      }
      prev_id = tmp_id;
   }

   /* now we have the list of real procesors ids (real_proc_id) - and the
      number of distinct ones - so now we can set up data to be sent - we
      have HYPRE_Int and HYPRE_Complex data.  (row number and value) - we
      will send everything as a void since we may not know the rel sizes of
      ints and doubles */

   /* first find out how many elements to send per proc - so we can do
      storage */
   complex_size = sizeof(HYPRE_Complex);
   big_int_size = sizeof(HYPRE_BigInt);

   /* common stride so int and complex slots pack uniformly */
   obj_size_bytes = hypre_max(big_int_size, complex_size);

   ex_contact_procs = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);
   num_rows_per_proc = hypre_CTAlloc(HYPRE_Int, num_real_procs, HYPRE_MEMORY_HOST);

   counter = 0;

   if (num_real_procs > 0 )
   {
      ex_contact_procs[0] = real_proc_id[0];
      num_rows_per_proc[0] = 1;

      /* loop through real procs - these are sorted (row_list is sorted
         also)*/
      for (i=1; i < current_num_elmts; i++)
      {
         if (real_proc_id[i] == ex_contact_procs[counter]) /* same processor */
         {
            num_rows_per_proc[counter] += 1;  /*another row */
         }
         else /* new processor */
         {
            counter++;
            ex_contact_procs[counter] = real_proc_id[i];
            num_rows_per_proc[counter] = 1;
         }
      }
   }

   /* calculate total storage and make vec_starts arrays */
   /* NOTE: starts are stored NEGATIVE (-(start)-1) as a "not written yet"
      flag; the packing loop below flips each to its running position on
      first use. */
   storage = 0;
   ex_contact_vec_starts = hypre_CTAlloc(HYPRE_Int, num_real_procs + 1, HYPRE_MEMORY_HOST);
   ex_contact_vec_starts[0] = -1;

   for (i=0; i < num_real_procs; i++)
   {
      storage += 1 + 2* num_rows_per_proc[i]; /* 1 count slot + (row,value) per row */
      ex_contact_vec_starts[i+1] = -storage-1; /* need negative for next loop */
   }

   /*void_contact_buf = hypre_MAlloc(storage*obj_size_bytes);*/
   void_contact_buf = hypre_CTAlloc(char, storage*obj_size_bytes, HYPRE_MEMORY_HOST);
   index_ptr = void_contact_buf;  /* step through with this index */

   /* set up data to be sent to send procs */
   /* for each proc, ex_contact_buf_d contains #rows, row #, data, etc. */

   /* un-sort real_proc_id - we want to access data arrays in order */
   us_real_proc_id = hypre_CTAlloc(HYPRE_Int, current_num_elmts, HYPRE_MEMORY_HOST);
   for (i=0; i < current_num_elmts; i++)
   {
      us_real_proc_id[orig_order[i]] = real_proc_id[i];
   }
   hypre_TFree(real_proc_id, HYPRE_MEMORY_HOST);

   prev_id = -1;
   for (i=0; i < current_num_elmts; i++)
   {
      proc_id = us_real_proc_id[i];
      /* can't use row list[i] - you loose the negative signs that
         differentiate add/set values */
      row = off_proc_i[i];
      /* find position of this processor */
      indx = hypre_BinarySearch(ex_contact_procs, proc_id, num_real_procs);
      in = ex_contact_vec_starts[indx];

      index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

      /* first time for this processor - add the number of rows to the
         buffer */
      if (in < 0)
      {
         in = -in - 1;
         /* re-calc. index_ptr since in_i was negative */
         index_ptr = (void *) ((char *) void_contact_buf + in*obj_size_bytes);

         tmp_int = num_rows_per_proc[indx];
         hypre_TMemcpy( index_ptr, &tmp_int, HYPRE_Int, 1,
                        HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
         in++;
      }
      /* add row # */
      hypre_TMemcpy( index_ptr, &row, HYPRE_BigInt,1 ,
                     HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* add value */
      tmp_complex = off_proc_data[i];
      hypre_TMemcpy( index_ptr, &tmp_complex, HYPRE_Complex, 1,
                     HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      index_ptr = (void *) ((char *) index_ptr + obj_size_bytes);
      in++;

      /* increment the indexes to keep track of where we are - fix later */
      ex_contact_vec_starts[indx] = in;
   }

   /* some clean up */
   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(us_real_proc_id, HYPRE_MEMORY_HOST);
   hypre_TFree(orig_order, HYPRE_MEMORY_HOST);
   hypre_TFree(row_list, HYPRE_MEMORY_HOST);
   hypre_TFree(num_rows_per_proc, HYPRE_MEMORY_HOST);

   /* the packing loop advanced each start to its END; shift right so
      entry i is again the START of proc i's data, with entry 0 = 0 */
   for (i=num_real_procs; i > 0; i--)
   {
      ex_contact_vec_starts[i] = ex_contact_vec_starts[i-1];
   }
   ex_contact_vec_starts[0] = 0;

   /* now send the data */

   /***********************************/
   /* now get the info in send_proc_obj_d */

   /* the response we expect is just a confirmation*/
   response_buf = NULL;
   response_buf_starts = NULL;

   /*build the response object*/
   /* use the send_proc_obj for the info kept from contacts */
   /*estimate inital storage allocation */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = num_real_procs + 5;
   send_proc_obj.id = NULL; /* don't care who sent it to us */
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = storage + 20;
   send_proc_obj.v_elements =
      hypre_TAlloc(char, obj_size_bytes*send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);

   response_obj2.fill_response = hypre_FillResponseIJOffProcVals;
   response_obj2.data1 = NULL;
   response_obj2.data2 = &send_proc_obj;

   max_response_size = 0;

   /* exchange round 2: deliver the packed payload to the real owners */
   hypre_DataExchangeList(num_real_procs, ex_contact_procs,
                          void_contact_buf, ex_contact_vec_starts, obj_size_bytes,
                          0, &response_obj2, max_response_size, 5,
                          comm,  (void **) &response_buf, &response_buf_starts);

   /***********************************/

   hypre_TFree(response_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(response_buf_starts, HYPRE_MEMORY_HOST);

   hypre_TFree(ex_contact_procs, HYPRE_MEMORY_HOST);
   hypre_TFree(void_contact_buf, HYPRE_MEMORY_HOST);
   hypre_TFree(ex_contact_vec_starts, HYPRE_MEMORY_HOST);

   /* Now we can unpack the send_proc_objects and either set or add to the
      vector data */

   num_recvs = send_proc_obj.length;

   /* alias */
   recv_data_ptr = send_proc_obj.v_elements;
   recv_starts = send_proc_obj.vec_starts;

   vector_data = hypre_VectorData(hypre_ParVectorLocalVector(par_vector));
   first_index = hypre_ParVectorFirstIndex(par_vector);

   for (i=0; i < num_recvs; i++)
   {
      indx = recv_starts[i];

      /* get the number of rows for this recv */
      hypre_TMemcpy( &row_count, recv_data_ptr, HYPRE_Int, 1,
                     HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
      indx++;

      for (j=0; j < row_count; j++) /* for each row: unpack info */
      {
         /* row # */
         hypre_TMemcpy( &row, recv_data_ptr, HYPRE_BigInt, 1,
                        HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         /* value */
         hypre_TMemcpy( &value, recv_data_ptr, HYPRE_Complex, 1,
                        HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
         recv_data_ptr = (void *) ((char *)recv_data_ptr + obj_size_bytes);
         indx++;

         if (memory_location == HYPRE_MEMORY_HOST)
         {
            /* global row -> local index, then accumulate in place */
            k = (HYPRE_Int)(row - first_index - global_first_row);
            vector_data[k] += value;
         }
         else
         {
            /* device path: collect (row, value) pairs on the host first;
               grow the staging arrays geometrically as needed */
            if (off_proc_nelm_recv_cur >= off_proc_nelm_recv_max)
            {
               off_proc_nelm_recv_max = 2 * (off_proc_nelm_recv_cur + 1);
               off_proc_i_recv = hypre_TReAlloc(off_proc_i_recv, HYPRE_BigInt,
                                               off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
               off_proc_data_recv = hypre_TReAlloc(off_proc_data_recv, HYPRE_Complex,
                                                   off_proc_nelm_recv_max, HYPRE_MEMORY_HOST);
            }
            off_proc_i_recv[off_proc_nelm_recv_cur] = row;
            off_proc_data_recv[off_proc_nelm_recv_cur] = value;
            off_proc_nelm_recv_cur ++;
         }
      }
   }

   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      /* copy the staged pairs to the device and apply them there */
      off_proc_i_recv_d = hypre_TAlloc(HYPRE_BigInt, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      off_proc_data_recv_d = hypre_TAlloc(HYPRE_Complex, off_proc_nelm_recv_cur, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(off_proc_i_recv_d, off_proc_i_recv, HYPRE_BigInt, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(off_proc_data_recv_d, off_proc_data_recv, HYPRE_Complex, off_proc_nelm_recv_cur,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      hypre_IJVectorSetAddValuesParDevice(vector, off_proc_nelm_recv_cur,
                                          off_proc_i_recv_d, off_proc_data_recv_d, "add");
#endif
   }

   hypre_TFree(send_proc_obj.v_elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);

   if (memory_location == HYPRE_MEMORY_DEVICE)
   {
      /* these are the host staging copies made at the top of the routine */
      hypre_TFree(off_proc_i, HYPRE_MEMORY_HOST);
      hypre_TFree(off_proc_data, HYPRE_MEMORY_HOST);
   }

   hypre_TFree(off_proc_i_recv, HYPRE_MEMORY_HOST);
   hypre_TFree(off_proc_data_recv, HYPRE_MEMORY_HOST);

   hypre_TFree(off_proc_i_recv_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(off_proc_data_recv_d, HYPRE_MEMORY_DEVICE);

   return hypre_error_flag;
}
GB_binop__pow_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_fp32 // A.*B function (eWiseMult): GB_AemultB__pow_fp32 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_fp32 // C+=b function (dense accum): GB_Cdense_accumb__pow_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_fp32 // C=scalar+B GB_bind1st__pow_fp32 // C=scalar+B' GB_bind1st_tran__pow_fp32 // C=A+scalar GB_bind2nd__pow_fp32 // C=A'+scalar GB_bind2nd_tran__pow_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = GB_powf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = 
Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_powf (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_FP32 || GxB_NO_POW_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, 
const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__pow_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; 
#include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = GB_powf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = GB_powf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = GB_powf (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = GB_powf (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
bubblesort.c
// C program for implementation of Bubble sort
// (parallel variant: odd-even transposition sort)
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

// Exchange the two ints pointed to by xp and yp.
void swap(int *xp, int *yp)
{
    int temp = *xp;
    *xp = *yp;
    *yp = temp;
}

// Sort arr[0..n-1] into ascending order using odd-even transposition sort,
// the parallelizable form of bubble sort.
//
// Fixes to the original:
//  * The outer phase loop also carried "#pragma omp parallel for" with the
//    phase variables shared -- phases must run sequentially, and running
//    them concurrently raced on `first`, the loop indices, and arr.
//    (With default(none), the unlisted inner index did not even compile
//    under OpenMP.)
//  * The inner loop stepped by 1, so neighbouring iterations touched the
//    overlapping pairs (arr[j], arr[j+1]) and could not safely run in
//    parallel.  Stepping by 2 makes each phase a set of disjoint
//    compare-exchanges, which is race-free.
//  * n phases (not n-1) are used: n phases are sufficient for odd-even
//    transposition sort to guarantee a sorted result.
void bubbleSort(int arr[], int n)
{
    int phase;
    for (phase = 0; phase < n; phase++)    // phases are inherently sequential
    {
        int first = phase % 2;  // even phase: pairs (0,1),(2,3),...; odd: (1,2),(3,4),...
        int j;
#pragma omp parallel for default(none) private(j) shared(arr, first, n)
        for (j = first; j < n - 1; j += 2)
        {
            if (arr[j] > arr[j + 1])
            {
                swap(&arr[j], &arr[j + 1]);
            }
        }
    }
}

// Verify if the array is in ascending order; returns false at the first
// out-of-order pair.
bool isArraySorted(int arr[], int size)
{
    for (int i = 0; i < size - 1; i++)
        if (arr[i] > arr[i + 1])
            return false;
    return true;
}

// Driver program to test above functions
int main()
{
    int i, n = 10000;
    int *arr = (int*) malloc(n * sizeof(int));
    if (arr == NULL)   // original never checked the allocation
    {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }
    // rand() is not required to be thread-safe, so fill the array serially
    // (the original called it from an OpenMP loop, racing on its state).
    for (i = 0; i < n; i++)
        arr[i] = rand() % n;
    bubbleSort(arr, n);
    printf("Is array sorted?: \n");
    fputs(isArraySorted(arr, n) ? "true\n" : "false\n", stdout);
    free(arr);         // original leaked the array
    return 0;
}
lastpass_fmt_plug.c
/* LastPass offline cracker patch for JtR. Hacked together during January of 2013 by
 * Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * All the hard work was done by Milen (author of hashkill).
 *
 * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * Scheme implemented here (as the code below shows): derive a 32-byte key
 * with PBKDF2-HMAC-SHA256(password, salt=email, 500 iterations), then
 * AES-256-ECB-encrypt the fixed 16-byte block "lastpass rocks\x02\x02";
 * the first 16 bytes of that ciphertext are the stored binary.
 * Hash line format: $lp$<email>$<32 hex chars>.
 */

/* JtR plugin boilerplate: the same file is included three ways by the
   format registry. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_lastpass;
#elif FMT_REGISTERS_H
john_register_one(&fmt_lastpass);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "johnswap.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include <openssl/aes.h>
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"

#define FORMAT_LABEL "lp"
#define FORMAT_NAME "LastPass offline"
#ifdef MMX_COEF_SHA256
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
/* With SIMD PBKDF2, keys are hashed in groups of SSE_GROUP_SZ_SHA256;
   otherwise one at a time. */
#ifdef MMX_COEF_SHA256
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

static struct fmt_tests lastpass_tests[] = {
	{"$lp$hackme@mailinator.com$6f5d8cec3615fc9ac7ba2e0569bce4f5", "strongpassword"},
	{NULL}
};

#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	int iterations;          /* set to zero by get_salt's memset; iteration
	                            count is hard-coded as 500 in crypt_all */
	int salt_length;
	unsigned char salt[32];  /* the email address (not NUL-terminated when
	                            exactly 32 chars; salt_length governs) */
} *cur_salt;

/* Size the per-key buffers, scaling keys-per-crypt for OpenMP.
   Note the order: min is scaled by thread count only, max by
   thread count * OMP_SCALE. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_tiny(sizeof(*crypt_out) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Return nonzero iff q consists solely of hex digits (empty string
   included): walk while atoi16 maps the char, then require NUL. */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Validate "$lp$<email up to 32 chars>$<exactly 32 hex chars>". */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	if (strncmp(ciphertext, "$lp$", 4))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 4;
	if ((p = strtok(ctcopy, "$")) == NULL)	/* email */
		goto err;
	if (strlen(p) > 32)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* hash */
		goto err;
	if (strlen(p) != 32)
		goto err;
	if (!ishex(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Extract the email as the PBKDF2 salt.  Returns a pointer to static
   storage, as the fmt API expects (copied out by the caller). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += 4;	/* skip over "$lp$" */
	p = strtok(ctcopy, "$");
	strncpy((char*)cs.salt, p, 32);
	/* length taken from the token (valid() caps it at 32), so a
	   non-NUL-terminated 32-byte salt is still measured correctly */
	cs.salt_length = strlen((char*)p);
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Decode the 32 hex chars after the last '$' into 16 binary bytes
   (static buffer, union-aligned for word access). */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Standard JtR hash-table accessors over the first crypt_out word. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Compute crypt_out[] for all queued keys: PBKDF2-SHA256 (500 iters,
   email salt) -> 256-bit AES key -> ECB-encrypt the fixed plaintext
   "lastpass rocks\x02\x02".
   NOTE(review): without _OPENMP the for-loop header is preprocessed away
   and the block runs once with index 0 -- this relies on count never
   exceeding MAX_KEYS_PER_CRYPT in non-OpenMP builds; confirm against the
   framework's keys-per-crypt contract. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		AES_KEY akey;
#ifdef MMX_COEF_SHA256
		/* SIMD path: derive MAX_KEYS_PER_CRYPT keys in one call */
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT];
		ARCH_WORD_32 key[MAX_KEYS_PER_CRYPT][8];
		union {
			ARCH_WORD_32 *pout[MAX_KEYS_PER_CRYPT];
			unsigned char *poutc;
		} x;
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			x.pout[i] = key[i];
		}
		pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt,
		                  cur_salt->salt_length, 500, &(x.poutc), 32, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			memset(&akey, 0, sizeof(AES_KEY));
			AES_set_encrypt_key((unsigned char*)key[i], 256, &akey);
			AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02",
			                (unsigned char*)crypt_out[i+index], &akey, AES_ENCRYPT);
		}
#else
		/* scalar path: one key at a time */
		unsigned char key[32];
		pbkdf2_sha256((unsigned char*)saved_key[index], strlen(saved_key[index]),
		              cur_salt->salt, cur_salt->salt_length, 500, key, 32, 0);
#if !ARCH_LITTLE_ENDIAN
		{
			/* pbkdf2_sha256 output words need byte-swapping on BE hosts
			   before being fed to AES as raw key bytes */
			int i;
			for (i = 0; i < 8; ++i) {
				((ARCH_WORD_32*)key)[i] = JOHNSWAP(((ARCH_WORD_32*)key)[i]);
			}
		}
#endif
		memset(&akey, 0, sizeof(AES_KEY));
		AES_set_encrypt_key((unsigned char*)key, 256, &akey);
		AES_ecb_encrypt((unsigned char*)"lastpass rocks\x02\x02",
		                (unsigned char*)crypt_out[index], &akey, AES_ENCRYPT);
#endif
	}
	return count;
}

/* Any computed result matches the candidate binary? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Full 16-byte binary already compared in cmp_one; nothing more to check. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void lastpass_set_key(char *key, int index)
{
	int saved_key_length = strlen(key);
	if (saved_key_length > PLAINTEXT_LENGTH)
		saved_key_length = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_key_length);
	saved_key[index][saved_key_length] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format descriptor wiring the callbacks above into the JtR framework. */
struct fmt_main fmt_lastpass = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		lastpass_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		set_salt,
		lastpass_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_binop__pow_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_uint8 // A.*B function (eWiseMult): GB_AemultB__pow_uint8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_uint8 // C+=b function (dense accum): GB_Cdense_accumb__pow_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_uint8 // C=scalar+B GB_bind1st__pow_uint8 // C=scalar+B' GB_bind1st_tran__pow_uint8 // C=A+scalar GB_bind2nd__pow_uint8 // C=A'+scalar GB_bind2nd_tran__pow_uint8 // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_pow_uint8 (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij 
= Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_pow_uint8 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = GB_pow_uint8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = GB_pow_uint8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ 
uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_uint8 (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_uint8 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
ast-dump-openmp-distribute.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp distribute for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp distribute for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp distribute collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp distribute collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp distribute collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-distribute.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:4:1, col:23> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | 
|-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:10:1, col:23> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt 
{{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:17:1, col:35> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | | 
`-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | | |-value: Int 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPDistributeDirective {{.*}} <line:24:1, col:35> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | | |-value: Int 2 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:33> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: 
|-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPDistributeDirective {{.*}} <line:31:1, col:35> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:24, col:34> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:33> 'int' // CHECK-NEXT: | |-value: Int 2 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:33> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} 
<col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-distribute.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 
'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
Grid.h
#pragma once #include "GridTypes.h" #include "ScalarField.h" #include "Vectors.h" namespace pfc { enum InterpolationType { Interpolation_CIC, Interpolation_TSC, Interpolation_SecondOrder, Interpolation_FourthOrder, Interpolation_PCS }; template<typename Data, GridTypes gridType> class Grid { public: Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & globalGridDims); Grid(const Int3 & _numAllCells, FP _dt, const Int3 & globalGridDims); // for complex grid const FP3 BxPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBx; } const FP3 ByPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBy; } const FP3 BzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBz; } const FP3 ExPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJx; } const FP3 EyPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJy; } const FP3 EzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJz; } const FP3 JxPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJx; } const FP3 JyPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJy; } const FP3 JzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJz; } void getFieldsXYZ(FP x, FP y, FP z, FP3 & e, FP3 & b) const { FP3 coords(x, y, z); getFields(coords, e, b); } void getFields(const FP3& coords, FP3 & e, FP3 & b) const { (this->*interpolationFields)(coords, e, b); } virtual FP3 getJ(const FP3& coords) const; virtual FP3 getE(const FP3& coords) const; virtual FP3 getB(const FP3& coords) const; void getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const; void getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const; FP 
getEx(const FP3& coords) const { return (this->*interpolationEx)(coords); } FP getEy(const FP3& coords) const { return (this->*interpolationEy)(coords); } FP getEz(const FP3& coords) const { return (this->*interpolationEz)(coords); } FP getBx(const FP3& coords) const { return (this->*interpolationBx)(coords); } FP getBy(const FP3& coords) const { return (this->*interpolationBy)(coords); } FP getBz(const FP3& coords) const { return (this->*interpolationBz)(coords); } FP getJx(const FP3& coords) const { return (this->*interpolationJx)(coords); } FP getJy(const FP3& coords) const { return (this->*interpolationJy)(coords); } FP getJz(const FP3& coords) const { return (this->*interpolationJz)(coords); } FP getExCIC(const FP3& coords) const { return getFieldCIC(coords, Ex, shiftEJx); } FP getEyCIC(const FP3& coords) const { return getFieldCIC(coords, Ey, shiftEJy); } FP getEzCIC(const FP3& coords) const { return getFieldCIC(coords, Ez, shiftEJz); } FP getBxCIC(const FP3& coords) const { return getFieldCIC(coords, Bx, shiftBx); } FP getByCIC(const FP3& coords) const { return getFieldCIC(coords, By, shiftBy); } FP getBzCIC(const FP3& coords) const { return getFieldCIC(coords, Bz, shiftBz); } FP getJxCIC(const FP3& coords) const { return getFieldCIC(coords, Jx, shiftEJx); } FP getJyCIC(const FP3& coords) const { return getFieldCIC(coords, Jy, shiftEJy); } FP getJzCIC(const FP3& coords) const { return getFieldCIC(coords, Jz, shiftEJz); } FP getExTSC(const FP3& coords) const { return getFieldTSC(coords, Ex, shiftEJx); } FP getEyTSC(const FP3& coords) const { return getFieldTSC(coords, Ey, shiftEJy); } FP getEzTSC(const FP3& coords) const { return getFieldTSC(coords, Ez, shiftEJz); } FP getBxTSC(const FP3& coords) const { return getFieldTSC(coords, Bx, shiftBx); } FP getByTSC(const FP3& coords) const { return getFieldTSC(coords, By, shiftBy); } FP getBzTSC(const FP3& coords) const { return getFieldTSC(coords, Bz, shiftBz); } FP getJxTSC(const FP3& coords) const { return 
getFieldTSC(coords, Jx, shiftEJx); } FP getJyTSC(const FP3& coords) const { return getFieldTSC(coords, Jy, shiftEJy); } FP getJzTSC(const FP3& coords) const { return getFieldTSC(coords, Jz, shiftEJz); } FP getExSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ex, shiftEJx); } FP getEySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ey, shiftEJy); } FP getEzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ez, shiftEJz); } FP getBxSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Bx, shiftBx); } FP getBySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, By, shiftBy); } FP getBzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Bz, shiftBz); } FP getJxSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jx, shiftEJx); } FP getJySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jy, shiftEJy); } FP getJzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jz, shiftEJz); } FP getExFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ex, shiftEJx); } FP getEyFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ey, shiftEJy); } FP getEzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ez, shiftEJz); } FP getBxFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Bx, shiftBx); } FP getByFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, By, shiftBy); } FP getBzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Bz, shiftBz); } FP getJxFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jx, shiftEJx); } FP getJyFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jy, shiftEJy); } FP getJzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jz, shiftEJz); } FP getExPCS(const 
FP3& coords) const { return getFieldPCS(coords, Ex, shiftEJx); } FP getEyPCS(const FP3& coords) const { return getFieldPCS(coords, Ey, shiftEJy); } FP getEzPCS(const FP3& coords) const { return getFieldPCS(coords, Ez, shiftEJz); } FP getBxPCS(const FP3& coords) const { return getFieldPCS(coords, Bx, shiftBx); } FP getByPCS(const FP3& coords) const { return getFieldPCS(coords, By, shiftBy); } FP getBzPCS(const FP3& coords) const { return getFieldPCS(coords, Bz, shiftBz); } FP getJxPCS(const FP3& coords) const { return getFieldPCS(coords, Jx, shiftEJx); } FP getJyPCS(const FP3& coords) const { return getFieldPCS(coords, Jy, shiftEJy); } FP getJzPCS(const FP3& coords) const { return getFieldPCS(coords, Jz, shiftEJz); } /*void dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx); void dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx); void dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx); void loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx); void loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx); void loadCurrents(const FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx);*/ /* Make all current density values zero. 
*/ void zeroizeJ(); const Int3 getNumExternalLeftCells() const { Int3 result(2, 2, 2); for (int d = 0; d < 3; d++) if (globalGridDims[d] == 1) result[d] = 0; return result; } const Int3 getNumExternalRightCells() const { return getNumExternalLeftCells(); } void setInterpolationType(InterpolationType type); InterpolationType getInterpolationType() const; const Int3 globalGridDims; // important to initialize it first const FP3 steps; const FP dt; const Int3 numInternalCells; const Int3 numCells; const FP3 origin; const int dimensionality; // Time diffence between b and e const FP timeShiftE, timeShiftB, timeShiftJ; ScalarField<Data> Ex, Ey, Ez, Bx, By, Bz, Jx, Jy, Jz; private: // 3d shifts of the field in the cell const FP3 shiftEJx, shiftEJy, shiftEJz, shiftBx, shiftBy, shiftBz; /* Get grid index and normalized internal coords in [0, 0, 0]..(1, 1, 1) for given physical coords and shift. */ void getGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx, FP3 & internalCoords) const { idx.x = (int)((coords.x - origin.x - shift.x) / steps.x); idx.y = (int)((coords.y - origin.y - shift.y) / steps.y); idx.z = (int)((coords.z - origin.z - shift.z) / steps.z); internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps; } void getClosestGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx, FP3 & internalCoords) const { idx.x = (int)((coords.x - origin.x - shift.x) / steps.x + 0.5); idx.y = (int)((coords.y - origin.y - shift.y) / steps.y + 0.5); idx.z = (int)((coords.z - origin.z - shift.z) / steps.z + 0.5); internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps; } /* Get base coords of element (i, j, k) so that its real coords are base coords + corresponding shift. 
*/ const FP3 baseCoords(int i, int j, int k) const { return origin + FP3(i, j, k) * steps; } FP getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; FP getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const; InterpolationType interpolationType; void (Grid::*interpolationFields)(const FP3&, FP3&, FP3&) const; FP (Grid::*interpolationEx)(const FP3&) const; FP(Grid::*interpolationEy)(const FP3&) const; FP(Grid::*interpolationEz)(const FP3&) const; FP(Grid::*interpolationBx)(const FP3&) const; FP(Grid::*interpolationBy)(const FP3&) const; FP(Grid::*interpolationBz)(const FP3&) const; FP(Grid::*interpolationJx)(const FP3&) const; FP(Grid::*interpolationJy)(const FP3&) const; FP(Grid::*interpolationJz)(const FP3&) const; }; typedef Grid<FP, GridTypes::YeeGridType> YeeGrid; typedef Grid<FP, GridTypes::StraightGridType> SimpleGrid; typedef Grid<FP, GridTypes::PSTDGridType> PSTDGrid; typedef Grid<FP, GridTypes::PSATDGridType> PSATDGrid; template <> inline Grid<FP, GridTypes::YeeGridType>::Grid(const Int3 & _numCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), dt(_dt), numInternalCells(_numCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0.5, 0.5) * steps), shiftEJy(FP3(0.5, 0, 0.5) * steps), shiftEJz(FP3(0.5, 0.5, 0) * steps), shiftBx(FP3(0.5, 0, 0) * steps), shiftBy(FP3(0, 0.5, 0) * steps), shiftBz(FP3(0, 0, 0.5) * steps), timeShiftE(0.0), 
timeShiftB(dt/2), timeShiftJ(0.0), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<FP, GridTypes::StraightGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(0.0), timeShiftJ(0.0), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // SPECTRAL GRIDS template<> inline Grid<complexFP, GridTypes::PSTDGridType>::Grid(const Int3 & _numAllCells, FP _dt, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), dt(_dt), numInternalCells(_numAllCells), numCells(numInternalCells), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(dt / 2), timeShiftJ(dt / 2), 
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { } template<> inline Grid<FP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(dt / 2), timeShiftJ(dt / 2), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<complexFP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(0.0), timeShiftJ(dt / 2), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { } template<> inline Grid<FP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & 
_globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(0.0), timeShiftJ(dt / 2), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // end SPECTRAL GRIDS template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shift, idx, internalCoords); return field.interpolateCIC(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateTSC(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateSecondOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; 
getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateFourthOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shift, idx, internalCoords); return field.interpolatePCS(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const { /* For each component of E and B get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateCIC(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateTSC(idx, internalCoords); 
getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateTSC(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateSecondOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateFourthOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shiftEJx, 
idx, internalCoords); e.x = Ex.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolatePCS(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getJ(const FP3& coords) const { /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; FP3 j; getGridCoords(coords, shiftEJx, idx, internalCoords); j.x = Jx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); j.y = Jy.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); j.z = Jz.interpolateCIC(idx, internalCoords); return j; } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getE(const FP3& coords) const { /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; FP3 e; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateCIC(idx, internalCoords); return e; } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getB(const FP3& coords) const { /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. 
*/ Int3 idx; FP3 internalCoords; FP3 b; getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateCIC(idx, internalCoords); return b; } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::zeroizeJ() { Jx.zeroize(); Jy.zeroize(); Jz.zeroize(); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::setInterpolationType(InterpolationType type) { interpolationType = type; switch (interpolationType) { case Interpolation_CIC: interpolationFields = &Grid<Data, gT>::getFieldsCIC; interpolationEx = &Grid<Data, gT>::getExCIC; interpolationEy = &Grid<Data, gT>::getEyCIC; interpolationEz = &Grid<Data, gT>::getEzCIC; interpolationBx = &Grid<Data, gT>::getBxCIC; interpolationBy = &Grid<Data, gT>::getByCIC; interpolationBz = &Grid<Data, gT>::getBzCIC; interpolationJx = &Grid<Data, gT>::getJxCIC; interpolationJy = &Grid<Data, gT>::getJyCIC; interpolationJz = &Grid<Data, gT>::getJzCIC; break; case Interpolation_TSC: interpolationFields = &Grid<Data, gT>::getFieldsTSC; interpolationEx = &Grid<Data, gT>::getExTSC; interpolationEy = &Grid<Data, gT>::getEyTSC; interpolationEz = &Grid<Data, gT>::getEzTSC; interpolationBx = &Grid<Data, gT>::getBxTSC; interpolationBy = &Grid<Data, gT>::getByTSC; interpolationBz = &Grid<Data, gT>::getBzTSC; interpolationJx = &Grid<Data, gT>::getJxTSC; interpolationJy = &Grid<Data, gT>::getJyTSC; interpolationJz = &Grid<Data, gT>::getJzTSC; break; case Interpolation_PCS: interpolationFields = &Grid<Data, gT>::getFieldsPCS; interpolationEx = &Grid<Data, gT>::getExPCS; interpolationEy = &Grid<Data, gT>::getEyPCS; interpolationEz = &Grid<Data, gT>::getEzPCS; interpolationBx = &Grid<Data, gT>::getBxPCS; interpolationBy = &Grid<Data, gT>::getByPCS; interpolationBz = &Grid<Data, gT>::getBzPCS; interpolationJx = &Grid<Data, 
gT>::getJxPCS; interpolationJy = &Grid<Data, gT>::getJyPCS; interpolationJz = &Grid<Data, gT>::getJzPCS; break; case Interpolation_SecondOrder: interpolationFields = &Grid<Data, gT>::getFieldsSecondOrder; interpolationEx = &Grid<Data, gT>::getExSecondOrder; interpolationEy = &Grid<Data, gT>::getEySecondOrder; interpolationEz = &Grid<Data, gT>::getEzSecondOrder; interpolationBx = &Grid<Data, gT>::getBxSecondOrder; interpolationBy = &Grid<Data, gT>::getBySecondOrder; interpolationBz = &Grid<Data, gT>::getBzSecondOrder; interpolationJx = &Grid<Data, gT>::getJxSecondOrder; interpolationJy = &Grid<Data, gT>::getJySecondOrder; interpolationJz = &Grid<Data, gT>::getJzSecondOrder; break; case Interpolation_FourthOrder: interpolationFields = &Grid<Data, gT>::getFieldsFourthOrder; interpolationEx = &Grid<Data, gT>::getExFourthOrder; interpolationEy = &Grid<Data, gT>::getEyFourthOrder; interpolationEz = &Grid<Data, gT>::getEzFourthOrder; interpolationBx = &Grid<Data, gT>::getBxFourthOrder; interpolationBy = &Grid<Data, gT>::getByFourthOrder; interpolationBz = &Grid<Data, gT>::getBzFourthOrder; interpolationJx = &Grid<Data, gT>::getJxFourthOrder; interpolationJy = &Grid<Data, gT>::getJyFourthOrder; interpolationJz = &Grid<Data, gT>::getJzFourthOrder; break; } } template<typename Data, GridTypes gT> inline InterpolationType Grid<Data, gT>::getInterpolationType() const { return interpolationType; } /*template<> inline void Grid<FP, YeeGridType>::dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); b[idx].x = Bx(nodeIdx); b[idx].y = By(nodeIdx); b[idx].z = Bz(nodeIdx); } } template<> inline void Grid<FP, YeeGridType>::dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * 
maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); e[idx].x = Ex(nodeIdx); e[idx].y = Ey(nodeIdx); e[idx].z = Ez(nodeIdx); } } template<> inline void Grid<FP, YeeGridType>::dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); currents[idx].x = Jx(nodeIdx); currents[idx].y = Jy(nodeIdx); currents[idx].z = Jz(nodeIdx); idx++; } } template<> inline void Grid<FP, YeeGridType>::loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; i++) for (int j = 0; j < numCells.y; j++) for (int k = 0; k < numCells.z; k++) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Ex(nodeIdx) = e[idx].x; Ey(nodeIdx) = e[idx].y; Ez(nodeIdx) = e[idx].z; } } template<> inline void Grid<FP, YeeGridType>::loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Bx(nodeIdx) = b[idx].x; By(nodeIdx) = b[idx].y; Bz(nodeIdx) = b[idx].z; } } template<> inline void Grid<FP, YeeGridType>::loadCurrents(const FP3 * currents, const Int3 * 
minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; i++) for (int j = 0; j < numCells.y; j++) for (int k = 0; k < numCells.z; k++) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Jx(nodeIdx) = currents[idx].x; Jy(nodeIdx) = currents[idx].y; Jz(nodeIdx) = currents[idx].z; } }*/ }
matrix.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* Typedef declaration. 
*/ struct _MatrixInfo { CacheType type; size_t columns, rows, stride; MagickSizeType length; MagickBooleanType mapped, synchronize; char path[MagickPathExtent]; int file; void *elements; SemaphoreInfo *semaphore; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMatrixInfo() allocates the ImageInfo structure. % % The format of the AcquireMatrixInfo method is: % % MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows, % const size_t stride,ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: the matrix columns. % % o rows: the matrix rows. % % o stride: the matrix stride. % % o exception: return any errors or warnings in this structure. % */ #if defined(SIGBUS) static void MatrixSignalHandler(int status) { ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache"); } #endif static inline MagickOffsetType WriteMatrixElements( const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file,offset,SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return((MagickOffsetType) -1); } #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) LONG_MAX)); #else count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) LONG_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PWRITE) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return(i); } static 
MagickBooleanType SetMatrixExtent(
  MatrixInfo *magick_restrict matrix_info,MagickSizeType length)
{
  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Grow the scratch file to at least 'length' bytes by writing a single byte
    at offset length-1 (a sparse extend).  Returns MagickTrue on success.
  */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);  /* length does not fit in a signed offset */
  offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);  /* file is already large enough */
  extent=(MagickOffsetType) length-1;
  count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  /* when synchronizing, back the extent with real blocks up front */
  if (matrix_info->synchronize != MagickFalse)
    (void) posix_fallocate(matrix_info->file,offset+1,extent-offset);
#endif
#if defined(SIGBUS)
  (void) signal(SIGBUS,MatrixSignalHandler);
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}

MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns,
  const size_t rows,const size_t stride,ExceptionInfo *exception)
{
  char
    *synchronize;

  MagickBooleanType
    status;

  MatrixInfo
    *matrix_info;

  matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info));
  if (matrix_info == (MatrixInfo *) NULL)
    return((MatrixInfo *) NULL);
  (void) memset(matrix_info,0,sizeof(*matrix_info));
  matrix_info->signature=MagickCoreSignature;
  matrix_info->columns=columns;
  matrix_info->rows=rows;
  matrix_info->stride=stride;
  matrix_info->semaphore=AcquireSemaphoreInfo();
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      matrix_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Reject columns*rows*stride overflow by checking the reverse division.
    NOTE(review): a rows or stride of 0 would fault in the division below --
    presumably callers guarantee non-zero dimensions; confirm.
  */
  matrix_info->length=(MagickSizeType) columns*rows*stride;
  if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'","matrix cache");
      return(DestroyMatrixInfo(matrix_info));
    }
  /*
    Prefer heap memory, then an anonymous memory map, then a disk scratch
    file (optionally memory-mapped).
  */
  matrix_info->type=MemoryCache;
  status=AcquireMagickResource(AreaResource,matrix_info->length);
  if ((status != MagickFalse) &&
      (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length)))
    {
      status=AcquireMagickResource(MemoryResource,matrix_info->length);
      if (status != MagickFalse)
        {
          matrix_info->mapped=MagickFalse;
          matrix_info->elements=AcquireMagickMemory((size_t)
            matrix_info->length);
          if (matrix_info->elements == NULL)
            {
              /* heap allocation failed: fall back to an anonymous map */
              matrix_info->mapped=MagickTrue;
              matrix_info->elements=MapBlob(-1,IOMode,0,(size_t)
                matrix_info->length);
            }
          if (matrix_info->elements == (unsigned short *) NULL)
            RelinquishMagickResource(MemoryResource,matrix_info->length);
        }
    }
  matrix_info->file=(-1);
  if (matrix_info->elements == (unsigned short *) NULL)
    {
      status=AcquireMagickResource(DiskResource,matrix_info->length);
      if (status == MagickFalse)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
            "CacheResourcesExhausted","`%s'","matrix cache");
          return(DestroyMatrixInfo(matrix_info));
        }
      matrix_info->type=DiskCache;
      matrix_info->file=AcquireUniqueFileResource(matrix_info->path);
      if (matrix_info->file == -1)
        return(DestroyMatrixInfo(matrix_info));
      /* if the map resource permits, memory-map the scratch file */
      status=AcquireMagickResource(MapResource,matrix_info->length);
      if (status != MagickFalse)
        {
          status=SetMatrixExtent(matrix_info,matrix_info->length);
          if (status != MagickFalse)
            matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0,
              (size_t) matrix_info->length);
          if (matrix_info->elements != NULL)
            matrix_info->type=MapCache;
          else
            RelinquishMagickResource(MapResource,matrix_info->length);
        }
    }
  return(matrix_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e M a g i c k M a t r i x                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireMagickMatrix() allocates and returns a matrix in the form of an
%  array of pointers to an array of doubles, with all values pre-set to zero.
%
%  This used to generate the two dimensional matrix, and vectors required
%  for the GaussJordanElimination() method below, solving some system of
%  simultaneous equations.
%
%  The format of the AcquireMagickMatrix method is:
%
%      double **AcquireMagickMatrix(const size_t number_rows,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o number_rows: the number of pointers in the array of pointers (the
%      first dimension).
%
%    o size: the size of the array of doubles each pointer points to (the
%      second dimension).
%
*/
MagickExport double **AcquireMagickMatrix(const size_t number_rows,
  const size_t size)
{
  double
    **rows;

  ssize_t
    column,
    row;

  /*
    Allocate the array of row pointers, then each zero-filled row; on any
    allocation failure release everything acquired so far and return NULL.
  */
  rows=(double **) AcquireQuantumMemory(number_rows,sizeof(*rows));
  if (rows == (double **) NULL)
    return((double **) NULL);
  row=0;
  while (row < (ssize_t) number_rows)
  {
    rows[row]=(double *) AcquireQuantumMemory(size,sizeof(*rows[row]));
    if (rows[row] == (double *) NULL)
      {
        /* unwind: free the rows already allocated, then the pointer array */
        while (row > 0)
        {
          row--;
          rows[row]=(double *) RelinquishMagickMemory(rows[row]);
        }
        rows=(double **) RelinquishMagickMemory(rows);
        return((double **) NULL);
      }
    for (column=0; column < (ssize_t) size; column++)
      rows[row][column]=0.0;
    row++;
  }
  return(rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y M a t r i x I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyMatrixInfo() dereferences a matrix, deallocating memory associated
%  with the matrix.
%
%  The format of the DestroyMatrixInfo method is:
%
%      MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info)
{
  assert(matrix_info != (MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(matrix_info->semaphore);
  switch (matrix_info->type)
  {
    case MemoryCache:
    {
      if (matrix_info->mapped == MagickFalse)
        matrix_info->elements=RelinquishMagickMemory(matrix_info->elements);
      else
        {
          /* anonymous memory map rather than heap memory */
          (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
          matrix_info->elements=(unsigned short *) NULL;
        }
      RelinquishMagickResource(MemoryResource,matrix_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length);
      matrix_info->elements=NULL;
      RelinquishMagickResource(MapResource,matrix_info->length);
      /* no break: a map cache is disk-backed, so fall through to close and
         remove the scratch file as well */
    }
    case DiskCache:
    {
      if (matrix_info->file != -1)
        (void) close(matrix_info->file);
      (void) RelinquishUniqueFileResource(matrix_info->path);
      RelinquishMagickResource(DiskResource,matrix_info->length);
      break;
    }
    default:
      break;
  }
  UnlockSemaphoreInfo(matrix_info->semaphore);
  RelinquishSemaphoreInfo(&matrix_info->semaphore);
  return((MatrixInfo *) RelinquishMagickMemory(matrix_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G a u s s J o r d a n E l i m i n a t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussJordanElimination() returns a matrix in reduced row echelon form,
%  while simultaneously reducing and thus solving the augmented results
%  matrix.
%
%  See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination
%
%  The format of the GaussJordanElimination method is:
%
%      MagickBooleanType GaussJordanElimination(double **matrix,
%        double **vectors,const size_t rank,const size_t number_vectors)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to be reduced, as an 'array of row pointers'.
%
%    o vectors: the additional matrix augmenting the matrix for row reduction.
%      Producing an 'array of column vectors'.
%
%    o rank: The size of the matrix (both rows and columns).
%      Also represents the number of terms that need to be solved.
%
%    o number_vectors: Number of vector columns, augmenting the above matrix.
%      Usually 1, but can be more for more complex equation solving.
%
%  Note that the 'matrix' is given as an 'array of row pointers' of rank size.
%  That is values can be assigned as   matrix[row][column]   where 'row' is
%  typically the equation, and 'column' is the term of the equation.
%  That is the matrix is in the form of a 'row first array'.
%
%  However 'vectors' is an 'array of column pointers' which can have any number
%  of columns, with each column array the same 'rank' size as 'matrix'.
%
%  This allows for simpler handling of the results, especially if only one
%  column 'vector' is all that is required to produce the desired solution.
%
%  For example, the 'vectors' can consist of a pointer to a simple array of
%  doubles, when only one set of simultaneous equations is to be solved from
%  the given set of coefficient weighted terms.
%
%     double **matrix = AcquireMagickMatrix(8UL,8UL);
%     double coefficients[8];
%     ...
%     GaussJordanElimination(matrix, &coefficients, 8UL, 1UL);
%
%  However by specifying more 'columns' (as an 'array of vector columns'),
%  you can use this function to solve a set of 'separable' equations.
%
%  For example a distortion function where   u = U(x,y)   v = V(x,y)
%  And the functions U() and V() have separate coefficients, but are being
%  generated from a common x,y->u,v data set.
%
%  Another example is generation of a color gradient from a set of colors at
%  specific coordinates, such as a list   x,y -> r,g,b,a.
%
%  You can also use the 'vectors' to generate an inverse of the given 'matrix'
%  though as a 'column first array' rather than a 'row first array'.
For % details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors,const size_t rank,const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns)); rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows)); pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots=(ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns=(ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows=(ssize_t *) RelinquishMagickMemory(rows); return(MagickFalse); } (void) memset(columns,0,rank*sizeof(*columns)); (void) memset(rows,0,rank*sizeof(*rows)); (void) memset(pivots,0,rank*sizeof(*pivots)); column=0; row=0; for (i=0; i < (ssize_t) rank; i++) { max=0.0; for (j=0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k=0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return(MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max=fabs(matrix[j][k]); row=j; column=k; } } pivots[column]++; if (row != column) { for (k=0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k],matrix[column][k]); for (k=0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row],vectors[k][column]); } rows[i]=row; columns[i]=column; if (matrix[column][column] == 0.0) return(MagickFalse); /* sigularity */ scale=PerceptibleReciprocal(matrix[column][column]); matrix[column][column]=1.0; for (j=0; j < (ssize_t) rank; j++) matrix[column][j]*=scale; for (j=0; j < (ssize_t) number_vectors; j++) vectors[j][column]*=scale; for (j=0; j < (ssize_t) rank; j++) if (j != column) { 
scale=matrix[j][column]; matrix[j][column]=0.0; for (k=0; k < (ssize_t) rank; k++) matrix[j][k]-=scale*matrix[column][k]; for (k=0; k < (ssize_t) number_vectors; k++) vectors[k][j]-=scale*vectors[k][column]; } } for (j=(ssize_t) rank-1; j >= 0; j--) if (columns[j] != rows[j]) for (i=0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]); pivots=(ssize_t *) RelinquishMagickMemory(pivots); rows=(ssize_t *) RelinquishMagickMemory(rows); columns=(ssize_t *) RelinquishMagickMemory(columns); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x C o l u m n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixColumns() returns the number of columns in the matrix. % % The format of the GetMatrixColumns method is: % % size_t GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->columns); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixElement() returns the specifed element in the matrix. % % The format of the GetMatrixElement method is: % % MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: return the matrix element in this buffer. 
%
*/
/* Clamp an x-offset into the valid column range [0, columns-1]. */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

/* Clamp a y-offset into the valid row range [0, rows-1]. */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

/*
  Read 'length' bytes into 'buffer' from the matrix scratch file starting at
  byte 'offset'.  Returns the number of bytes read, or -1 on a seek failure.
  When pread() is unavailable the lseek()+read() pair is serialized under the
  matrix semaphore; short reads are retried and EINTR is not treated as fatal.
*/
static inline MagickOffsetType ReadMatrixElements(
  const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  LockSemaphoreInfo(matrix_info->semaphore);
  if (lseek(matrix_info->file,offset,SEEK_SET) < 0)
    {
      UnlockSemaphoreInfo(matrix_info->semaphore);
      return((MagickOffsetType) -1);
    }
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) LONG_MAX));
#else
    count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) LONG_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard I/O error (or EOF): report bytes read so far */
      }
  }
#if !defined(MAGICKCORE_HAVE_PREAD)
  UnlockSemaphoreInfo(matrix_info->semaphore);
#endif
  return(i);
}

MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /* out-of-range offsets are clamped to the nearest edge element, so reads
     never fail on bad coordinates (unlike SetMatrixElement, which rejects
     out-of-range offsets) */
  i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+
    EdgeX(x,matrix_info->columns);
  if (matrix_info->type != DiskCache)
    {
      (void) memcpy(value,(unsigned char *) matrix_info->elements+i*
        matrix_info->stride,matrix_info->stride);
      return(MagickTrue);
    }
  count=ReadMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t M a t r i x R o w s                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMatrixRows() returns the number of rows in the matrix.
%
%  The format of the GetMatrixRows method is:
%
%      size_t GetMatrixRows(const MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info)
{
  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  return(matrix_info->rows);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   L e a s t S q u a r e s A d d T e r m s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LeastSquaresAddTerms() adds one set of terms and associated results to the
%  given matrix and vectors for solving using least-squares function fitting.
%
%  The format of the LeastSquaresAddTerms method is:
%
%      void LeastSquaresAddTerms(double **matrix,double **vectors,
%        const double *terms,const double *results,const size_t rank,
%        const size_t number_vectors);
%
%  A description of each parameter follows:
%
%    o matrix: the square matrix to add given terms/results to.
%
%    o vectors: the result vectors to add terms/results to.
%
%    o terms: the pre-calculated terms (without the unknown coefficient
%      weights) that forms the equation being added.
%
%    o results: the result(s) that should be generated from the given terms
%      weighted by the yet-to-be-solved coefficients.
%
%    o rank: the rank or size of the dimensions of the square matrix.
%      Also the length of vectors, and number of terms being added.
%
%    o number_vectors: Number of result vectors, and number of results being
%      added.  Also represents the number of separable systems of equations
%      that is being solved.
%
%  Example of use...
% % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n"); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    max_value,
    min_value,
    scale_factor;

  Image
    *image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (matrix_info->stride < sizeof(double))
    return((Image *) NULL);  /* elements must be (at least) doubles */
  /*
    Determine range of matrix.
  */
  (void) GetMatrixElement(matrix_info,0,0,&min_value);
  max_value=min_value;
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) matrix_info->columns; x++)
    {
      double
        value;

      /* elements that fail to read are silently skipped in the scan */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      if (value < min_value)
        min_value=value;
      else
        if (value > max_value)
          max_value=value;
    }
  }
  /*
    Map [min_value,max_value] linearly onto [0,QuantumRange]; an all-zero
    matrix maps to black, a constant non-zero matrix to QuantumRange.
  */
  if ((min_value == 0.0) && (max_value == 0.0))
    scale_factor=0;
  else
    if (min_value == max_value)
      {
        scale_factor=(double) QuantumRange/min_value;
        min_value=0;
      }
    else
      scale_factor=(double) QuantumRange/(max_value-min_value);
  /*
    Convert matrix to image.
    NOTE(review): the AcquireImage() result is dereferenced without a NULL
    check -- presumably it cannot fail here; confirm.
  */
  image=AcquireImage((ImageInfo *) NULL,exception);
  image->columns=matrix_info->columns;
  image->rows=matrix_info->rows;
  image->colorspace=GRAYColorspace;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      value;

    Quantum
      *q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* unreadable elements leave the queued pixel untouched */
      if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse)
        continue;
      value=scale_factor*(value-min_value);
      *q=ClampToQuantum(value);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N u l l M a t r i x                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NullMatrix() sets all elements of the matrix to zero.
%
%  The format of the NullMatrix method is:
%
%      MagickBooleanType *NullMatrix(MatrixInfo *matrix_info)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix.
%
*/
MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info)
{
  ssize_t
    x;

  ssize_t
    count,
    y;

  unsigned char
    value;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  if (matrix_info->type != DiskCache)
    {
      /* memory- or map-backed caches can be zeroed in one memset */
      (void) memset(matrix_info->elements,0,(size_t)
        matrix_info->length);
      return(MagickTrue);
    }
  /*
    Disk-backed cache: rewind and write zero bytes one at a time.
    NOTE(review): the inner bound is matrix_info->length (the TOTAL byte
    count), not the bytes of a single row, so rows*length bytes are written
    in all -- the file is zeroed correctly but with presumably far more I/O
    than intended; confirm against upstream before changing.
  */
  value=0;
  (void) lseek(matrix_info->file,0,SEEK_SET);
  for (y=0; y < (ssize_t) matrix_info->rows; y++)
  {
    for (x=0; x < (ssize_t) matrix_info->length; x++)
    {
      count=write(matrix_info->file,&value,sizeof(value));
      if (count != (ssize_t) sizeof(value))
        break;
    }
    if (x < (ssize_t) matrix_info->length)
      break;  /* a short write aborts the zeroing */
  }
  return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e l i n q u i s h M a g i c k M a t r i x                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RelinquishMagickMatrix() frees the previously acquired matrix (array of
%  pointers to arrays of doubles).
%
%  The format of the RelinquishMagickMatrix method is:
%
%      double **RelinquishMagickMatrix(double **matrix,
%        const size_t number_rows)
%
%  A description of each parameter follows:
%
%    o matrix: the matrix to relinquish
%
%    o number_rows: the first dimension of the acquired matrix (number of
%      pointers)
%
*/
MagickExport double **RelinquishMagickMatrix(double **matrix,
  const size_t number_rows)
{
  ssize_t
    i;

  /* NULL-safe: a NULL matrix is returned unchanged */
  if (matrix == (double **) NULL )
    return(matrix);
  for (i=0; i < (ssize_t) number_rows; i++)
    matrix[i]=(double *) RelinquishMagickMemory(matrix[i]);
  matrix=(double **) RelinquishMagickMemory(matrix);
  return(matrix);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t M a t r i x E l e m e n t                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMatrixElement() sets the specified element in the matrix.
%
%  The format of the SetMatrixElement method is:
%
%      MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
%        const ssize_t x,const ssize_t y,const void *value)
%
%  A description of each parameter follows:
%
%    o matrix_info: the matrix columns.
%
%    o x: the matrix x-offset.
%
%    o y: the matrix y-offset.
%
%    o value: set the matrix element to this value.
%
*/
MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info,
  const ssize_t x,const ssize_t y,const void *value)
{
  MagickOffsetType
    count,
    i;

  assert(matrix_info != (const MatrixInfo *) NULL);
  assert(matrix_info->signature == MagickCoreSignature);
  /*
    Unlike GetMatrixElement(), which clamps out-of-range offsets to the
    nearest edge, writes are rejected when the flattened index falls outside
    the cache.  NOTE(review): a negative x with a positive y still yields an
    in-range index (it folds into the previous row) -- presumably callers
    pass valid offsets; confirm.
  */
  i=(MagickOffsetType) y*matrix_info->columns+x;
  if ((i < 0) ||
      ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length))
    return(MagickFalse);
  if (matrix_info->type != DiskCache)
    {
      (void) memcpy((unsigned char *) matrix_info->elements+i*
        matrix_info->stride,value,matrix_info->stride);
      return(MagickTrue);
    }
  count=WriteMatrixElements(matrix_info,i*matrix_info->stride,
    matrix_info->stride,(unsigned char *) value);
  if (count != (MagickOffsetType) matrix_info->stride)
    return(MagickFalse);
  return(MagickTrue);
}
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *oriented;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Apply the inverse of the EXIF-style orientation so the result displays
    top-left; an unknown or already-top-left orientation yields a clone.
  */
  switch (orientation)
  {
    case TopRightOrientation:
      oriented=FlopImage(image,exception);
      break;
    case BottomRightOrientation:
      oriented=RotateImage(image,180.0,exception);
      break;
    case BottomLeftOrientation:
      oriented=FlipImage(image,exception);
      break;
    case LeftTopOrientation:
      oriented=TransposeImage(image,exception);
      break;
    case RightTopOrientation:
      oriented=RotateImage(image,90.0,exception);
      break;
    case RightBottomOrientation:
      oriented=TransverseImage(image,exception);
      break;
    case LeftBottomOrientation:
      oriented=RotateImage(image,270.0,exception);
      break;
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
      oriented=CloneImage(image,0,0,MagickTrue,exception);
      break;
  }
  if (oriented != (Image *) NULL)
    oriented->orientation=TopLeftOrientation;
  return(oriented);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /* clip the chop rectangle to the image bounds */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /* the result is the source image minus the chopped band in each axis */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first copy the rows above the chop region, dropping
    the chopped columns from each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* only columns outside [extent.x, extent.x+width) are kept */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: now copy the rows below the chopped band, again
    dropping the chopped columns from each row.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /* source rows start below the chopped band; destination rows continue
       where the first pass left off */
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e C M Y K I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImage method is:
%
%      Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  /*
    Each group of 4 consecutive list members (C, M, Y, K planes in order)
    produces one CMYK output image.
  */
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      /*
        Views are re-acquired per plane because `images' advances each pass.
      */
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /*
            Plane images encode ink coverage as inverted intensity.
          */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The crop is interpreted relative to the virtual canvas (image->page);
    fall back to the pixel extent when no page geometry is set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      crop_image->alpha_trait=BlendPixelTrait;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the requested page rectangle into pixel coordinates, clamping
    each edge to the actual pixel data.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image.  Each output row y maps straight to source row page.y+y.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p   I m a g e   T o   T i l e s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Round the fraction to nearest integer (half-way cases round up).
*/
static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag): geometry.width x geometry.height is the
        tile count; the offsets bias the usable area (or, with the '!' flag,
        extend it).
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            /*
              FIX: was (geometry.y < -1 ? geometry.y : 0), which wrongly
              treated geometry.y == -1 as non-negative; made symmetric with
              the x-axis computation below.
            */
            crop.height=(size_t) MagickRound((double) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;   /* increment now to find height */
              crop.width=(size_t) MagickRound((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH, marching across the page.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    Image is smaller than the tile: return an untouched copy.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    /*
      NOTE(review): sibling functions use QueueCacheViewAuthenticPixels here;
      Get also reads the existing destination pixels -- presumably deliberate,
      confirm before changing.
    */
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate extent image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fill the canvas with the background color, then composite the source at
    the negated geometry offset.
  */
  status=SetImageBackgroundColor(extent_image,exception);
  if (status == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  status=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is written to destination row rows-y-1.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the virtual-canvas offset as well.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: p walks the source row forward while q walks the
    destination row backward.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start q one pixel past the row end; it is decremented before each use.
    */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the virtual-canvas offset as well.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Copy a columns x rows rectangle of pixels from source (sx,sy) to
  destination (dx,dy); returns MagickFalse if any scanline fails.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0, columns) x [0, rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the wrap-around is realized by copying the four quadrants
    (bottom-right, bottom-left, top-right, top-left of the source) to the
    opposite corners of the destination.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Validate the exception structure for consistency with the other
    transform methods in this file.
  */
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Shave is a centered crop: remove shave_info->width columns from each
    side and shave_info->height rows from top and bottom.
  */
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas to match the shaved pixel extent.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
% % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) 
|| (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } 
SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts 
scaling and/or cropping information as a region
%  geometry specification.  If the operation fails, the original image handle
%  is left as is.
%
%  This should only be used for single images.
%
%  This function destroys what it assumes to be a single image list.
%  If the input image is part of a larger list, all other images in that list
%  will be simply 'lost', not destroyed.
%
%  Also if the crop generates a list of images only the first image is resized.
%  And finally if the crop succeeds and the resize failed, you will get a
%  cropped image, as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls
%  to CropImageToTiles() or ResizeImage(), as appropriate.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
%        const char *image_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: An image geometry string.  This geometry defines the
%      final size of the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          /*
            The original handle is destroyed and replaced by the first image
            of the crop list; any sibling images in that list are abandoned
            (see the note in the function description above).
          */
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  /*
    Swap the caller's handle for the resized image.
  */
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The transposed canvas swaps columns and rows.
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Source row (rows-y-1) is written out as destination column (rows-y-1),
      top to bottom.
    */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy only channels whose traits are defined in both images.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the page geometry to match the rotated canvas.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The transversed canvas swaps columns and rows.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Source row y is written out as destination column (rows-y-1).
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start one pixel past the end of the destination column; q is stepped
      backward before each write so the column is filled in reverse order.
    */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /*
        Copy only channels whose traits are defined in both images.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Swap the page geometry and reflect the page offsets to match the
    rotated-and-mirrored canvas.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *crop_image;

  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Crop to the bounding box, expressed in page coordinates.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate bounding box: return a transparent 1x1 stand-in that keeps
    the original page geometry.
  */
  crop_image=CloneImage(image,1,1,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
  crop_image->alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(crop_image,exception);
  crop_image->page=image->page;
  crop_image->page.x=(-1);
  crop_image->page.y=(-1);
  return(crop_image);
}
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinate of two opposite vertices (vertex % nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number it % represents, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by restorage_class in the reduced tree. Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the storage_class phase for realistic values of Cmax. 
If
%  color components in the input image are quantized to k-bit precision,
%  so that Cmax = 2^k-1, the tree would need k levels below the root node to
%  allow representing each possible input color in a leaf.  This becomes
%  prohibitive because the tree's total number of nodes is 1 +
%  sum(i=1, k, 8^i).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially,  n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth. The combination of these sums
%    and n2 will ultimately characterize the mean color of a set of pixels
%    represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the
%    quantization error for a node.
%
%  Reduction repeatedly prunes the tree until the number of nodes with n2
%  > 0 is less than or equal to the maximum number of colors allowed in
%  the output image.  On any given iteration over the tree, it selects
%  those nodes whose E count is minimal for pruning and merges their color
%  statistics upward.
It uses a pruning threshold, Ep, to govern node
%  selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2,  Sr, Sg,  and  Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image. At the
%  beginning of reduction, n2 = 0 for all nodes except at the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors within
%  the cubic volume which the node represents.  This includes n1 - n2
%  pixels whose colors should be defined by nodes at a lower level in the
%  tree.
%
%  Assignment generates the output image from the pruned tree.  The output
%  image consists of two parts: (1)  A color map, which is an array of
%  color descriptions (RGB triples) for each color present in the output
%  image; (2)  A pixel array, which represents each pixel as an index
%  into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2  > 0, it divides Sr, Sg, and Sb by n2 .  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
% % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. 
*/ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. */ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. 
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  const char
    *dither_option;

  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*quantize_info));
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit dithering and error-measurement preferences from the image info;
    an explicit "dither" image option overrides the boolean dither flag.
  */
  if (image_info->dither == MagickFalse)
    quantize_info->dither_method=NoDitherMethod;
  else
    quantize_info->dither_method=RiemersmaDitherMethod;
  dither_option=GetImageOption(image_info,"dither");
  if (dither_option != (const char *) NULL)
    quantize_info->dither_method=(DitherMethod) ParseCommandOption(
      MagickDitherOptions,MagickFalse,dither_option);
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1)  A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2)  A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2  > 0, it divides Sr, Sg, and Sb by n2 .  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally,  the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  Convert a raw pixel into a DoublePixelPacket.  When alpha is associated,
  the color channels are scaled by the normalized alpha so transparent
  pixels contribute proportionally less color to the tree.
*/
static inline void AssociateAlphaPixel(const Image *image,
  const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (GetPixelAlpha(image,pixel) == OpaqueAlpha))
    {
      /*
        Alpha ignored or fully opaque: copy channels through unchanged.
      */
      alpha_pixel->red=(double) GetPixelRed(image,pixel);
      alpha_pixel->green=(double) GetPixelGreen(image,pixel);
      alpha_pixel->blue=(double) GetPixelBlue(image,pixel);
      alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
      return;
    }
  alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel));
  alpha_pixel->red=alpha*GetPixelRed(image,pixel);
  alpha_pixel->green=alpha*GetPixelGreen(image,pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(image,pixel);
  alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel);
}

/*
  Same association as AssociateAlphaPixel(), but for a PixelInfo (e.g. a
  colormap entry) rather than a raw pixel.
*/
static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info,
  const PixelInfo *pixel,DoublePixelPacket *alpha_pixel)
{
  double
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->alpha == OpaqueAlpha))
    {
      alpha_pixel->red=(double) pixel->red;
      alpha_pixel->green=(double) pixel->green;
      alpha_pixel->blue=(double) pixel->blue;
      alpha_pixel->alpha=(double) pixel->alpha;
      return;
    }
  alpha=(double) (QuantumScale*pixel->alpha);
  alpha_pixel->red=alpha*pixel->red;
  alpha_pixel->green=alpha*pixel->green;
  alpha_pixel->blue=alpha*pixel->blue;
  alpha_pixel->alpha=(double) pixel->alpha;
}

/*
  Map a color to a child slot at tree depth `index': one bit per channel
  (red -> bit 0, green -> bit 1, blue -> bit 2, and alpha -> bit 3 when
  alpha is associated), taken from bit `index' of the 8-bit channel value.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const DoublePixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) |
    ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3;
  return(id);
}

static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define AssignImageTag  "Assign/Image"

  ColorspaceType
    colorspace;

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  colorspace=image->colorspace;  /* remembered so we can transform back below */
  if (cube_info->quantize_info->colorspace != UndefinedColorspace)
    (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace,
      exception);
  if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* DefineImageColormap() rebuilds colors from 0 as it walks the tree. */
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if (cube_info->quantize_info->dither_method != NoDitherMethod)
    (void) DitherImage(image,cube_info,exception);
  else
    {
      CacheView
        *image_view;

      MagickBooleanType
        status;

      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Per-thread copy: ClosestColor() mutates target/distance fields. */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          DoublePixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.
          */
          /* Run-length: extend `count' over adjacent identical pixels so the
             tree lookup is done once per run. */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
          {
            PixelInfo
              packet;

            GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet);
            if (IsPixelEquivalent(image,q,&packet) == MagickFalse)
              break;
          }
          AssociateAlphaPixel(image,&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          /* Larger than any reachable squared distance, so the first
             candidate always wins. */
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(image,(Quantum) index,q);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRed(image,ClampToQuantum(
                  image->colormap[index].red),q);
                SetPixelGreen(image,ClampToQuantum(
                  image->colormap[index].green),q);
                SetPixelBlue(image,ClampToQuantum(
                  image->colormap[index].blue),q);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelAlpha(image,ClampToQuantum(
                    image->colormap[index].alpha),q);
              }
            q+=GetPixelChannels(image);
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image,exception);
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    {
      double
        intensity;

      /*
        Monochrome image.
      */
      /* Force the two-entry colormap to pure black/white, keeping the
         darker entry first. */
      intensity=0.0;
      if ((image->colors > 1) &&
          (GetPixelInfoLuma(image->colormap+0) >
           GetPixelInfoLuma(image->colormap+1)))
        intensity=(double) QuantumRange;
      image->colormap[0].red=intensity;
      image->colormap[0].green=intensity;
      image->colormap[0].blue=intensity;
      if (image->colors > 1)
        {
          image->colormap[1].red=(double) QuantumRange-intensity;
          image->colormap[1].green=(double) QuantumRange-intensity;
          image->colormap[1].blue=(double) QuantumRange-intensity;
        }
    }
  (void) SyncImage(image,exception);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (IssRGBCompatibleColorspace(colorspace) == MagickFalse))
    (void) TransformImageColorspace(image,colorspace,exception);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the storage_class phase for realistic values of
%  Cmax.  If color components in the input image are quantized to k-bit
%  precision, so that Cmax = 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1, k, 8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.
At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  Decide whether alpha participates in classification: only when the image
  blends alpha, and never for 2-color grayscale output.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->alpha_trait == BlendPixelTrait ? MagickTrue :
    MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      ((cube_info->quantize_info->colorspace == LinearGRAYColorspace) ||
       (cube_info->quantize_info->colorspace == GRAYColorspace)))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  DoublePixelPacket
    error,
    mid,
    midpoint,
    pixel;

  MagickBooleanType
    proceed;

  double
    bisect;

  NodeInfo
    *node_info;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace,exception);
  else
    if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
      (void) TransformImageColorspace((Image *) image,sRGBColorspace,
        exception);
  midpoint.red=(double) QuantumRange/2.0;
  midpoint.green=(double) QuantumRange/2.0;
  midpoint.blue=(double) QuantumRange/2.0;
  midpoint.alpha=(double) QuantumRange/2.0;
  error.alpha=0.0;  /* stays 0 unless alpha is associated below */
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      /* Run-length over adjacent identical pixels: classify once per run. */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        double
          distance;

        /* Track the center of the child cube we descend into. */
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","`%s'",
                  image->filename);
                continue;
              }
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance))
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        /* NOTE(review): root error is bumped by the node's cumulative error
           on every level of every pixel run -- presumably intentional as a
           relative pruning weight; confirm against PruneLevel(). */
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        /* Too many leaves: collapse to the working depth, then continue in
           the depth-limited loop below. */
        PruneToCubeDepth(cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify the remaining rows, but only to cube_info->depth levels.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
      {
        PixelInfo
          packet;

        GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet);
        if (IsPixelEquivalent(image,p,&packet) == MagickFalse)
          break;
      }
      AssociateAlphaPixel(image,cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((double) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        double
          distance;

        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.alpha+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  ResourceLimitError,"MemoryAllocationFailed","%s",
                  image->filename);
                continue;
              }
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.alpha=QuantumScale*(pixel.alpha-mid.alpha);
        distance=(double) (error.red*error.red+error.green*error.green+
          error.blue*error.blue+error.alpha*error.alpha);
        if (IsNaN(distance) != MagickFalse)
          distance=0.0;
        node_info->quantize_error+=count*sqrt(distance);
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel(pixel.alpha);
      else
        node_info->total_color.alpha+=count*QuantumScale*
          ClampPixel((MagickRealType) OpaqueAlpha);
      p+=count*GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception);
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  /* Start from defaults so unset fields are well defined even when
     quantize_info is NULL. */
  clone_info=(QuantizeInfo *) AcquireCriticalMemory(sizeof(*clone_info));
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      double
        pixel;

      register double
        alpha,
        beta,
        distance;

      register DoublePixelPacket
        *magick_restrict q;

      register PixelInfo
        *magick_restrict p;

      /*
        Determine if this color is "closest".
      */
      /* Best match so far lives in cube_info->distance/color_number; the
         per-channel comparisons below short-circuit once the partial
         squared distance already exceeds it. */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          /* Compare alpha-weighted colors when alpha participates. */
          alpha=(double) (QuantumScale*p->alpha);
          beta=(double) (QuantumScale*q->alpha);
        }
      pixel=alpha*p->red-beta*q->red;
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*p->green-beta*q->green;
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*p->blue-beta*q->blue;
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  if (cube_info->associate_alpha != MagickFalse)
                    {
                      pixel=p->alpha-q->alpha;
                      distance+=pixel*pixel;
                    }
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image,
  ExceptionInfo *exception)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image) == MagickFalse)
    return(MagickFalse);
  /* Re-quantize with the current color count at full tree depth: colors
     collapse onto unique entries and unused ones drop out. */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e I m a g e C o l o r m a p                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  of unique colors is not zero.  DefineImageColormap() returns the number of
%  colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register double
        alpha;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      /* alpha here is the reciprocal pixel count used to turn sums into
         means (not an alpha channel value). */
      alpha=(double) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          q->red=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.red);
          q->green=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.green);
          q->blue=(double) ClampToQuantum(alpha*QuantumRange*
            node_info->total_color.blue);
          q->alpha=(double) OpaqueAlpha;
        }
      else
        {
          double
            opacity;

          opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha);
          q->alpha=(double) ClampToQuantum(opacity);
          if (q->alpha == OpaqueAlpha)
            {
              q->red=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*QuantumRange*
                node_info->total_color.blue);
            }
          else
            {
              double
                gamma;

              /* Colors were premultiplied by alpha when classified; divide
                 it back out for partially transparent entries. */
              gamma=(double) (QuantumScale*q->alpha);
              gamma=PerceptibleReciprocal(gamma);
              q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.red);
              q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.green);
              q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange*
                node_info->total_color.blue);
              /* Remember the most-populated transparent entry. */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y C u b e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *next;

  /*
    Release color cube tree storage: walk the batch list, freeing each
    node array and its list link.  The queue is assumed non-empty (it is
    created by GetCubeInfo()).
  */
  for ( ; ; )
  {
    next=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=next;
    if (cube_info->node_queue == (Nodes *) NULL)
      break;
  }
  /*
    Release the dither cache, the owned quantize info, and the structure
    itself.
  */
  if (cube_info->memory_info != (MemoryInfo *) NULL)
    cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickCoreSignature);
  /* Invalidate the signature so use-after-free is caught by the asserts. */
  quantize_info->signature=(~MagickCoreSignature);
  return((QuantizeInfo *) RelinquishMagickMemory(quantize_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Free the per-thread error-row buffers allocated by AcquirePixelThreadSet().
  Safe on a partially-built set: NULL row pointers are skipped.
*/
static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (DoublePixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (DoublePixelPacket *) NULL)
      pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate, per worker thread, two rows of `count' error pixels (current and
  previous scanline).  Returns NULL on allocation failure.
*/
static DoublePixelPacket **AcquirePixelThreadSet(const size_t count)
{
  DoublePixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (DoublePixelPacket **) NULL)
    return((DoublePixelPacket **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    /* 2x count: one buffer holds both the current and previous rows. */
    pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2*
      sizeof(**pixels));
    if (pixels[i] == (DoublePixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Map a color to its slot in the dither cache: CacheShift high bits of each
  8-bit channel, packed red/green/blue (and alpha when associated).
*/
static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const DoublePixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info,
  ExceptionInfo *exception)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  const char
    *artifact;

  double
    amount;

  DoublePixelPacket
    **pixels;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
  */
  pixels=AcquirePixelThreadSet(image->columns);
  if (pixels == (DoublePixelPacket **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  /* "dither:diffusion-amount" scales the 7/16, 5/16 and 3/16 weights
     (the 1/16 term below is not scaled). */
  amount=1.0;
  artifact=GetImageArtifact(image,"dither:diffusion-amount");
  if (artifact != (const char *) NULL)
    amount=StringToDoubleInterval(artifact,1.0);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    CubeInfo
      cube;

    DoublePixelPacket
      *current,
      *previous;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    size_t
      index;

    ssize_t
      v;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    cube=(*cube_info);
    /* Ping-pong between the two halves of the thread's buffer: one row
       receives this scanline's error, the other holds the previous row's. */
    current=pixels[id]+(y & 0x01)*image->columns;
    previous=pixels[id]+((y+1) & 0x01)*image->columns;
    /* Serpentine scan: odd rows run right-to-left (v = -1). */
    v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      DoublePixelPacket
        color,
        pixel;

      register ssize_t
        i;

      ssize_t
        u;

      u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x;
      AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel);
      /* Add in the diffused error from the already-visited neighbors:
         7/16 left, 1/16 upper-ahead, 5/16 above, 3/16 upper-behind. */
      if (x > 0)
        {
          pixel.red+=7.0*amount*current[u-v].red/16;
          pixel.green+=7.0*amount*current[u-v].green/16;
          pixel.blue+=7.0*amount*current[u-v].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=7.0*amount*current[u-v].alpha/16;
        }
      if (y > 0)
        {
          if (x < (ssize_t) (image->columns-1))
            {
              pixel.red+=previous[u+v].red/16;
              pixel.green+=previous[u+v].green/16;
              pixel.blue+=previous[u+v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=previous[u+v].alpha/16;
            }
          pixel.red+=5.0*amount*previous[u].red/16;
          pixel.green+=5.0*amount*previous[u].green/16;
          pixel.blue+=5.0*amount*previous[u].blue/16;
          if (cube.associate_alpha != MagickFalse)
            pixel.alpha+=5.0*amount*previous[u].alpha/16;
          if (x > 0)
            {
              pixel.red+=3.0*amount*previous[u-v].red/16;
              pixel.green+=3.0*amount*previous[u-v].green/16;
              pixel.blue+=3.0*amount*previous[u-v].blue/16;
              if (cube.associate_alpha != MagickFalse)
                pixel.alpha+=3.0*amount*previous[u-v].alpha/16;
            }
        }
      pixel.red=(double) ClampPixel(pixel.red);
      pixel.green=(double) ClampPixel(pixel.green);
      pixel.blue=(double) ClampPixel(pixel.blue);
      if (cube.associate_alpha != MagickFalse)
        pixel.alpha=(double) ClampPixel(pixel.alpha);
      i=CacheOffset(&cube,&pixel);
      if (cube.cache[i] < 0)
        {
          register NodeInfo
            *node_info;

          register size_t
            node_id;

          /*
            Identify the deepest node containing the pixel's color.
          */
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            node_id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[node_id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[node_id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+
            1.0);
          ClosestColor(image,&cube,node_info->parent);
          /* Memoize the lookup for subsequent pixels in this color cell. */
          cube.cache[i]=(ssize_t) cube.color_number;
        }
      /*
        Assign pixel to closest colormap entry.
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int, ExceptionInfo *); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction,ExceptionInfo *exception) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) 
RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); 
Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { register Quantum *magick_restrict q; register ssize_t i; /* Distribute error. 
*/ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=p->weights[i]*p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. 
*/ (void) memmove(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. 
*/ (void) memset(cube_info->error,0,ErrorQueueLength*sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors. % */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; double sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. 
*/ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) memset(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. */ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither_method == NoDitherMethod) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) memset(cube_info->cache,(-1),sizeof(*cube_info->cache)*length); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. 
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        {
          /*
            Release the queue header so a failed node-array allocation does
            not leak it (it is not yet linked into cube_info->node_queue).
          */
          nodes=(Nodes *) RelinquishMagickMemory(nodes);
          return((NodeInfo *) NULL);
        }
      /*
        Push the new queue onto the cube's list of node queues; the list is
        walked later to release all nodes at once.
      */
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  /*
    Carve the next node out of the current queue and preset all fields to
    zero before wiring it to its parent.
  */
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) memset(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  G e t I m a g e Q u a n t i z e E r r o r                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  ssize_t
    index,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,exception);
  (void) memset(&image->error,0,sizeof(image->error));
  /*
    Only a colormapped (PseudoClass) image has a quantization error to
    measure; a DirectClass image reports zero error.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* three channel samples per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(ssize_t) GetPixelIndex(image,p);
      if (image->alpha_trait == BlendPixelTrait)
        {
          /*
            Weight each channel by its alpha before comparing the pixel
            against its colormap entry.
          */
          alpha=(double) (QuantumScale*GetPixelAlpha(image,p));
          beta=(double) (QuantumScale*image->colormap[index].alpha);
        }
      /*
        Accumulate per-channel absolute error (mean) and squared error
        (mean-square), tracking the single largest channel deviation.
      */
      distance=fabs((double) (alpha*GetPixelRed(image,p)-beta*
        image->colormap[index].red));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta*
        image->colormap[index].green));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta*
        image->colormap[index].blue));
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t Q u a n t i z e I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Reset to well-defined defaults: 256 colors, Riemersma dithering, no
    colorspace transform, and no quantization-error measurement.
  */
  (void) memset(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P o s t e r i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const DitherMethod dither_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither_method: choose from UndefinedDitherMethod, NoDitherMethod,
%      RiemersmaDitherMethod, FloydSteinbergDitherMethod.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
*/ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) ClampToQuantum((MagickRealType) QuantumRange*( \ MagickRound(QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait == BlendPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,PosterizeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; quantize_info->tree_depth=MaxTreeDepth; 
status=QuantizeImage(quantize_info,image,exception);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  P r u n e C h i l d                                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneChild method is:
%
%      PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.  An alpha-aware cube uses 16 children per node
    (4 bits per level), otherwise 8 (3 bits per level).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent so the pruned subtree's colors are
    still represented when the colormap is defined.
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.alpha+=node_info->total_color.alpha;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  P r u n e L e v e l                                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(cube_info,node_info->child[i]);
  /* Only nodes at the maximum tree depth are merged into their parents. */
  if (node_info->level == cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  P r u n e T o C u b e D e p t h                                            %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ?
8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(cube_info,node_info->child[i]);
  /* Nodes deeper than the configured cube depth are folded into parents. */
  if (node_info->level > cube_info->depth)
    PruneChild(cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u a n t i z e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->alpha_trait != BlendPixelTrait) { if (SetImageGray(image,exception) != MagickFalse) (void) SetGrayscaleImage(image,exception); } if ((quantize_info->dither_method == NoDitherMethod) && (image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace, exception); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait == BlendPixelTrait) && (depth > 5)) depth--; if (SetImageGray(image,exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. 
*/ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. 
*/
      status=QuantizeImage(quantize_info,images,exception);
      return(status);
    }
  status=MagickFalse;
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      if (quantize_info->dither_method != NoDitherMethod)
        depth--;  /* dithering hides error, so a shallower tree suffices */
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /*
    Phase 1: classify the colors of every image in the sequence into the
    shared color cube (progress monitoring is suspended per image).
  */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
      NULL,image->client_data);
    status=ClassifyImageColors(cube_info,image,exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /*
        Phase 2: assign each image's pixels to the reduced shared colormap.
      */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u a n t i z e E r r o r F l a t t e n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeErrorFlatten() traverses the color cube and flattens the
%  quantization error into a sorted 1D array.  This accelerates the color
%  reduction process.
%
%  Contributed by Yoya.
%
%  The format of the QuantizeErrorFlatten method is:
%
%      size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
%        const NodeInfo *node_info,const ssize_t offset,
%        double *quantize_error)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is current pointer.
%
%    o offset: quantize error offset.
%
%    o quantize_error: the quantization error vector.
%
*/
static size_t QuantizeErrorFlatten(const CubeInfo *cube_info,
  const NodeInfo *node_info,const ssize_t offset,double *quantize_error)
{
  register ssize_t
    i;

  size_t
    n,
    number_children;

  /*
    Guard against writing past the end of the error vector, which the caller
    sized to cube_info->nodes entries.
  */
  if (offset >= (ssize_t) cube_info->nodes)
    return(0);
  quantize_error[offset]=node_info->quantize_error;
  /*
    n counts how many slots this subtree consumed (this node plus all
    children, recursively); children are appended depth-first at offset+n.
  */
  n=1;
  number_children=cube_info->associate_alpha == MagickFalse ?
    8UL : 16UL;
  for (i=0; i < (ssize_t) number_children ; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n,
        quantize_error);
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(cube_info,node_info->child[i]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.
On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixel's colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except a the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. 
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube;

  MagickBooleanType
    ok;

  /*
    Validate the arguments, then build a color cube from the reference
    image and assign the target image's pixels to its colors.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cube=GetCubeInfo(quantize_info,MaxTreeDepth,quantize_info->number_colors);
  if (cube == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  ok=ClassifyImageColors(cube,remap_image,exception);
  if (ok != MagickFalse)
    {
      /*
        The reference image's color count becomes the quantization target.
      */
      cube->quantize_info->number_colors=cube->colors;
      ok=AssignImageColors(image,cube,exception);
    }
  DestroyCubeInfo(cube);
  return(ok);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image,ExceptionInfo *exception)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images,exception);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      /*
        Assign every image in the sequence to the reference colormap; stop
        on the first failure.
      */
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info,exception);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  IntensityCompare() orders two colormap entries by grayscale intensity for
  qsort(); the double difference is clamped to the int range before the
  narrowing cast.
*/
static int IntensityCompare(const void *x,const void *y)
{
  double
    intensity;

  PixelInfo
    *color_1,
    *color_2;

  color_1=(PixelInfo *) x;
  color_2=(PixelInfo *) y;
  intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)-
    GetPixelInfoIntensity((const Image *) NULL,color_2);
  if (intensity < (double) INT_MIN)
    intensity=(double) INT_MIN;
  if (intensity > (double) INT_MAX)
    intensity=(double) INT_MAX;
  return((int) intensity);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif

static MagickBooleanType SetGrayscaleImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  PixelInfo
    *colormap;

  register ssize_t
    i;

  size_t
    extent;

  ssize_t
    *colormap_index,
    j,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->type != GrayscaleType)
    (void) TransformImageColorspace(image,GRAYColorspace,exception);
  /*
    colormap_index maps a quantum intensity (ScaleQuantumToMap range) to a
    colormap slot; sized to cover both the current colormap and MaxMap.
  */
  extent=MagickMax(image->colors+1,MagickMax(MaxColormapSize,MaxMap+1));
  colormap_index=(ssize_t *) AcquireQuantumMemory(extent,
    sizeof(*colormap_index));
  if (colormap_index == (ssize_t *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->storage_class != PseudoClass)
    {
      /*
        Build a colormap from scratch: -1 marks intensities not yet seen.
      */
      (void) memset(colormap_index,(-1),extent*sizeof(*colormap_index));
      if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse)
        {
          colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      image->colors=0;
      status=MagickTrue;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register size_t
            intensity;

          intensity=ScaleQuantumToMap(GetPixelRed(image,q));
          if (colormap_index[intensity] < 0)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SetGrayscaleImage)
#endif
              /*
                Re-check inside the critical section so only one thread
                allocates the colormap entry for this intensity.
              */
              if (colormap_index[intensity] < 0)
                {
                  colormap_index[intensity]=(ssize_t) image->colors;
                  image->colormap[image->colors].red=(double)
                    GetPixelRed(image,q);
                  image->colormap[image->colors].green=(double)
                    GetPixelGreen(image,q);
                  image->colormap[image->colors].blue=(double)
                    GetPixelBlue(image,q);
                  image->colors++;
               }
            }
          SetPixelIndex(image,(Quantum) colormap_index[intensity],q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
    }
  /*
    Sort the colormap by intensity; alpha temporarily records each entry's
    original slot so colormap_index can be rebuilt after sorting.
  */
  (void) memset(colormap_index,0,extent*sizeof(*colormap_index));
  for (i=0; i < (ssize_t) image->colors; i++)
    image->colormap[i].alpha=(double) i;
  qsort((void *) image->colormap,image->colors,sizeof(PixelInfo),
    IntensityCompare);
  /*
    Collapse runs of equivalent entries into a compact colormap.
  */
  colormap=(PixelInfo *)
      AcquireQuantumMemory(image->colors,sizeof(*colormap));
  if (colormap == (PixelInfo *) NULL)
    {
      colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  j=0;
  colormap[j]=image->colormap[0];
  for (i=0; i < (ssize_t) image->colors; i++)
  {
    if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse)
      {
        j++;
        colormap[j]=image->colormap[i];
      }
    colormap_index[(ssize_t) image->colormap[i].alpha]=j;
  }
  image->colors=(size_t) (j+1);
  image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
  image->colormap=colormap;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /*
    Rewrite pixel indexes through colormap_index so they reference the new
    compacted, intensity-sorted colormap.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap(
        GetPixelIndex(image,q))],q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index);
  image->type=GrayscaleType;
  if (SetImageMonochrome(image,exception) != MagickFalse)
    image->type=BilevelType;
  return(status);
}
GB_unop__erfc_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__erfc_fp64_fp64)
// op(A') function:  GB (_unop_tran__erfc_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = erfc (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = erfc (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = erfc (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ERFC || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__erfc_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = erfc (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = erfc (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__erfc_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via a templated include
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_bool_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_bool_int64
// op(A') function:  GB_unop_tran__identity_bool_int64

// C type:   bool
// A type:   int64_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_bool_int64
(
    bool *Cx,               // Cx and Ax may be aliased
    const int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int64_t aij = Ax [p] ;
        bool z = (bool) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_bool_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via a templated include
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolutiondepthwise_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { #if __aarch64__ const int w = bottom_blob.w; #endif const int outw = top_blob.w; const int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; #if __aarch64__ float* outptr1 = out.row(1); const float* r3 = img0.row(3); for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4s, v11.4s}, [%3], #32 \n" // r10 r11 "mov v16.16b, %21.16b \n" // sum00 "mov v17.16b, %21.16b \n" // sum01 "mov v18.16b, %21.16b \n" // sum02 "mov v19.16b, %21.16b \n" // sum03 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r12 r13 r14 r15 "mov v20.16b, %21.16b \n" // sum10 "mov v21.16b, %21.16b \n" // sum11 "mov v22.16b, %21.16b \n" // sum12 "mov v23.16b, %21.16b \n" // sum13 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %15.4s, v11.4s \n" "fmla v18.4s, %15.4s, v12.4s \n" "fmla v19.4s, %15.4s, v13.4s \n" "fmla v20.4s, %12.4s, v10.4s \n" "fmla v21.4s, %12.4s, v11.4s \n" "fmla v22.4s, %12.4s, v12.4s \n" "fmla v23.4s, %12.4s, v13.4s \n" "add %3, %3, #32 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %16.4s, v12.4s \n" "fmla v18.4s, %16.4s, v13.4s \n" "fmla v19.4s, %16.4s, v14.4s \n" "fmla v20.4s, %13.4s, v11.4s \n" "fmla v21.4s, %13.4s, v12.4s \n" "fmla v22.4s, %13.4s, v13.4s \n" "fmla v23.4s, %13.4s, v14.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v10.4s, v11.4s}, [%4], #32 \n" // r20 r21 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %17.4s, v13.4s \n" "fmla v18.4s, %17.4s, v14.4s \n" "fmla v19.4s, %17.4s, v15.4s \n" "fmla 
v20.4s, %14.4s, v12.4s \n" "fmla v21.4s, %14.4s, v13.4s \n" "fmla v22.4s, %14.4s, v14.4s \n" "fmla v23.4s, %14.4s, v15.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4] \n" // r22 r23 r24 r25 "fmla v16.4s, %18.4s, v10.4s \n" "fmla v17.4s, %18.4s, v11.4s \n" "fmla v18.4s, %18.4s, v12.4s \n" "fmla v19.4s, %18.4s, v13.4s \n" "fmla v20.4s, %15.4s, v10.4s \n" "fmla v21.4s, %15.4s, v11.4s \n" "fmla v22.4s, %15.4s, v12.4s \n" "fmla v23.4s, %15.4s, v13.4s \n" "add %4, %4, #32 \n" "fmla v16.4s, %19.4s, v11.4s \n" "fmla v17.4s, %19.4s, v12.4s \n" "fmla v18.4s, %19.4s, v13.4s \n" "fmla v19.4s, %19.4s, v14.4s \n" "fmla v20.4s, %16.4s, v11.4s \n" "fmla v21.4s, %16.4s, v12.4s \n" "fmla v22.4s, %16.4s, v13.4s \n" "fmla v23.4s, %16.4s, v14.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2], #32 \n" // r00 r01 "prfm pldl1keep, [%5, #256] \n" "ld1 {v24.4s, v25.4s}, [%5], #32 \n" // r30 r31 "fmla v16.4s, %20.4s, v12.4s \n" "fmla v17.4s, %20.4s, v13.4s \n" "fmla v18.4s, %20.4s, v14.4s \n" "fmla v19.4s, %20.4s, v15.4s \n" "fmla v20.4s, %17.4s, v12.4s \n" "fmla v21.4s, %17.4s, v13.4s \n" "fmla v22.4s, %17.4s, v14.4s \n" "fmla v23.4s, %17.4s, v15.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n" // r02 r03 r04 r05 "prfm pldl1keep, [%5, #512] \n" "ld1 {v26.4s, v27.4s, v28.4s, v29.4s}, [%5] \n" // r32 r33 r34 r35 "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %12.4s, v11.4s \n" "fmla v18.4s, %12.4s, v12.4s \n" "fmla v19.4s, %12.4s, v13.4s \n" "fmla v20.4s, %18.4s, v24.4s \n" "fmla v21.4s, %18.4s, v25.4s \n" "fmla v22.4s, %18.4s, v26.4s \n" "fmla v23.4s, %18.4s, v27.4s \n" "add %2, %2, #32 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %13.4s, v12.4s \n" "fmla v18.4s, %13.4s, v13.4s \n" "fmla v19.4s, %13.4s, v14.4s \n" "fmla v20.4s, %19.4s, v25.4s \n" "fmla v21.4s, %19.4s, v26.4s \n" "fmla v22.4s, %19.4s, v27.4s \n" "fmla v23.4s, %19.4s, v28.4s \n" "add %5, %5, #32 \n" "fmla v16.4s, %14.4s, v12.4s \n" 
"fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %14.4s, v14.4s \n" "fmla v19.4s, %14.4s, v15.4s \n" "fmla v20.4s, %20.4s, v26.4s \n" "fmla v21.4s, %20.4s, v27.4s \n" "fmla v22.4s, %20.4s, v28.4s \n" "fmla v23.4s, %20.4s, v29.4s \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3] \n" // r10 r11 r12 r13 "mov v16.16b, %21.16b \n" // sum00 "mov v17.16b, %21.16b \n" // sum01 "mov v18.16b, %21.16b \n" // sum10 "mov v19.16b, %21.16b \n" // sum11 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %15.4s, v11.4s \n" "fmla v18.4s, %12.4s, v10.4s \n" "fmla v19.4s, %12.4s, v11.4s \n" "add %3, %3, #32 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %16.4s, v12.4s \n" "fmla v18.4s, %13.4s, v11.4s \n" "fmla v19.4s, %13.4s, v12.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%4] \n" // r20 r21 r22 r23 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %17.4s, v13.4s \n" "fmla v18.4s, %14.4s, v12.4s \n" "fmla v19.4s, %14.4s, v13.4s \n" "add %4, %4, #32 \n" "fmla v16.4s, %18.4s, v20.4s \n" "fmla v17.4s, %18.4s, v21.4s \n" "fmla v18.4s, %15.4s, v20.4s \n" "fmla v19.4s, %15.4s, v21.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n" // r00 r01 r02 r03 "fmla v16.4s, %19.4s, v21.4s \n" "fmla v17.4s, %19.4s, v22.4s \n" "fmla v18.4s, %16.4s, v21.4s \n" 
"fmla v19.4s, %16.4s, v22.4s \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5] \n" // r30 r31 r32 r33 "fmla v16.4s, %20.4s, v22.4s \n" "fmla v17.4s, %20.4s, v23.4s \n" "fmla v18.4s, %17.4s, v22.4s \n" "fmla v19.4s, %17.4s, v23.4s \n" "add %2, %2, #32 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %12.4s, v11.4s \n" "fmla v18.4s, %18.4s, v24.4s \n" "fmla v19.4s, %18.4s, v25.4s \n" "add %5, %5, #32 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %13.4s, v12.4s \n" "fmla v18.4s, %19.4s, v25.4s \n" "fmla v19.4s, %19.4s, v26.4s \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %20.4s, v26.4s \n" "fmla v19.4s, %20.4s, v27.4s \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%3, #384] \n" "ld1 {v10.4s, v11.4s, v12.4s}, [%3] \n" // r10 r11 r12 "mov v16.16b, %21.16b \n" // sum0 "mov v17.16b, %21.16b \n" // sum1 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %12.4s, v10.4s \n" "add %3, %3, #16 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %13.4s, v11.4s \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v20.4s, v21.4s, v22.4s}, [%4] \n" // r20 r21 r22 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %14.4s, v12.4s \n" "add %4, %4, #16 \n" "fmla v16.4s, %18.4s, v20.4s \n" "fmla v17.4s, %15.4s, v20.4s \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v10.4s, v11.4s, v12.4s}, [%2] \n" // r00 r01 r02 "fmla v16.4s, %19.4s, v21.4s \n" "fmla v17.4s, %16.4s, v21.4s \n" 
"prfm pldl1keep, [%5, #384] \n" "ld1 {v24.4s, v25.4s, v26.4s}, [%5] \n" // r30 r31 r32 "fmla v16.4s, %20.4s, v22.4s \n" "fmla v17.4s, %17.4s, v22.4s \n" "add %2, %2, #16 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %18.4s, v24.4s \n" "add %5, %5, #16 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %19.4s, v25.4s \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %20.4s, v26.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26"); } r0 += 2 * 4 + w * 4; r1 += 2 * 4 + w * 4; r2 += 2 * 4 + w * 4; r3 += 2 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v10.4s, v11.4s}, [%1], #32 \n" // r00 r01 "mov v16.16b, %17.16b \n" // sum00 "mov v17.16b, %17.16b \n" // sum01 "mov v18.16b, %17.16b \n" // sum02 "mov v19.16b, %17.16b \n" // sum03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n" // r02 r03 r04 r05 "fmla v16.4s, %8.4s, v10.4s \n" "fmla v17.4s, %8.4s, v11.4s \n" "fmla v18.4s, %8.4s, v12.4s \n" "fmla v19.4s, %8.4s, v13.4s \n" "add %1, %1, #32 \n" "fmla v16.4s, %9.4s, v11.4s \n" "fmla v17.4s, %9.4s, v12.4s \n" "fmla v18.4s, %9.4s, v13.4s \n" "fmla v19.4s, %9.4s, v14.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2], #32 \n" // r10 r11 "fmla v16.4s, %10.4s, v12.4s \n" "fmla v17.4s, %10.4s, v13.4s \n" "fmla v18.4s, %10.4s, v14.4s \n" "fmla v19.4s, %10.4s, v15.4s \n" "prfm pldl1keep, [%2, #512] \n" 
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n" // r12 r13 r14 r15 "fmla v16.4s, %11.4s, v10.4s \n" "fmla v17.4s, %11.4s, v11.4s \n" "fmla v18.4s, %11.4s, v12.4s \n" "fmla v19.4s, %11.4s, v13.4s \n" "add %2, %2, #32 \n" "fmla v16.4s, %12.4s, v11.4s \n" "fmla v17.4s, %12.4s, v12.4s \n" "fmla v18.4s, %12.4s, v13.4s \n" "fmla v19.4s, %12.4s, v14.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4s, v11.4s}, [%3], #32 \n" // r20 r21 "fmla v16.4s, %13.4s, v12.4s \n" "fmla v17.4s, %13.4s, v13.4s \n" "fmla v18.4s, %13.4s, v14.4s \n" "fmla v19.4s, %13.4s, v15.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r22 r23 r24 r25 "fmla v16.4s, %14.4s, v10.4s \n" "fmla v17.4s, %14.4s, v11.4s \n" "fmla v18.4s, %14.4s, v12.4s \n" "fmla v19.4s, %14.4s, v13.4s \n" "add %3, %3, #32 \n" "fmla v16.4s, %15.4s, v11.4s \n" "fmla v17.4s, %15.4s, v12.4s \n" "fmla v18.4s, %15.4s, v13.4s \n" "fmla v19.4s, %15.4s, v14.4s \n" "fmla v16.4s, %16.4s, v12.4s \n" "fmla v17.4s, %16.4s, v13.4s \n" "fmla v18.4s, %16.4s, v14.4s \n" "fmla v19.4s, %16.4s, v15.4s \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q8, q14 \n" "vmla.f32 q11, %q8, q15 \n" "vmla.f32 q10, %q9, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! 
\n" // r02 r03 "vmov q12, %q17 \n" // sum02 "vmov q13, %q17 \n" // sum03 "vmla.f32 q12, %q8, q14 \n" "vmla.f32 q11, %q9, q14 \n" "vmla.f32 q13, %q8, q15 \n" "vmla.f32 q10, %q10, q14 \n" "vmla.f32 q12, %q9, q15 \n" "vmla.f32 q11, %q10, q15 \n" // "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128] \n" // r04 r05 "vmla.f32 q13, %q9, q14 \n" "vmla.f32 q12, %q10, q14 \n" "vmla.f32 q13, %q10, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r10 r11 "vmla.f32 q10, %q11, q14 \n" "vmla.f32 q11, %q11, q15 \n" "vmla.f32 q10, %q12, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r12 r13 "vmla.f32 q12, %q11, q14 \n" "vmla.f32 q11, %q12, q14 \n" "vmla.f32 q13, %q11, q15 \n" "vmla.f32 q10, %q13, q14 \n" "vmla.f32 q12, %q12, q15 \n" "vmla.f32 q11, %q13, q15 \n" // "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128] \n" // r14 r15 "vmla.f32 q13, %q12, q14 \n" "vmla.f32 q12, %q13, q14 \n" "vmla.f32 q13, %q13, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r20 r21 "vmla.f32 q10, %q14, q14 \n" "vmla.f32 q11, %q14, q15 \n" "vmla.f32 q10, %q15, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! 
\n" // r22 r23 "vmla.f32 q12, %q14, q14 \n" "vmla.f32 q11, %q15, q14 \n" "vmla.f32 q13, %q14, q15 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q12, %q15, q15 \n" "vmla.f32 q11, %q16, q15 \n" // "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128] \n" // r24 r25 "vmla.f32 q13, %q15, q14 \n" "vmla.f32 q12, %q16, q14 \n" "vmla.f32 q13, %q16, q15 \n" "vstm %0!, {d20-d27} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n" // r00 r01 r02 r03 "mov v16.16b, %17.16b \n" // sum00 "mov v17.16b, %17.16b \n" // sum01 "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "fmla v16.4s, %8.4s, v12.4s \n" "fmla v17.4s, %8.4s, v13.4s \n" "add %1, %1, #32 \n" "fmla v18.4s, %9.4s, v13.4s \n" "fmla v19.4s, %9.4s, v14.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2] \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v14.4s \n" "fmla v17.4s, %10.4s, v15.4s \n" "add %2, %2, #32 \n" "fmla v18.4s, %11.4s, v20.4s \n" "fmla v19.4s, %11.4s, v21.4s \n" "fmla v16.4s, %12.4s, v21.4s \n" "fmla v17.4s, %12.4s, v22.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v22.4s \n" "fmla v19.4s, %13.4s, v23.4s \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %15.4s, v13.4s \n" "fmla v19.4s, %15.4s, v14.4s \n" "fmla v16.4s, %16.4s, v14.4s \n" "fmla v17.4s, %16.4s, v15.4s \n" "add %3, %3, #32 \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" : "=r"(outptr0), // 
%0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q8, q12 \n" "vmla.f32 q11, %q8, q13 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128] \n" // r02 r03 "vmla.f32 q10, %q9, q13 \n" "vmla.f32 q11, %q9, q14 \n" "vmla.f32 q10, %q10, q14 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n" // r10 r11 "vmla.f32 q11, %q10, q15 \n" "vmla.f32 q10, %q11, q12 \n" "vmla.f32 q11, %q11, q13 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128] \n" // r12 r13 "vmla.f32 q10, %q12, q13 \n" "vmla.f32 q11, %q12, q14 \n" "vmla.f32 q10, %q13, q14 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n" // r20 r21 "vmla.f32 q11, %q13, q15 \n" "vmla.f32 q10, %q14, q12 \n" "vmla.f32 q11, %q14, q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128] \n" // r22 r23 "vmla.f32 q10, %q15, q13 \n" "vmla.f32 q11, %q15, q14 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q11, %q16, q15 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); vst1q_f32(outptr0, _sum0); r0 += 4; r1 += 4; r2 += 4; outptr0 += 4; } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } } } static void convdw3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "mov v30.16b, %17.16b \n" // sum02 "mov v31.16b, %17.16b \n" // sum03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v14.4s, v15.4s, v16.4s, v17.4s}, [%1], #64 \n" // r04 r05 r06 r07 "fmla v28.4s, %8.4s, v10.4s \n" "fmla v29.4s, %8.4s, v12.4s \n" "fmla v30.4s, %8.4s, v14.4s \n" "fmla v31.4s, %8.4s, v16.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" // r08 "fmla v28.4s, %9.4s, v11.4s \n" "fmla v29.4s, %9.4s, v13.4s \n" "fmla v30.4s, %9.4s, v15.4s \n" "fmla v31.4s, %9.4s, v17.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.4s, %10.4s, v12.4s \n" "fmla v29.4s, %10.4s, v14.4s \n" "fmla v30.4s, %10.4s, v16.4s \n" "fmla v31.4s, %10.4s, v18.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v28.4s, %11.4s, v20.4s \n" "fmla v29.4s, %11.4s, v22.4s \n" "fmla v30.4s, %11.4s, v24.4s \n" "fmla v31.4s, %11.4s, v26.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" // r18 "fmla v28.4s, %12.4s, v21.4s \n" "fmla v29.4s, %12.4s, v23.4s \n" "fmla v30.4s, %12.4s, v25.4s \n" 
"fmla v31.4s, %12.4s, v27.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.4s, %13.4s, v22.4s \n" "fmla v29.4s, %13.4s, v24.4s \n" "fmla v30.4s, %13.4s, v26.4s \n" "fmla v31.4s, %13.4s, v19.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v14.4s, v15.4s, v16.4s, v17.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v28.4s, %14.4s, v10.4s \n" "fmla v29.4s, %14.4s, v12.4s \n" "fmla v30.4s, %14.4s, v14.4s \n" "fmla v31.4s, %14.4s, v16.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v18.4s}, [%3] \n" // r28 "fmla v28.4s, %15.4s, v11.4s \n" "fmla v29.4s, %15.4s, v13.4s \n" "fmla v30.4s, %15.4s, v15.4s \n" "fmla v31.4s, %15.4s, v17.4s \n" "fmla v28.4s, %16.4s, v12.4s \n" "fmla v29.4s, %16.4s, v14.4s \n" "fmla v30.4s, %16.4s, v16.4s \n" "fmla v31.4s, %16.4s, v18.4s \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmla.f32 q10, %q8, q14 \n" "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q9, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r02 r03 "vmla.f32 q11, %q8, q14 \n" "vmla.f32 q10, %q10, q14 \n" "vmov q12, %q17 \n" // sum02 "vmla.f32 q11, %q9, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r04 r05 "vmla.f32 q12, %q8, q14 \n" "vmla.f32 q11, %q10, q14 \n" "vmla.f32 q12, %q9, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! 
\n" // r10 r11 "vmla.f32 q10, %q11, q14 \n" "vmov q13, %q17 \n" // sum03 "vmla.f32 q10, %q12, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r06 r07 "vmla.f32 q13, %q8, q14 \n" "vmla.f32 q12, %q10, q14 \n" "vmla.f32 q13, %q9, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r12 r13 "vmla.f32 q11, %q11, q14 \n" "vmla.f32 q10, %q13, q14 \n" "vmla.f32 q11, %q12, q15 \n" "vld1.f32 {d28-d29}, [%1 :128] \n" // r08 "vmla.f32 q13, %q10, q14 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r14 r15 "vmla.f32 q12, %q11, q14 \n" "vmla.f32 q11, %q13, q14 \n" "vmla.f32 q12, %q12, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r20 r21 "vmla.f32 q10, %q14, q14 \n" "vmla.f32 q10, %q15, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r16 r17 "vmla.f32 q13, %q11, q14 \n" "vmla.f32 q12, %q13, q14 \n" "vmla.f32 q13, %q12, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r22 r23 "vmla.f32 q11, %q14, q14 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q11, %q15, q15 \n" "vld1.f32 {d28-d29}, [%2 :128] \n" // r18 "vmla.f32 q13, %q13, q14 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r24 r25 "vmla.f32 q12, %q14, q14 \n" "vmla.f32 q11, %q16, q14 \n" "vmla.f32 q12, %q15, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! 
\n" // r26 r27 "vmla.f32 q13, %q14, q14 \n" "vmla.f32 q12, %q16, q14 \n" "vmla.f32 q13, %q15, q15 \n" "vld1.f32 {d28-d29}, [%3 :128] \n" // r28 "vmla.f32 q13, %q16, q14 \n" "vstm %0!, {d20-d27} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // r00 r01 r02 r03 "mov v20.16b, %17.16b \n" // sum00 "mov v21.16b, %17.16b \n" // sum01 "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "fmla v20.4s, %8.4s, v10.4s \n" "fmla v21.4s, %8.4s, v12.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v14.4s}, [%1] \n" // r04 "fmla v22.4s, %9.4s, v11.4s \n" "fmla v23.4s, %9.4s, v13.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, %10.4s, v12.4s \n" "fmla v21.4s, %10.4s, v14.4s \n" "fmla v22.4s, %11.4s, v16.4s \n" "fmla v23.4s, %11.4s, v18.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v15.4s}, [%2] \n" // r14 "fmla v20.4s, %12.4s, v17.4s \n" "fmla v21.4s, %12.4s, v19.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v22.4s, %13.4s, v18.4s \n" "fmla v23.4s, %13.4s, v15.4s \n" "fmla v20.4s, %14.4s, v10.4s \n" "fmla v21.4s, %14.4s, v12.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v14.4s}, [%3] \n" // r24 "fmla v22.4s, %15.4s, v11.4s \n" "fmla v23.4s, %15.4s, v13.4s \n" "fmla v20.4s, %16.4s, v12.4s \n" "fmla v21.4s, %16.4s, v14.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), 
// %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q8, q12 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r02 r03 "vmla.f32 q10, %q9, q13 \n" "vmla.f32 q11, %q8, q14 \n" "vmla.f32 q10, %q10, q14 \n" "vld1.f32 {d24-d25}, [%1 :128] \n" // r04 "vmla.f32 q11, %q9, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r10 r11 "vmla.f32 q11, %q10, q12 \n" "vmla.f32 q10, %q11, q14 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n" // r12 r13 "vmla.f32 q10, %q12, q15 \n" "vmla.f32 q11, %q11, q12 \n" "vmla.f32 q10, %q13, q12 \n" "vld1.f32 {d28-d29}, [%2 :128] \n" // r14 "vmla.f32 q11, %q12, q13 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n" // r20 r21 "vmla.f32 q11, %q13, q14 \n" "vmla.f32 q10, %q14, q12 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r22 r23 "vmla.f32 q10, %q15, q13 \n" "vmla.f32 q11, %q14, q14 \n" "vmla.f32 q10, %q16, q14 \n" "vld1.f32 {d24-d25}, [%3 :128] \n" // r24 "vmla.f32 q11, %q15, q15 \n" "vmla.f32 q11, %q16, q12 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); vst1q_f32(outptr0, _sum0); r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
GB_unop__isfinite_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__isfinite_bool_fp64)
// op(A') function: GB (_unop_tran__isfinite_bool_fp64)

// C type:   bool
// A type:   double
// cast:     double cij = (aij)
// unaryop:  cij = isfinite (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isfinite (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (aij) ;              \
    Cx [pC] = isfinite (z) ;        \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Computes Cx [p] = isfinite (Ax [p]) for all anz entries, in parallel across
// nthreads OpenMP threads.  When Ab is non-NULL (bitmap case), entries with
// Ab [p] == 0 are skipped.  Returns GrB_NO_VALUE if this kernel is disabled
// at compile time (GB_DISABLE), so the caller falls back to the generic path.

GrB_Info GB (_unop_apply__isfinite_bool_fp64)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast reduces to a plain memcpy
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = (aij) ;
            Cx [p] = isfinite (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all unary ops via the textual
// include of GB_unop_transpose.c, which expands using the GB_* macros above.

GrB_Info GB (_unop_tran__isfinite_bool_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SpatialConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialConvolutionMM.c"
#else

#include <ATen/div_rtn.h>

// Validate tensor shapes and conv hyper-parameters shared by forward and
// backward passes.  Raises (via THError/THArgCheck) on any violation.
// weight may be NULL only when weight_nullable is nonzero (backward-bias case).
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
    THTensor *input, THTensor *gradOutput,
    THTensor *weight, THTensor *bias,
    int kH, int kW, int dH, int dW, int padH, int padW,
    int weight_nullable) {
  THArgCheck(kW > 0 && kH > 0, 9,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  if (weight != NULL) {
    // weight is either 4D (nOut x nIn x kH x kW) or its flattened 2D view
    THNN_ARGCHECK(!weight->is_empty() && (weight->dim() == 2 || weight->dim() == 4), 5, weight,
                  "non-empty 2D or 4D weight tensor expected, but got: %s");
    if (bias != NULL) {
      THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
    }
  } else if (!weight_nullable) {
    THError("weight tensor is expected to be non-nullable");
  }

  // dimension indices for (plane, height, width); shifted by one for batched 4D input
  int ndim = input->dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
                "non-empty 3D or 4D input tensor expected but got: %s");

  int64_t inputHeight = input->size(dimh);
  int64_t inputWidth = input->size(dimw);

  int64_t exactInputHeight = inputHeight + 2 * padH;
  int64_t exactInputWidth = inputWidth + 2 * padW;

  if (exactInputHeight < kH || exactInputWidth < kW) {
    THError("Calculated padded input size per channel: (%ld x %ld). "
            "Kernel size: (%d x %d). Kernel size can't be greater than actual input size",
            exactInputHeight, exactInputWidth, kH, kW);
  }

  // div_rtn: division rounding toward negative infinity, per conv output-size formula
  int64_t outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
  int64_t outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;

  if (outputWidth < 1 || outputHeight < 1) {
    THError("Given input size per channel: (%ld x %ld). "
            "Calculated output size per channel: (%ld x %ld). Output size is too small",
            inputHeight, inputWidth, outputHeight, outputWidth);
  }

  if (weight != NULL) {
    int64_t nInputPlane = weight->size(1);
    if (weight->dim() == 2) {
      // 2D weight view stores nIn*kH*kW in dim 1; recover the plane count
      nInputPlane /= (kH * kW);
    }
    THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
  }

  if (gradOutput != NULL) {
    if (weight != NULL) {
      int64_t nOutputPlane = weight->size(0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    } else if (bias != NULL) {
      int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    }
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

// Return a contiguous 2D (nOutputPlane x nInputPlane*kH*kW) view of weight for
// the matrix-multiply formulation.  Always returns a NEW tensor reference that
// the caller must decref (callers below do so at the end of each entry point).
static THTensor* THNN_(newViewWeightMM2d)(THTensor *weight) {
  weight = THTensor_(newContiguous)(weight);
  if (weight->dim() == 4) {
    int64_t s1 = weight->size(0);
    int64_t s2 = weight->size(1) * weight->size(2) * weight->size(3);
    THTensor *old_weight = weight;
    weight = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(weight), weight->storage_offset(),
                                         s1, -1, s2, -1);
    c10::raw::intrusive_ptr::decref(old_weight);
  }
  return weight;
}

// Forward pass for one sample: im2col (unfolded_copy) into finput, then a
// single GEMM (addmm) of the 2D weight view with finput into a 2D view of
// output.  bias, when present, pre-fills each output plane; otherwise output
// is zeroed before accumulation.
static void THNN_(SpatialConvolutionMM_updateOutput_frame)(
    THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput,
    int kW, int kH, int dW, int dH, int padW, int padH,
    int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
    int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight) {
  int64_t i;
  THTensor *output2d;

  // im2col: unfold input patches into the (kW*kH*nInputPlane) x (outH*outW) buffer
  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
                       nInputPlane, inputWidth, inputHeight,
                       outputWidth, outputHeight);

  // 2D alias over output's storage: nOutputPlane x (outH*outW)
  output2d = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(output), output->storage_offset(),
                                         nOutputPlane, -1,
                                         outputHeight*outputWidth, -1);
  if (bias) {
    // seed each output plane with its bias value; GEMM below accumulates on top
    for(i = 0; i < nOutputPlane; i++)
      THVector_(fill)
        (THStorage_(data)(THTensor_getStoragePtr(output)) + output->storage_offset() + output->stride(0) * i,
         THTensor_(get1d)(bias, i), outputHeight*outputWidth);
  } else {
    THTensor_(zero)(output);
  }

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  c10::raw::intrusive_ptr::decref(output2d);
}

// Forward entry point: resizes finput/output for 3D (single) or 4D (batched)
// input and runs per-sample frames, batched frames in parallel via OpenMP.
void THNN_(SpatialConvolutionMM_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THTensor *weight,
    THTensor *bias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH) {
  weight = THNN_(newViewWeightMM2d)(weight);

  THNN_(SpatialConvolutionMM_shapeCheck)
    (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW, 0);

  input = THTensor_(newContiguous)(input);
  int ndim = input->dim();
  int dimf = 0;
  int dimh = 1;
  int dimw = 2;

  if (ndim == 4) {
    dimf++;
    dimh++;
    dimw++;
  }

  int64_t nInputPlane = input->size(dimf);
  int64_t inputHeight = input->size(dimh);
  int64_t inputWidth = input->size(dimw);
  int64_t nOutputPlane = weight->size(0);
  int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
  int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1;

  if(input->dim() == 3)
  {
    // single sample
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

    THNN_(SpatialConvolutionMM_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    // batched: one independent frame per sample, parallel over the batch
    int64_t T = input->size(0);
    int64_t t;

    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(SpatialConvolutionMM_updateOutput_frame)
        (input_t, output_t, weight, bias, finput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      c10::raw::intrusive_ptr::decref(input_t);
      c10::raw::intrusive_ptr::decref(output_t);
      c10::raw::intrusive_ptr::decref(finput_t);
    }
  }

  // release the references created by newContiguous / newViewWeightMM2d
  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(weight);
}

// Backward-input for one sample: fgradInput = weight (already transposed by
// the caller) * gradOutput2d, then col2im (unfolded_acc) accumulates the
// columns back into the zeroed gradInput.
static void THNN_(SpatialConvolutionMM_updateGradInput_frame)(
    THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput,
    int kW, int kH, int dW, int dH, int padW, int padH) {
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)
    (THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
     gradOutput->size(0), -1,
     gradOutput->size(1)*gradOutput->size(2), -1);
  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  c10::raw::intrusive_ptr::decref(gradOutput2d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH,
                      padW, padH,
                      gradInput->size(0), gradInput->size(2), gradInput->size(1),
                      gradOutput->size(2), gradOutput->size(1));
}

// Backward-input entry point: transposes the 2D weight view once, then runs
// per-sample frames (parallel over the batch for 4D input).
void THNN_(SpatialConvolutionMM_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THTensor *weight,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH) {
  weight = THNN_(newViewWeightMM2d)(weight);

  THNN_(SpatialConvolutionMM_shapeCheck)
    (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW, 0);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);

  // depending on the BLAS library, fgradInput (result tensor) might
  // be left uninitialized on zero alpha, which might lead to weird behavior
  // hence, to be safe, zero it
  THTensor_(zero)(fgradInput);
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 0, 1);

  if(input->dim() == 3)
  {
    THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput,
                                                      tweight, fgradInput,
                                                      kW, kH, dW, dH, padW, padH);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t,
                                                        tweight, fgradInput_t,
                                                        kW, kH, dW, dH, padW, padH);

      c10::raw::intrusive_ptr::decref(gradInput_t);
      c10::raw::intrusive_ptr::decref(gradOutput_t);
      c10::raw::intrusive_ptr::decref(fgradInput_t);
    }
  }

  c10::raw::intrusive_ptr::decref(tweight);
  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);
  c10::raw::intrusive_ptr::decref(weight);
}

// Parameter gradients for one sample:
//   gradWeight += scale * gradOutput2d * finput^T   (GEMM)
//   gradBias[i] += scale * sum over spatial positions of gradOutput plane i
// Either gradWeight or gradBias may be NULL to skip that accumulation.
static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
    THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput,
    scalar_t scale) {
  int64_t i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)
    (THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
     gradOutput->size(0), -1,
     gradOutput->size(1)*gradOutput->size(2), -1);

  if (gradWeight) {
    THTensor *tfinput = THTensor_(new)();
    THTensor_(transpose)(tfinput, finput, 0, 1);
    THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
    c10::raw::intrusive_ptr::decref(tfinput);
  }

  if (gradBias) {
    for(i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
    {
      int64_t k;
      scalar_t sum = 0;
      // raw pointer to row i of the 2D gradOutput view
      scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
      for(k = 0; k < gradOutput2d->size(1); k++)
        sum += data[k];
      (THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale*sum;
    }
  }

  c10::raw::intrusive_ptr::decref(gradOutput2d);
}

// Parameter-gradient entry point.  The batch loop is serial: every frame
// accumulates into the same gradWeight/gradBias tensors.
void THNN_(SpatialConvolutionMM_accGradParameters)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradWeight,
    THTensor *gradBias,
    THTensor *finput, // can be NULL if gradWeight = NULL
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    accreal scale_) {
  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  if (gradWeight) {
    THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
    gradWeight = THNN_(newViewWeightMM2d)(gradWeight);
  }
  if (gradBias) {
    THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
  }

  // weight_nullable = 1: gradWeight may legitimately be NULL (bias-only update)
  THNN_(SpatialConvolutionMM_shapeCheck)
    (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, 1);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  if(input->dim() == 3)
  {
    THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight,
                                                        gradBias, finput, scale);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

    for(t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = NULL;

      if (gradWeight) {
        finput_t = THTensor_(newSelect)(finput, 0, t);
      }

      THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight,
                                                          gradBias, finput_t, scale);

      c10::raw::intrusive_ptr::decref(gradOutput_t);
      if (gradWeight) {
        c10::raw::intrusive_ptr::decref(finput_t);
      }
    }
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);

  if (gradWeight) {
    c10::raw::intrusive_ptr::decref(gradWeight);
  }
}

#endif
sparse-false.c
/**********************************************************************************************/
/*  This program is part of the Barcelona OpenMP Tasks Suite                                  */
/*  Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion  */
/*  Copyright (C) 2009 Universitat Politecnica de Catalunya                                   */
/*                                                                                            */
/*  This program is free software; you can redistribute it and/or modify                      */
/*  it under the terms of the GNU General Public License as published by                      */
/*  the Free Software Foundation; either version 2 of the License, or                         */
/*  (at your option) any later version.                                                       */
/*                                                                                            */
/*  This program is distributed in the hope that it will be useful,                           */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of                            */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the                             */
/*  GNU General Public License for more details.                                              */
/*                                                                                            */
/*  You should have received a copy of the GNU General Public License                         */
/*  along with this program; if not, write to the Free Software                               */
/*  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA              */
/**********************************************************************************************/

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <libgen.h>
#include <omp.h>

/* tolerance for the relative-error check in checkmat() */
#define EPSILON 1.0E-6

/* matrix of blocks is bots_arg_size x bots_arg_size;
   each block is bots_arg_size_1 x bots_arg_size_1 floats */
unsigned int bots_arg_size = 50;
unsigned int bots_arg_size_1 = 80;

#define TRUE 1
#define FALSE 0
#define BOTS_RESULT_SUCCESSFUL 1
#define BOTS_RESULT_UNSUCCESSFUL 0

#define USE_FALSE 3
unsigned int FALSE_ARRAY[16];

/***********************************************************************
 * checkmat:
 **********************************************************************/
/* Compare two bots_arg_size_1 x bots_arg_size_1 blocks element-wise.
 * M is the reference block, N the block under test.  Returns TRUE when
 * every entry agrees within relative error EPSILON, FALSE otherwise;
 * each mismatch is reported on stderr before returning.
 *
 * Fix: the relative error is divided by |M[i][j]| (fabsf).  The previous
 * code divided by the signed reference value, so for negative entries
 * the ratio was negative, never exceeded EPSILON, and mismatches were
 * silently accepted. */
int checkmat (float *M, float *N)
{
   int i, j;
   float r_err;
   int bad = 0;

   for (i = 0; i < bots_arg_size_1; i++)
   {
      for (j = 0; j < bots_arg_size_1; j++)
      {
         r_err = M[i*bots_arg_size_1+j] - N[i*bots_arg_size_1+j];
         if ( r_err < 0.0 ) r_err = -r_err;
         /* normalize by the magnitude of the reference entry */
         r_err = r_err / fabsf(M[i*bots_arg_size_1+j]);
         if (r_err > EPSILON)
         {
            fprintf(stderr,"Checking failure: A[%d][%d]=%f B[%d][%d]=%f; Relative Error=%f\n",
                    i,j, M[i*bots_arg_size_1+j], i,j, N[i*bots_arg_size_1+j], r_err);
            bad = 1;
         }
      }
   }
   return bad ? FALSE : TRUE;
}

/***********************************************************************
 * genmat:
 **********************************************************************/
/* Build the sparse block matrix M (bots_arg_size x bots_arg_size block
 * pointers).  A fixed structural pattern decides which blocks exist;
 * existing blocks are allocated and filled with a deterministic
 * linear-congruential sequence (seed 1325), absent blocks are NULL.
 * Exits with code 101 on allocation failure. */
void genmat (float *M[])
{
   int null_entry, init_val, i, j, ii, jj;
   float *p;

   init_val = 1325;

   /* generating the structure */
   for (ii=0; ii < bots_arg_size; ii++)
   {
      for (jj=0; jj < bots_arg_size; jj++)
      {
         /* computing null entries */
         null_entry = FALSE;
         if ((ii<jj) && (ii%3 !=0)) null_entry = TRUE;
         if ((ii>jj) && (jj%3 !=0)) null_entry = TRUE;
         if (ii%2==1) null_entry = TRUE;
         if (jj%2==1) null_entry = TRUE;
         /* the diagonal and its immediate neighbours are always present */
         if (ii==jj) null_entry = FALSE;
         if (ii==jj-1) null_entry = FALSE;
         if (ii-1 == jj) null_entry = FALSE;

         if (null_entry == FALSE)
         {
            /* allocating matrix */
            M[ii*bots_arg_size+jj] = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float));
            if (M[ii*bots_arg_size+jj] == NULL)
            {
               fprintf(stderr,"Error: Out of memory\n");
               exit(101);
            }
            /* initializing matrix: LCG values scaled into (-2, 2) */
            p = M[ii*bots_arg_size+jj];
            for (i = 0; i < bots_arg_size_1; i++)
            {
               for (j = 0; j < bots_arg_size_1; j++)
               {
                    init_val = (3125 * init_val) % 65536;
                    (*p) = (float)((init_val - 32768.0) / 16384.0);
                    p++;
               }
            }
         }
         else
         {
            M[ii*bots_arg_size+jj] = NULL;
         }
      }
   }
}

/***********************************************************************
 * print_structure:
 **********************************************************************/
/* Print an ASCII map of the block structure of M to stderr:
 * 'x' marks an allocated block, ' ' a NULL one. */
void print_structure(char *name, float *M[])
{
   int ii, jj;

   fprintf(stderr,"Structure for matrix %s @ 0x%p\n",name, M);
   for (ii = 0; ii < bots_arg_size; ii++)
   {
      for (jj = 0; jj < bots_arg_size; jj++)
      {
         if (M[ii*bots_arg_size+jj]!=NULL) {fprintf(stderr,"x");}
         else fprintf(stderr," ");
      }
      fprintf(stderr,"\n");
   }
   fprintf(stderr,"\n");
}

/***********************************************************************
 * allocate_clean_block:
**********************************************************************/ float * allocate_clean_block() { int i,j; float *p, *q; p = (float *) malloc(bots_arg_size_1*bots_arg_size_1*sizeof(float)); q=p; if (p!=NULL){ for (i = 0; i < bots_arg_size_1; i++) for (j = 0; j < bots_arg_size_1; j++){(*p)=0.0; p++;} } else { fprintf(stderr,"Error: Out of memory\n"); exit (101); } return (q); } /*********************************************************************** * lu0: **********************************************************************/ void lu0(float *diag) { int i, j, k; for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) { diag[i*bots_arg_size_1+k] = diag[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) diag[i*bots_arg_size_1+j] = diag[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k] * diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bdiv: **********************************************************************/ void bdiv(float *diag, float *row) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (k=0; k<bots_arg_size_1; k++) { row[i*bots_arg_size_1+k] = row[i*bots_arg_size_1+k] / diag[k*bots_arg_size_1+k]; for (j=k+1; j<bots_arg_size_1; j++) row[i*bots_arg_size_1+j] = row[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*diag[k*bots_arg_size_1+j]; } } /*********************************************************************** * bmod: **********************************************************************/ void bmod(float *row, float *col, float *inner) { int i, j, k; for (i=0; i<bots_arg_size_1; i++) for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) inner[i*bots_arg_size_1+j] = inner[i*bots_arg_size_1+j] - row[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } /*********************************************************************** * fwd: **********************************************************************/ void fwd(float *diag, float *col) { 
int i, j, k; for (j=0; j<bots_arg_size_1; j++) for (k=0; k<bots_arg_size_1; k++) for (i=k+1; i<bots_arg_size_1; i++) col[i*bots_arg_size_1+j] = col[i*bots_arg_size_1+j] - diag[i*bots_arg_size_1+k]*col[k*bots_arg_size_1+j]; } void sparselu_init (float ***pBENCH, char *pass) { *pBENCH = (float **) malloc(bots_arg_size*bots_arg_size*sizeof(float *)); genmat(*pBENCH); print_structure(pass, *pBENCH); } void sparselu_par_call(float **BENCH) { int ii, jj, kk; srand(10); fprintf(stderr,"Computing SparseLU Factorization (%dx%d matrix with %dx%d blocks) ", bots_arg_size,bots_arg_size,bots_arg_size_1,bots_arg_size_1); #pragma omp parallel #pragma omp single { double d1 = omp_get_wtime(); for (kk=0; kk<bots_arg_size; kk++) { int rnd = rand()%USE_FALSE; if (rnd == 0) #pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk],) lu0(BENCH[kk*bots_arg_size+kk]); if (rnd == 1) #pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk],FALSE_ARRAY[0]) lu0(BENCH[kk*bots_arg_size+kk]); if (rnd == 2) #pragma omp task firstprivate(kk) shared(BENCH) depend(inout:BENCH[kk*bots_arg_size+kk]) depend(in:FALSE_ARRAY[0]) lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { int rnd = rand()%USE_FALSE; if (rnd == 0) #pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj]) depend(inout:BENCH[kk*bots_arg_size+kk]) fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); if (rnd == 1) #pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],FALSE_ARRAY[0]) depend(inout:BENCH[kk*bots_arg_size+kk]) fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); if (rnd == 2) #pragma omp task firstprivate(kk, jj) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj]) depend(inout:BENCH[kk*bots_arg_size+kk],FALSE_ARRAY[0]) fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if 
(BENCH[ii*bots_arg_size+kk] != NULL) { int rnd = rand()%USE_FALSE; if (rnd == 0) #pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk]) depend(in:BENCH[kk*bots_arg_size+kk]) bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); if (rnd == 1) #pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk]) depend(in:BENCH[kk*bots_arg_size+kk],FALSE_ARRAY[0]) bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); if (rnd == 2) #pragma omp task firstprivate(kk, ii) shared(BENCH) depend(inout:BENCH[ii*bots_arg_size+kk],FALSE_ARRAY[0]) depend(in:BENCH[kk*bots_arg_size+kk]) bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { int rnd = rand()%USE_FALSE; if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); if (rnd == 0) #pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk]) depend(inout:BENCH[ii*bots_arg_size+jj]) bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); if (rnd == 1) #pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk],FALSE_ARRAY[0]) depend(inout:BENCH[ii*bots_arg_size+jj]) bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); if (rnd == 2) #pragma omp task firstprivate(kk, jj, ii) shared(BENCH) depend(in:BENCH[kk*bots_arg_size+jj],BENCH[ii*bots_arg_size+kk]) depend(inout:BENCH[ii*bots_arg_size+jj],FALSE_ARRAY[0]) bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } #pragma omp taskwait double d2 = omp_get_wtime(); fprintf(stderr," Par Time: %f\n",d2-d1); } fprintf(stderr," completed!\n"); } void sparselu_seq_call(float **BENCH) { 
int ii, jj, kk; double d1 = omp_get_wtime(); for (kk=0; kk<bots_arg_size; kk++) { lu0(BENCH[kk*bots_arg_size+kk]); for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { fwd(BENCH[kk*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) { bdiv (BENCH[kk*bots_arg_size+kk], BENCH[ii*bots_arg_size+kk]); } for (ii=kk+1; ii<bots_arg_size; ii++) if (BENCH[ii*bots_arg_size+kk] != NULL) for (jj=kk+1; jj<bots_arg_size; jj++) if (BENCH[kk*bots_arg_size+jj] != NULL) { if (BENCH[ii*bots_arg_size+jj]==NULL) BENCH[ii*bots_arg_size+jj] = allocate_clean_block(); bmod(BENCH[ii*bots_arg_size+kk], BENCH[kk*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); } } double d2 = omp_get_wtime(); fprintf(stderr,"Serial Time: %f\n",d2-d1); } void sparselu_fini (float **BENCH, char *pass) { print_structure(pass, BENCH); } int sparselu_check(float **SEQ, float **BENCH) { int ii,jj,ok=1; for (ii=0; ((ii<bots_arg_size) && ok); ii++) { for (jj=0; ((jj<bots_arg_size) && ok); jj++) { if ((SEQ[ii*bots_arg_size+jj] == NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] == NULL)) ok = FALSE; if ((SEQ[ii*bots_arg_size+jj] != NULL) && (BENCH[ii*bots_arg_size+jj] != NULL)) ok = checkmat(SEQ[ii*bots_arg_size+jj], BENCH[ii*bots_arg_size+jj]); if(!ok)abort(); } } if (ok) fprintf(stderr,"stämmer\n"); if (ok) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; } int main ( int argc, char *argv[]) { float **SEQ,**BENCH; sparselu_init(&BENCH,"benchmark"); sparselu_par_call(BENCH); sparselu_fini(BENCH,"benchmark"); sparselu_init(&SEQ,"serial"); sparselu_seq_call(SEQ); sparselu_fini(SEQ,"serial"); fprintf(stderr,"Testar om Parallel och Seriell version stämmer med varandra...\n"); return (sparselu_check(SEQ,BENCH) == BOTS_RESULT_SUCCESSFUL) ? 0 : 1; }
shortcut_layer.c
#include "shortcut_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
#include <assert.h>

// Builds a shortcut (residual-add) layer that sums the current input with
// the outputs of `n` earlier layers (indices in `input_layers`), optionally
// with learned per-feature or per-channel mixing weights.
// `layers_output`/`layers_delta` (and their *_gpu twins) are caller-owned
// pointer arrays aliasing the source layers' buffers; this layer stores the
// arrays, it does not copy them.
// NOTE(review): `weights_normalizion` is the field's actual (misspelled)
// name in the layer struct, so it is kept as-is here.
layer make_shortcut_layer(int batch, int n, int *input_layers, int* input_sizes, int w, int h, int c,
    float **layers_output, float **layers_delta, float **layers_output_gpu, float **layers_delta_gpu,
    WEIGHTS_TYPE_T weights_type, WEIGHTS_NORMALIZATION_T weights_normalizion, ACTIVATION activation, int train)
{
    fprintf(stderr, "Shortcut Layer: ");
    int i;
    for(i = 0; i < n; ++i) fprintf(stderr, "%d, ", input_layers[i]);

    layer l = { (LAYER_TYPE)0 };
    l.train = train;
    l.type = SHORTCUT;
    l.batch = batch;
    l.activation = activation;
    l.n = n;                              // number of source layers to add in
    l.input_layers = input_layers;        // takes ownership of caller arrays
    l.input_sizes = input_sizes;
    l.layers_output = layers_output;
    l.layers_delta = layers_delta;
    l.weights_type = weights_type;
    l.weights_normalizion = weights_normalizion;
    l.learning_rate_scale = 1; // not necessary

    //l.w = w2;
    //l.h = h2;
    //l.c = c2;
    // Output geometry is identical to the input geometry.
    l.w = l.out_w = w;
    l.h = l.out_h = h;
    l.c = l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;

    //if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2);

    // Primary source layer; used by the fast same-shape path in forward().
    l.index = l.input_layers[0];

    if (train) l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
    l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));

    // One mixing weight per source (+1 for the layer's own input), optionally
    // replicated per channel.
    if (l.weights_type == PER_FEATURE) l.nweights = (l.n + 1);
    else if (l.weights_type == PER_CHANNEL) l.nweights = (l.n + 1) * l.c;

    if (l.nweights > 0) {
        l.weights = (float*)calloc(l.nweights, sizeof(float));
        float scale = sqrt(2. / l.nweights);   // NOTE(review): computed but unused; random init is commented out below
        for (i = 0; i < l.nweights; ++i) l.weights[i] = 1;// scale*rand_uniform(-1, 1); // rand_normal();

        if (train) l.weight_updates = (float*)calloc(l.nweights, sizeof(float));
        l.update = update_shortcut_layer;
    }

    l.forward = forward_shortcut_layer;
    l.backward = backward_shortcut_layer;
#ifndef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)calloc(l.batch*l.outputs, sizeof(float));
#endif  // GPU
#ifdef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs);

    l.forward_gpu = forward_shortcut_layer_gpu;
    l.backward_gpu = backward_shortcut_layer_gpu;

    if (l.nweights > 0) {
        l.update_gpu = update_shortcut_layer_gpu;
        l.weights_gpu = cuda_make_array(l.weights, l.nweights);
        if (train) l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
    }

    if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);

    // Device-side copies of the source-layer metadata/pointer tables.
    l.input_sizes_gpu = cuda_make_int_array_new_api(input_sizes, l.n);
    l.layers_output_gpu = (float**)cuda_make_array_pointers((void**)layers_output_gpu, l.n);
    l.layers_delta_gpu = (float**)cuda_make_array_pointers((void**)layers_delta_gpu, l.n);
#endif  // GPU

    l.bflops = l.out_w * l.out_h * l.out_c * l.n / 1000000000.;
    if (l.weights_type) l.bflops *= 2;   // weighted sum roughly doubles the work
    fprintf(stderr, " wt = %d, wn = %d, outputs:%4d x%4d x%4d %5.3f BF\n", l.weights_type, l.weights_normalizion, l.out_w, l.out_h, l.out_c, l.bflops);
    return l;
}

// Resizes the layer's buffers after the network input size changed, and
// refreshes the cached pointers into the (re-allocated) source layers.
// Depth (out_c) is unchanged; only spatial dimensions move.
void resize_shortcut_layer(layer *l, int w, int h, network *net)
{
    //assert(l->w == l->out_w);
    //assert(l->h == l->out_h);
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;
    if (l->train) l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));

    int i;
    for (i = 0; i < l->n; ++i) {
        int index = l->input_layers[i];
        // Source layers were resized first, so re-read their buffers/sizes.
        l->input_sizes[i] = net->layers[index].outputs;
        l->layers_output[i] = net->layers[index].output;
        l->layers_delta[i] = net->layers[index].delta;

        assert(l->w == net->layers[index].out_w && l->h == net->layers[index].out_h);
    }

    if (l->activation == SWISH || l->activation == MISH) l->activation_input = (float*)realloc(l->activation_input, l->batch*l->outputs * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);

    if (l->train) {
        cuda_free(l->delta_gpu);
        l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
    }

    // Host staging arrays to refresh the device-side pointer tables.
    float **layers_output_gpu = (float **)calloc(l->n, sizeof(float *));
    float **layers_delta_gpu = (float **)calloc(l->n, sizeof(float *));

    for (i = 0; i < l->n; ++i) {
        const int index = l->input_layers[i];
        layers_output_gpu[i] = net->layers[index].output_gpu;
        layers_delta_gpu[i] = net->layers[index].delta_gpu;
    }

    memcpy_ongpu(l->input_sizes_gpu, l->input_sizes, l->n * sizeof(int));
    memcpy_ongpu(l->layers_output_gpu, layers_output_gpu, l->n * sizeof(float*));
    memcpy_ongpu(l->layers_delta_gpu, layers_delta_gpu, l->n * sizeof(float*));

    free(layers_output_gpu);
    free(layers_delta_gpu);

    if (l->activation == SWISH || l->activation == MISH) {
        cuda_free(l->activation_input_gpu);
        l->activation_input_gpu = cuda_make_array(l->activation_input, l->batch*l->outputs);
    }
#endif
}

// CPU forward pass: output = input + (weighted) sum of the source layers'
// outputs, then activation.  A simple element-wise add is used when there is
// exactly one unweighted source of identical shape.
void forward_shortcut_layer(const layer l, network_state state)
{
    int from_w = state.net.layers[l.index].w;
    int from_h = state.net.layers[l.index].h;
    int from_c = state.net.layers[l.index].c;

    if (l.nweights == 0 && l.n == 1 && from_w == l.w && from_h == l.h && from_c == l.c) {
        // Fast path: plain residual add.
        int size = l.batch * l.w * l.h * l.c;
        int i;
        #pragma omp parallel for
        for(i = 0; i < size; ++i)
            l.output[i] = state.input[i] + state.net.layers[l.index].output[i];
    }
    else {
        // General path: multiple sources, mismatched shapes, or learned weights.
        shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_output, l.output, state.input, l.weights, l.nweights, l.weights_normalizion);
    }

    //copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
    //shortcut_cpu(l.batch, from_w, from_h, from_c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output);
    //activate_array(l.output, l.outputs*l.batch, l.activation);
    if (l.activation == SWISH) activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else if (l.activation == MISH) activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation);
}

// CPU backward pass: applies the activation gradient to l.delta, then
// scatters gradients to this layer's input (state.delta), the source layers'
// deltas and, if present, the mixing-weight updates.
void backward_shortcut_layer(const layer l, network_state state)
{
    if (l.activation == SWISH) gradient_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.delta);
    else if (l.activation == MISH) gradient_array_mish(l.outputs*l.batch, l.activation_input, l.delta);
    else gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);

    backward_shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes,
        l.layers_delta, state.delta, l.delta, l.weights, l.weight_updates, l.nweights, state.input, l.layers_output, l.weights_normalizion);

    //axpy_cpu(l.outputs*l.batch, 1, l.delta, 1, state.delta, 1);
    //shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, state.net.layers[l.index].delta);
}

// SGD step (with weight decay and momentum) on the mixing weights;
// no-op when the layer has no learned weights.
void update_shortcut_layer(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    if (l.nweights > 0) {
        float learning_rate = learning_rate_init*l.learning_rate_scale;
        //float momentum = a.momentum;
        //float decay = a.decay;
        //int batch = a.batch;

        axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
        axpy_cpu(l.nweights, learning_rate / batch, l.weight_updates, 1, l.weights, 1);
        scal_cpu(l.nweights, momentum, l.weight_updates, 1);
    }
}

#ifdef GPU
// GPU forward pass: always takes the general multilayer kernel, then
// applies the activation on-device.
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
    //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
    //simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu);
    //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);

    //-----------
    //if (l.outputs == l.input_sizes[0])
    //if(l.n == 1 && l.nweights == 0)
    //{
    //    input_shortcut_gpu(state.input, l.batch, state.net.layers[l.index].w, state.net.layers[l.index].h, state.net.layers[l.index].c,
    //        state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //}
    //else
    {
        shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_output_gpu, l.output_gpu, state.input, l.weights_gpu, l.nweights, l.weights_normalizion);
    }

    if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}

// GPU backward pass; mirrors backward_shortcut_layer on-device.
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
    if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);

    backward_shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_delta_gpu, state.delta, l.delta_gpu,
        l.weights_gpu, l.weight_updates_gpu, l.nweights, state.input, l.layers_output_gpu, l.weights_normalizion);

    //axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
    //shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}

// GPU SGD step on the mixing weights; NaN/Inf in the buffers are scrubbed
// first to keep a bad batch from poisoning the weights.
void update_shortcut_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    if (l.nweights > 0) {
        float learning_rate = learning_rate_init*l.learning_rate_scale;
        //float momentum = a.momentum;
        //float decay = a.decay;
        //int batch = a.batch;

        fix_nan_and_inf(l.weight_updates_gpu, l.nweights);
        fix_nan_and_inf(l.weights_gpu, l.nweights);

        axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
        axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
        scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);

        //if (l.clip) {
        //    constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
        //}
    }
}

// Copies weights device -> host (async, then synchronized).
void pull_shortcut_layer(layer l)
{
    cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
    CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
}

// Copies weights host -> device.
void push_shortcut_layer(layer l)
{
    cuda_push_array(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
}
#endif
rose_reduction_max.c
#include "omp.h" double a[10]; int foo() { double max_val = - 1e99; double min_val = 1e99; int i; #pragma omp parallel for private (i) reduction (max:max_val) reduction (min:min_val) for (i = 0; i <= 9; i += 1) { if (a[i] > max_val) { max_val = a[i]; } if (a[i] < min_val) min_val = a[i]; } }
task-two.c
/* * task-two.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run-race | FileCheck %s #include <omp.h> #include <stdio.h> #include <unistd.h> #define NUM_THREADS 2 int main(int argc, char *argv[]) { int var = 0; int i; #pragma omp parallel for num_threads(NUM_THREADS) shared(var) schedule(static, \ 1) for (i = 0; i < NUM_THREADS; i++) { #pragma omp task shared(var) if (0) // the task is inlined an executed locally { var++; } } int error = (var != 2); fprintf(stderr, "DONE\n"); return error; } // CHECK: WARNING: ThreadSanitizer: data race // CHECK-NEXT: {{(Write|Read)}} of size 4 // CHECK-NEXT: #0 {{.*}}task-two.c:30 // CHECK: Previous write of size 4 // CHECK-NEXT: #0 {{.*}}task-two.c:30 // CHECK: DONE // CHECK: ThreadSanitizer: reported 1 warnings
convolutionbnrelu_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Fused 3x3, stride-1 convolution followed by a per-output-channel affine
// transform and ReLU, scalar reference implementation ("_sse" by naming
// convention only -- no intrinsics are used here).
//
// bottom_blob : input feature maps (inch channels, width w)
// top_blob    : pre-allocated output (outch channels, outw x outh); assumes
//               outw == w - 2 per the 3x3/stride-1 pointer arithmetic below
// _kernel     : outch*inch 3x3 kernels, laid out [p][q][9]
// _bias       : optional per-output-channel bias (may be empty)
// _a_data/_b_data : per-output-channel affine parameters applied at the end
//               as out = b*out + a -- NOTE(review): `b` multiplies and `a`
//               adds despite the names; confirm against the BN convention
//               of the caller.
static void convbnrelu3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt, const Mat& _a_data, const Mat& _b_data)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;
    const float* a_data = _a_data;
    const float* b_data = _b_data;

    // One thread per output channel; each channel's accumulation is independent.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        float a = a_data[p];
        float b = b_data[p];

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);   // accumulate convolution on top of the bias

        for (int q=0; q<inch; q++)
        {
            // Two output rows are produced per outer iteration (outptr/outptr2).
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p*inch*9 + q*9;

            // Four input rows feed two output rows of a 3x3 window.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

            const float* k0 = kernel0;       // kernel row 0
            const float* k1 = kernel0 + 3;   // kernel row 1
            const float* k2 = kernel0 + 6;   // kernel row 2

            int i = 0;

            // Main loop: process output rows in pairs.
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // Row i: window over r0..r2.
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // Row i+1: same window shifted down one row (r1..r3).
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-column border and the row already covered by outptr2.
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                outptr += outw;
                outptr2 += outw;
            }

            // Tail: one remaining output row when outh is odd.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }

        // Fused epilogue: per-channel affine (b*x + a) followed by ReLU,
        // applied in place over the whole channel.
        int remain = outw * outh;
        float* outptr = out;
        for (; remain>0; remain--)
        {
            *outptr = b*(*outptr)+a;
            if(*outptr < 0)
                *outptr = 0;
            outptr++;
        }
    }
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. SourceLocation EndLoc; /// Kind of the clause. 
OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. OpenMPDirectiveKind CaptureRegion = OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. 
  const Stmt *getPreInitStmt() const { return PreInit; }

  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }

  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }

  /// Fetch this interface from clause \a C, if it carries one.
  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};

/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;

protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }

  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }

public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }

  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }

  /// Fetch this interface from clause \a C, if it carries one.
  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};

/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;
  OMPVarListLocTy() = default;
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};

/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. 
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. 
  Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }

  child_range children() { return child_range(&Allocator, &Allocator + 1); }

  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocator;
  }
};

/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;
  /// Position of the ':' delimiter in the clause.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
                                            EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
                                            SourceLocation(), SourceLocation(),
                                            N) {}

  /// Sets location of ':' symbol in clause.
void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. 
Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. 
SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// Build an empty clause. OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. 
Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. 
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
};

/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } const_child_range children() const { return const_child_range(&Simdlen, &Simdlen + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_simdlen; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. 
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. 
SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPDefaultClauseKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. 
OMPProcBindClause() : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPProcBindClauseKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. 
/// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedSharedMemoryClause() : OMPClause(OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. 
OMPReverseOffloadClause() : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '(' SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. 
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_atomic_default_mem_order; } }; /// This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown; /// Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// Start location of the schedule ind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// Set the first schedule modifier. /// /// \param M Schedule modifier. void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// Set the second schedule modifier. /// /// \param M Schedule modifier. void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// Set location of the first schedule modifier. void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// Set location of the second schedule modifier. void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// Set schedule modifier location. 
/// /// \param M Schedule modifier location. void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. /// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. 
/// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. 
const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. 
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                  unsigned NumLoops, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);

  /// Build an empty clause.
  ///
  /// \param NumLoops Number of loops, associated with this clause.
  static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  /// Set number of iterations for the specified loop.
  void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);

  /// Get number of iterations for all the loops.
  ArrayRef<Expr *> getLoopNumIterations() const;

  /// Set loop counter for the specified loop.
  void setLoopCounter(unsigned NumLoop, Expr *Counter);

  /// Get loops counter for the specified loop.
  Expr *getLoopCounter(unsigned NumLoop);
  const Expr *getLoopCounter(unsigned NumLoop) const;

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_ordered;
  }
};

/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }
};

/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}

  // This clause carries no expressions, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
    : public OMPVarListClause<OMPPrivateClause>,
      private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
                                           EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPPrivateClause(unsigned N)
      : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           N) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  // The private copies are tail-allocated immediately after the variable
  // list, hence the varlist_end() base pointer.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param PrivateVL List of references to private copies with initializers.
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPPrivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
                                                LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  // Private copies are tail-allocated immediately after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables.
  // Initializer variables follow the private copies in the tail allocation.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp
/// ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  // Tail array #1: private copies, placed right after the variable list.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Tail array #2: source expressions follow the private copies.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  // Tail array #3: destination expressions follow the source expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  // Tail array #4: assignment operations follow the destination expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }

  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPLastprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// This represents clause 'shared' in
/// the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }
};

/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // Trailing storage layout, as the accessor offsets below show: the
  // variable list is followed by four arrays of varlist_size() elements:
  // { Vars[]; Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[]; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // Trailing storage layout (see the accessor offsets below):
  // { Vars[]; Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[]; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause. Also, variables
  /// in these expressions are used for proper initialization of reduction
  /// copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // Trailing storage layout (see the accessor offsets below); unlike the
  // other reduction clauses there is a fifth helper array:
  // { Vars[]; Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[];
  //   TaskgroupDescriptors[]; }
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause. Also, variables
  /// in these expressions are used for proper initialization of reduction
  /// copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps,
         ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit,
         Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }

  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
    : public OMPVarListClause<OMPLinearClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLinearClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Modifier of 'linear' clause.
  OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;

  /// Location of linear modifier if any.
  SourceLocation ModifierLoc;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the linear step for clause.
  // Step lives in the first of the two trailing slots after the Finals list.
  void setStep(Expr *Step) { *(getFinals().end()) = Step; }

  /// Sets the expression to calculate linear step for clause.
  void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }

  /// Build 'linear' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[]; /// Finals[]; Step; CalcStep; } MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Sets the list of update expressions for linear variables. 
// Layout note: the helper lists (privates, inits, updates, finals) are laid
// out back-to-back in the trailing storage, each varlist_size() long; the
// step and calc-step expressions occupy the two slots after the finals list.

/// Gets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
  return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
  return llvm::makeArrayRef(getInits().end(), varlist_size());
}

/// Gets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
  return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
  return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}

/// Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);

/// Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);

public:
/// Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
       OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
       SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
       ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
       Stmt *PreInit, Expr *PostUpdate);

/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

/// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }

/// Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }

/// Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

/// Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }

/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Returns linear step.
// The step lives in the trailing slot immediately past the finals list, so
// dereferencing getFinals().end() is intentional, not an out-of-bounds read.
Expr *getStep() { return *(getFinals().end()); }

/// Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }

/// Returns expression to calculate linear step.
// CalcStep occupies the slot one past the step expression.
Expr *getCalcStep() { return *(getFinals().end() + 1); }

/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);

/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);

// Iterator/range accessors over the trailing helper-expression lists.
using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;

privates_range privates() {
  return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
  return privates_const_range(getPrivates().begin(), getPrivates().end());
}

using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;

inits_range inits() {
  return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
  return inits_const_range(getInits().begin(), getInits().end());
}

using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;

updates_range updates() {
  return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
  return updates_const_range(getUpdates().begin(), getUpdates().end());
}

using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;

finals_range finals() {
  return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
  return finals_const_range(getFinals().begin(), getFinals().end());
}

// Children are the variable-list expressions; helper lists are not visited.
child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
  // Reuse the non-const overload; const_cast is safe because children() does
  // not mutate the clause.
  auto Children = const_cast<OMPLinearClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_linear;
}
};

/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause.
  // The alignment expression occupies the single trailing slot directly
  // after the variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
  explicit OMPAlignedClause(unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                           SourceLocation(), SourceLocation(),
                                           NumVars) {}

public:
  /// Creates clause with a list of variables \a VL and alignment \a A.
  ///
  /// \param C AST Context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param A Alignment.
  static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation ColonLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  Expr *A);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of variables.
  static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

  /// Sets the location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

  /// Returns the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns alignment.
  Expr *getAlignment() { return *varlist_end(); }

  /// Returns alignment.
  const Expr *getAlignment() const { return *varlist_end(); }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPAlignedClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_aligned;
  }
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for copyin clause. This list represents destinations.
  // 3.
  // List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Sources are the first trailing list, directly after the variable list.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyinClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Same trailing layout as OMPCopyinClause: sources, then destinations,
  // then assignment ops, each varlist_size() long.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
    : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                        SourceLocation(), SourceLocation(),
                                        N),
      NumLoops(NumLoops) {}

/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

/// Set dependency kind and its location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation EndLoc,
                               OpenMPDependClauseKind DepKind,
                               SourceLocation DepLoc, SourceLocation ColonLoc,
                               ArrayRef<Expr *> VL, unsigned NumLoops);

/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                    unsigned NumLoops);

/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }

/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }

/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);

/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPDependClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_depend;
}
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

public:
  /// Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                  SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }

/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }

/// Return device number.
// NOTE(review): const overload deliberately returns non-const Expr* (shallow
// const; AST nodes are shared) — matches the pattern used elsewhere here.
Expr *getDevice() const { return cast<Expr>(Device); }

child_range children() { return child_range(&Device, &Device + 1); }

const_child_range children() const {
  return const_child_range(&Device, &Device + 1);
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_device;
}
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}

  // Simple clause: carries no expressions, so no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          // Canonicalize so components of the same decl compare equal.
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
unsigned NumUniqueDeclarations; /// Number of component lists in this clause. unsigned NumComponentLists; /// Total number of components in this clause. unsigned NumComponents; /// C++ nested name specifier for the associated user-defined mapper. NestedNameSpecifierLoc MapperQualifierLoc; /// The associated user-defined mapper identifier information. DeclarationNameInfo MapperIdInfo; protected: /// Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. 
OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. 
ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. 
ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. 
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. CI = std::copy(C.begin(), C.end(), CI); } } } /// Set the nested name specifier of associated user-defined mapper. void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) { MapperQualifierLoc = NNSL; } /// Set the name of associated user-defined mapper. void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; } /// Get the user-defined mapper references that are in the trailing objects of /// the class. 
MutableArrayRef<Expr *> getUDMapperRefs() { return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { return llvm::makeArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. unsigned getTotalComponentsNum() const { return NumComponents; } /// Gets the nested name specifier for associated user-defined mapper. NestedNameSpecifierLoc getMapperQualifierLoc() const { return MapperQualifierLoc; } /// Gets the name info for associated user-defined mapper. const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; } /// Iterator that browse the components by lists. It also allows /// browsing components of a single declaration. class const_component_lists_iterator : public llvm::iterator_adaptor_base< const_component_lists_iterator, MappableExprComponentListRef::const_iterator, std::forward_iterator_tag, MappableComponent, ptrdiff_t, MappableComponent, MappableComponent> { // The declaration the iterator currently refers to. 
ArrayRef<ValueDecl *>::iterator DeclCur; // The list number associated with the current declaration. ArrayRef<unsigned>::iterator NumListsCur; // Remaining lists for the current declaration. unsigned RemainingLists = 0; // The cumulative size of the previous list, or zero if there is no previous // list. unsigned PrevListSize = 0; // The cumulative sizes of the current list - it will delimit the remaining // range of interest. ArrayRef<unsigned>::const_iterator ListSizeCur; ArrayRef<unsigned>::const_iterator ListSizeEnd; // Iterator to the end of the components storage. MappableExprComponentListRef::const_iterator End; public: /// Construct an iterator that scans all lists. explicit const_component_lists_iterator( ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes, MappableExprComponentListRef Components) : const_component_lists_iterator::iterator_adaptor_base( Components.begin()), DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()), ListSizeCur(CumulativeListSizes.begin()), ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) { assert(UniqueDecls.size() == DeclsListNum.size() && "Inconsistent number of declarations and list sizes!"); if (!DeclsListNum.empty()) RemainingLists = *NumListsCur; } /// Construct an iterator that scan lists for a given declaration \a /// Declaration. explicit const_component_lists_iterator( const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes, MappableExprComponentListRef Components) : const_component_lists_iterator(UniqueDecls, DeclsListNum, CumulativeListSizes, Components) { // Look for the desired declaration. While we are looking for it, we // update the state so that we know the component where a given list // starts. 
for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) { if (*DeclCur == Declaration) break; assert(*NumListsCur > 0 && "No lists associated with declaration??"); // Skip the lists associated with the current declaration, but save the // last list size that was skipped. std::advance(ListSizeCur, *NumListsCur - 1); PrevListSize = *ListSizeCur; ++ListSizeCur; } // If we didn't find any declaration, advance the iterator to after the // last component and set remaining lists to zero. if (ListSizeCur == CumulativeListSizes.end()) { this->I = End; RemainingLists = 0u; return; } // Set the remaining lists with the total number of lists of the current // declaration. RemainingLists = *NumListsCur; // Adjust the list size end iterator to the end of the relevant range. ListSizeEnd = ListSizeCur; std::advance(ListSizeEnd, RemainingLists); // Given that the list sizes are cumulative, the index of the component // that start the list is the size of the previous list. std::advance(this->I, PrevListSize); } // Return the array with the current list. The sizes are cumulative, so the // array size is the difference between the current size and previous one. std::pair<const ValueDecl *, MappableExprComponentListRef> operator*() const { assert(ListSizeCur != ListSizeEnd && "Invalid iterator!"); return std::make_pair( *DeclCur, MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize)); } std::pair<const ValueDecl *, MappableExprComponentListRef> operator->() const { return **this; } // Skip the components of the current list. const_component_lists_iterator &operator++() { assert(ListSizeCur != ListSizeEnd && RemainingLists && "Invalid iterator!"); // If we don't have more lists just skip all the components. Otherwise, // advance the iterator by the number of components in the current list. 
if (std::next(ListSizeCur) == ListSizeEnd) { this->I = End; RemainingLists = 0; } else { std::advance(this->I, *ListSizeCur - PrevListSize); PrevListSize = *ListSizeCur; // We are done with a declaration, move to the next one. if (!(--RemainingLists)) { ++DeclCur; ++NumListsCur; RemainingLists = *NumListsCur; assert(RemainingLists && "No lists in the following declaration??"); } } ++ListSizeCur; return *this; } }; using const_component_lists_range = llvm::iterator_range<const_component_lists_iterator>; /// Iterators for all component lists. const_component_lists_iterator component_lists_begin() const { return const_component_lists_iterator( getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(), getComponentsRef()); } const_component_lists_iterator component_lists_end() const { return const_component_lists_iterator( ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(), MappableExprComponentListRef(getComponentsRef().end(), getComponentsRef().end())); } const_component_lists_range component_lists() const { return {component_lists_begin(), component_lists_end()}; } /// Iterators for component lists associated with the provided /// declaration. const_component_lists_iterator decl_component_lists_begin(const ValueDecl *VD) const { return const_component_lists_iterator( VD, getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(), getComponentsRef()); } const_component_lists_iterator decl_component_lists_end() const { return component_lists_end(); } const_component_lists_range decl_component_lists(const ValueDecl *VD) const { return {decl_component_lists_begin(VD), decl_component_lists_end()}; } /// Iterators to access all the declarations, number of lists, list sizes, and /// components. 
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator; using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>; const_all_decls_range all_decls() const { auto A = getUniqueDeclsRef(); return const_all_decls_range(A.begin(), A.end()); } using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator; using const_all_num_lists_range = llvm::iterator_range<const_all_num_lists_iterator>; const_all_num_lists_range all_num_lists() const { auto A = getDeclNumListsRef(); return const_all_num_lists_range(A.begin(), A.end()); } using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator; using const_all_lists_sizes_range = llvm::iterator_range<const_all_lists_sizes_iterator>; const_all_lists_sizes_range all_lists_sizes() const { auto A = getComponentListSizesRef(); return const_all_lists_sizes_range(A.begin(), A.end()); } using const_all_components_iterator = ArrayRef<MappableComponent>::iterator; using const_all_components_range = llvm::iterator_range<const_all_components_iterator>; const_all_components_range all_components() const { auto A = getComponentsRef(); return const_all_components_range(A.begin(), A.end()); } using mapperlist_iterator = MutableArrayRef<Expr *>::iterator; using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator; using mapperlist_range = llvm::iterator_range<mapperlist_iterator>; using mapperlist_const_range = llvm::iterator_range<mapperlist_const_iterator>; mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); } mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); } mapperlist_const_iterator mapperlist_begin() const { return getUDMapperRefs().begin(); } mapperlist_const_iterator mapperlist_end() const { return getUDMapperRefs().end(); } mapperlist_range mapperlists() { return mapperlist_range(mapperlist_begin(), mapperlist_end()); } mapperlist_const_range mapperlists() const { return mapperlist_const_range(mapperlist_begin(), mapperlist_end()); } }; /// This 
/// represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // Two 'unsigned' arrays live back-to-back: per-declaration list counts,
    // then cumulative component-list sizes.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Number of allowed map-type-modifiers.
  static constexpr unsigned NumberOfModifiers =
      OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;

private:
  /// Map-type-modifiers for the 'map' clause.
  OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};

  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfModifiers];

  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;

  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;

  /// Location of the map type.
  SourceLocation MapLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this
  /// clause; 3) NumComponentLists: number of component lists in this clause;
  /// and 4) NumComponents: total number of expression components in the
  /// clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit),
        MapLoc(MapLoc) {
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));

    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this
  /// clause; 3) NumComponentLists: number of component lists in this clause;
  /// and 4) NumComponents: total number of expression components in the
  /// clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}

  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }

  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }

  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }

  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit,
         SourceLocation TypeLoc);

  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes
  /// 1) NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this
  /// clause; 3) NumComponentLists: number of component lists in this clause;
  /// and 4) NumComponents: total number of expression components in the
  /// clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);

  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }

  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }

  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }

  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }

  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }

  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }

  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
/// /// \code /// #pragma omp teams num_teams(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'num_teams' /// with single expression 'n'. class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// NumTeams number. Stmt *NumTeams = nullptr; /// Set the NumTeams number. /// /// \param E NumTeams number. void setNumTeams(Expr *E) { NumTeams = E; } public: /// Build 'num_teams' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPNumTeamsClause() : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return NumTeams number. Expr *getNumTeams() { return cast<Expr>(NumTeams); } /// Return NumTeams number. Expr *getNumTeams() const { return cast<Expr>(NumTeams); } child_range children() { return child_range(&NumTeams, &NumTeams + 1); } const_child_range children() const { return const_child_range(&NumTeams, &NumTeams + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_num_teams; } }; /// This represents 'thread_limit' clause in the '#pragma omp ...' 
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};

/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression (number of loop iterations per generated task).
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression (may be null for an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  // 'nogroup' carries no expressions, so there are no children to visit.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-tasks expression (set via an Expr; stored as Stmt).
  Stmt *NumTasks = nullptr;

  /// Set the number-of-tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTasks(Size) {}

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number-of-tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
Expr *getHint() const { return cast_or_null<Expr>(Hint); }

child_range children() { return child_range(&Hint, &Hint + 1); }

const_child_range children() const {
  return const_child_range(&Hint, &Hint + 1);
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_hint;
}
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    // The helper chunk size is stored as the clause's pre-init statement.
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
SourceLocation LParenLoc;

/// Modifiers for 'defaultmap' clause.
OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

/// Locations of modifiers.
SourceLocation ModifierLoc;

/// A kind of the 'defaultmap' clause.
OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

/// Start location of the defaultmap kind in source code.
SourceLocation KindLoc;

/// Set defaultmap kind.
///
/// \param K Defaultmap kind.
void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

/// Set the defaultmap modifier.
///
/// \param M Defaultmap modifier.
void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) { Modifier = M; }

/// Set location of the defaultmap modifier.
void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

/// Set defaultmap kind start location.
///
/// \param KLoc Defaultmap kind location.
void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
/// Build 'defaultmap' clause with defaultmap kind \a Kind
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param EndLoc Ending location of the clause.
/// \param Kind Defaultmap kind.
/// \param M The modifier applied to 'defaultmap' clause.
/// \param MLoc Location of the modifier.
OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation MLoc, SourceLocation KLoc,
                    SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                    OpenMPDefaultmapClauseModifier M)
    : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
      Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

/// Build an empty clause.
explicit OMPDefaultmapClause()
    : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

/// Get kind of the clause.
OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

/// Get the modifier of the clause.
OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
  return Modifier;
}

/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }

/// Get kind location.
SourceLocation getDefaultmapKindLoc() { return KindLoc; }

/// Get the modifier location.
SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

// The kind and modifier are not expressions, so the clause has no children.
child_range children() {
  return child_range(child_iterator(), child_iterator());
}

const_child_range children() const {
  return const_child_range(const_child_iterator(), const_child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_defaultmap;
}
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause.
/// It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                     DeclarationNameInfo MapperIdInfo,
                     const OMPVarListLocTy &Locs,
                     const OMPMappableExprListSizeTy &Sizes)
    : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
                                &MapperIdInfo) {}

/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
    : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}

/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
  // There are varlist_size() of expressions, and varlist_size() of
  // user-defined mappers.
  return 2 * varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
  return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
  return getUniqueDeclarationsNum() + getTotalComponentListNum();
}

public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
/// \param UDMapperRefs References to user-defined mappers associated with
/// expressions used in the clause.
/// \param UDMQualifierLoc C++ nested name specifier for the associated
/// user-defined mapper.
/// \param MapperId The identifier of associated user-defined mapper.
static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                           ArrayRef<Expr *> Vars,
                           ArrayRef<ValueDecl *> Declarations,
                           MappableExprComponentListsRef ComponentLists,
                           ArrayRef<Expr *> UDMapperRefs,
                           NestedNameSpecifierLoc UDMQualifierLoc,
                           DeclarationNameInfo MapperId);

/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPToClause *CreateEmpty(const ASTContext &C,
                                const OMPMappableExprListSizeTy &Sizes);

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPToClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_to;
}
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs, NestedNameSpecifierLoc UDMQualifierLoc,
         DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
static OMPFromClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPFromClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_from;
}
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Original expressions, private copies, and initializers: three parallel
    // lists of varlist_size() expressions each.
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param PrivateVars Expressions referring to private copies.
/// \param Inits Expressions referring to private copy initializers.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPUseDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
       ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
       ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
       MappableExprComponentListsRef ComponentLists);

/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPUseDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

// Iterator types over the trailing list of private copies.
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
    llvm::iterator_range<private_copies_const_iterator>;

private_copies_range private_copies() {
  return private_copies_range(getPrivateCopies().begin(),
                              getPrivateCopies().end());
}

private_copies_const_range private_copies() const {
  return private_copies_const_range(getPrivateCopies().begin(),
                                    getPrivateCopies().end());
}

// Iterator types over the trailing list of initializer expressions.
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;

inits_range inits() {
  return inits_range(getInits().begin(), getInits().end());
}

inits_const_range inits() const {
  return inits_const_range(getInits().begin(), getInits().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_use_device_ptr;
}
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
                                const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
static OMPIsDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

const_child_range children() const {
  auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
  return const_child_range(Children.begin(), Children.end());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_is_device_ptr;
}
};

/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
// PTR(CLASS) is the pointer type the Ptr template parameter produces for
// CLASS (plain pointer or const pointer, see const_ptr below).
#define PTR(CLASS) typename Ptr<CLASS>::type
#define DISPATCH(CLASS) \
  return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))

  // Generate one VisitXXXClause member per clause kind from the .def file;
  // each forwards through DISPATCH so subclasses may override selectively.
#define OPENMP_CLAUSE(Name, Class) \
  RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"

  RetTy Visit(PTR(OMPClause) S) {
    // Top switch clause: visit each OMPClause.
    switch (S->getClauseKind()) {
    default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class) \
  case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
    }
  }
  // Base case, ignore it. :)
  RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};

// Helper that maps T to a const-pointer type carrier for the visitor above.
template <typename T>
using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>;

template<class ImplClass, typename RetTy = void>
class OMPClauseVisitor
    : public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {};

template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor
    : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};

/// Visitor that prints each OMPClause to a raw_ostream.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
  raw_ostream &OS;
  const PrintingPolicy &Policy;

  /// Process clauses with list of variables.
  template <typename T> void VisitOMPClauseList(T *Node, char StartSym);

public:
  OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
      : OS(OS), Policy(Policy) {}

#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};

} // namespace clang

#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
/* ===== middle6r.c ===== */
/*
 * Date: 11 December 2015
 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com
 */
/*
 * Simmulation of boomerang analysis for Skinny
 * Date: March 21, 2020
 * Author: Hosein Hadipour
 * Contact: hsn.hadipour@gmail.com
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>

// #define DEBUG 1
#define Nthreads 16

// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64:   32 rounds
//Skinny-64-128:  36 rounds
//Skinny-64-192:  40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40},
                      {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};

// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0  1  2  3
// 4  5  6  7
// 8  9 10 11
//12 13 14 15

// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};

// 8-bit Sbox
const unsigned char sbox_8[256] = {
    0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b,
    0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b,
    0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9,
    0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9,
    0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d,
    0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d,
    0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd,
    0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd,
    0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29,
    0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79,
    0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb,
    0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb,
    0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f,
    0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f,
    0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf,
    0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};

const unsigned char sbox_8_inv[256] = {
    0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee,
    0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4,
    0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf,
    0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7,
    0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4,
    0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde,
    0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7,
    0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf,
    0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4,
    0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce,
    0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7,
    0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef,
    0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4,
    0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe,
    0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7,
    0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};

// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};

// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};

// round constants
const unsigned char RC[62] = {
    0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E,
    0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38,
    0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04,
    0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20};

// Output stream used by the display/test-vector helpers.
// NOTE(review): nothing in this file opens `fic`; callers of TestVectors /
// the DEBUG paths must assign it first - confirm against the build setup.
FILE *fic;

// Seed the C library PRNG with a per-thread offset so that every OpenMP
// worker gets a distinct stream.  NOTE(review): rand()/srand() share one
// global state, so concurrent calls from several threads are racy; the
// simulation tolerates this, but it is not a reproducible PRNG per thread.
void init_prng(int offset)
{
    //int initial_seed = 0x5EC7F2B0;
    //int initial_seed = 0x30051991; My birthday!
    size_t initial_seed = 10 * time(NULL) + 100 * (size_t)(offset);
    srand(initial_seed); // Initialization, should only be called once.
    rand();              // discard the first draw, as the original code did
    printf("[+] PRNG initialized to 0x%08zX\n", initial_seed);
}

// Print the 4x4 cell matrix `state` to `fic` as a hex string, packing two
// 4-bit cells per byte for the 64-bit versions and one cell per byte for
// the 128-bit versions.
void display_matrix(unsigned char state[4][4], int ver)
{
    int i;
    unsigned char input[16];
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) |
                       (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
        for (i = 0; i < 8; i++)
            fprintf(fic, "%02x", input[i]);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
        for (i = 0; i < 16; i++)
            fprintf(fic, "%02x", input[i]);
    }
}

// Print the internal state followed by every tweakey word (TK1..TK3,
// depending on the version) to `fic`.
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int k;
    fprintf(fic, "S = ");
    display_matrix(state, ver);
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        fprintf(fic, " - TK%i = ", k + 1);
        display_matrix(keyCells[k], ver);
    }
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // apply the subtweakey to the internal state (top two rows only)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the TWEAKEY permutation
                pos = TWEAKEY_P[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the LFSRs (only TK2/TK3, top two rows)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i <= 1; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^
                                                ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^
                                                ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^
                                                ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^
                                                ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^
                                                ((keyCells_tmp[k][i][j]) & 0x8) ^
                                                ((keyCells_tmp[k][i][j] << 3) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^
                                                ((keyCells_tmp[k][i][j] << 7) & 0x80) ^
                                                ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
            }
        }
    }

    // commit the updated tweakey state
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                keyCells[k][i][j] = keyCells_tmp[k][i][j];
}

// Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function)
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
    int i, j, k;
    unsigned char pos;
    unsigned char keyCells_tmp[3][4][4];

    // update the subtweakey states with the permutation
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 0; i < 4; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of the inverse TWEAKEY permutation
                pos = TWEAKEY_P_inv[j + 4 * i];
                keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
            }
        }
    }

    // update the subtweakey states with the inverse LFSRs
    // (after the inverse permutation the cells to undo sit in rows 2..3)
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
    {
        for (i = 2; i <= 3; i++)
        {
            for (j = 0; j < 4; j++)
            {
                //application of inverse LFSRs for TK updates
                if (k == 1)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^
                                                ((keyCells_tmp[k][i][j] << 3) & 0x8) ^
                                                ((keyCells_tmp[k][i][j]) & 0x8);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^
                                                ((keyCells_tmp[k][i][j] << 7) & 0x80) ^
                                                ((keyCells_tmp[k][i][j] << 1) & 0x80);
                }
                else if (k == 2)
                {
                    if (versions[ver][0] == 64)
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^
                                                ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^
                                                ((keyCells_tmp[k][i][j] >> 2) & 0x1);
                    else
                        keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^
                                                ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^
                                                ((keyCells_tmp[k][i][j] >> 5) & 0x01);
                }
            }
        }
    }

    // commit the updated tweakey state
    for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                keyCells[k][i][j] = keyCells_tmp[k][i][j];

    // apply the subtweakey to the internal state (top two rows only)
    for (i = 0; i <= 1; i++)
    {
        for (j = 0; j < 4; j++)
        {
            state[i][j] ^= keyCells[0][i][j];
            if (2 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j];
            else if (3 * versions[ver][0] == versions[ver][1])
                state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
        }
    }
}

// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
    state[0][0] ^= (RC[r] & 0xf);
    state[1][0] ^= ((RC[r] >> 4) & 0x3);
    state[2][0] ^= 0x2;
}

// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_4[state[i][j]];
}

// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_4_inv[state[i][j]];
}

// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_8[state[i][j]];
}

// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = sbox_8_inv[state[i][j]];
}

// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
    int i, j, pos;
    unsigned char state_tmp[4][4];
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            //application of the ShiftRows permutation
            pos = P[j + 4 * i];
            state_tmp[i][j] = state[pos >> 2][pos & 0x3];
        }
    }
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = state_tmp[i][j];
}

// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
    int i, j, pos;
    unsigned char state_tmp[4][4];
    for (i = 0; i < 4; i++)
    {
        for (j = 0; j < 4; j++)
        {
            //application of the inverse ShiftRows permutation
            pos = P_inv[j + 4 * i];
            state_tmp[i][j] = state[pos >> 2][pos & 0x3];
        }
    }
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            state[i][j] = state_tmp[i][j];
}

// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
    int j;
    unsigned char temp;
    for (j = 0; j < 4; j++)
    {
        state[1][j] ^= state[2][j];
        state[2][j] ^= state[0][j];
        state[3][j] ^= state[2][j];
        // rotate the column down by one
        temp = state[3][j];
        state[3][j] = state[2][j];
        state[2][j] = state[1][j];
        state[1][j] = state[0][j];
        state[0][j] = temp;
    }
}

// Apply the inverse linear diffusion matrix
void MixColumn_inv(unsigned char state[4][4])
{
    int j;
    unsigned char temp;
    for (j = 0; j < 4; j++)
    {
        // rotate the column up by one, then undo the XOR chain
        temp = state[3][j];
        state[3][j] = state[0][j];
        state[0][j] = state[1][j];
        state[1][j] = state[2][j];
        state[2][j] = temp;
        state[3][j] ^= state[2][j];
        state[2][j] ^= state[0][j];
        state[1][j] ^= state[2][j];
    }
}

// decryption function of Skinny
// `input` holds the ciphertext on entry and the plaintext on return;
// `userkey` is the packed tweakey; `ver` selects the Skinny version and
// `r` the number of rounds to undo.
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char dummy[4][4] = {{0}};
    unsigned char keyCells[3][4][4];
    int i;

    memset(keyCells, 0, 48);

    // unpack ciphertext and tweakey into 4x4 cell matrices
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }

    // fast-forward the tweakey schedule to round r by running AddKey on a
    // throwaway state; decryption below then rewinds it with AddKey_inv
    for (i = r - 1; i >= 0; i--)
    {
        AddKey(dummy, keyCells, ver);
    }

#ifdef DEBUG
    fprintf(fic, "DEC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    for (i = r - 1; i >= 0; i--)
    {
        MixColumn_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        if (versions[ver][0] == 64)
            SubCell4_inv(state);
        else
            SubCell8_inv(state);
#ifdef DEBUG
        fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    }

#ifdef DEBUG
    fprintf(fic, "DEC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // repack the recovered plaintext into `input`
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) |
                       (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}

// encryption function of Skinny
// `input` holds the plaintext on entry and the ciphertext on return;
// `userkey` is the packed tweakey; `ver` selects the Skinny version and
// `r` the number of rounds to apply.
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
    unsigned char state[4][4];
    unsigned char keyCells[3][4][4];
    int i;

    memset(keyCells, 0, 48);

    // unpack plaintext and tweakey into 4x4 cell matrices
    for (i = 0; i < 16; i++)
    {
        if (versions[ver][0] == 64)
        {
            if (i & 1)
            {
                state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
                keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
            }
            else
            {
                state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
                keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 128)
                    keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
                if (versions[ver][1] >= 192)
                    keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
            }
        }
        else if (versions[ver][0] == 128)
        {
            state[i >> 2][i & 0x3] = input[i] & 0xFF;
            keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
            if (versions[ver][1] >= 256)
                keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
            if (versions[ver][1] >= 384)
                keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
        }
    }

#ifdef DEBUG
    fprintf(fic, "ENC - initial state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    for (i = 0; i < r; i++)
    {
        if (versions[ver][0] == 64)
            SubCell4(state);
        else
            SubCell8(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddConstants(state, i);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        AddKey(state, keyCells, ver);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        ShiftRows(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
        MixColumn(state);
#ifdef DEBUG
        fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
        display_cipher_state(state, keyCells, ver);
        fprintf(fic, "\n");
#endif
    } //The last subtweakey should not be added

#ifdef DEBUG
    fprintf(fic, "ENC - final state: ");
    display_cipher_state(state, keyCells, ver);
    fprintf(fic, "\n");
#endif

    // repack the ciphertext into `input`
    if (versions[ver][0] == 64)
    {
        for (i = 0; i < 8; i++)
            input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) |
                       (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
    }
    else if (versions[ver][0] == 128)
    {
        for (i = 0; i < 16; i++)
            input[i] = state[i >> 2][i & 0x3] & 0xFF;
    }
}

// generate test vectors for all the versions of Skinny
// (writes to `fic`, which must be opened by the caller; encrypts/decrypts
// 10 rounds on random plaintext/key pairs and prints P, TK, C, P')
void TestVectors(int ver)
{
    unsigned char p[16];
    unsigned char c[16];
    unsigned char k[48];
    int n;
    for (n = 1; n < 10; n++)
    {
        int i;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c[i] = p[i] = rand() & 0xff;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            printf("%02x", p[i]);
        printf("\n");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            k[i] = rand() & 0xff;
        fprintf(fic, "TK = ");
        for (i = 0; i < (versions[ver][1] >> 3); i++)
            fprintf(fic, "%02x", k[i]);
        fprintf(fic, "\n");
        fprintf(fic, "P = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", p[i]);
        fprintf(fic, "\n");
        enc(c, k, ver, 10);
        fprintf(fic, "C = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n");
        dec(c, k, ver, 10);
        fprintf(fic, "P' = ");
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            fprintf(fic, "%02x", c[i]);
        fprintf(fic, "\n\n");
    }
}

// Run N3 boomerang queries for one related-tweakey quartet and return how
// many of them come back with the expected plaintext difference dp.
// dp/dc are the plaintext/ciphertext differences; dk1/dk2 the two tweakey
// differences defining the quartet (k2 = k1^dk1, k3 = k1^dk2, k4 = k2^dk2).
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc,
              unsigned char *dk1, unsigned char *dk2)
{
    int i;
    unsigned char p1[16], p2[16];
    // BUGFIX: c3 is read in the very first iteration below (it is mixed into
    // the fresh plaintext), so it must not start uninitialized.
    unsigned char c3[16] = {0}, c4[16] = {0};
    unsigned char k1[48], k2[48], k3[48], k4[48];

    // randomly choose k1
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k1[i] = rand() & 0xff;
    // derive k2
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k2[i] = k1[i] ^ dk1[i];
    // derive k3
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k3[i] = k1[i] ^ dk2[i];
    // derive k4
    for (i = 0; i < (versions[ver][1] >> 3); i++)
        k4[i] = k2[i] ^ dk2[i];

    int num = 0;
    for (int t = 0; t < N3; t++)
    {
        // randomly choose p1 (previous c3 is folded in to decorrelate draws)
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p1[i] = (rand() ^ c3[i]) & 0xff;
        // derive p2
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            p2[i] = p1[i] ^ dp[i];
        enc(p1, k1, ver, r); // p1 now holds c1
        enc(p2, k2, ver, r); // p2 now holds c2
        // derive c3
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c3[i] = p1[i] ^ dc[i];
        // derive c4
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            c4[i] = p2[i] ^ dc[i];
        dec(c3, k3, ver, r); // c3 now holds p3
        dec(c4, k4, ver, r); // c4 now holds p4
        // a right quartet has p3 ^ p4 == dp
        bool flag = 1;
        for (i = 0; i < (versions[ver][0] >> 3); i++)
            if ((c3[i] ^ c4[i]) != dp[i])
                flag = 0;
        if (flag)
        {
            num++;
        }
    }
    return num;
}

// Launch N1 parallel workers, each performing N2 bunches of N3 boomerang
// queries, and report the total number of right quartets plus the empirical
// probability 2^(-x).  Returns the total count of right quartets.
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp,
                       unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
    // Parallel execution
    int NUM[N1];
    int counter;
    printf("#Rounds: %d rounds\n", R);
    printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n",
           N1, N2, N3, log((double)N1 * N2 * N3) / log(2));
    clock_t clock_timer;
    double wall_timer;
    clock_timer = clock();
    wall_timer = omp_get_wtime();
    omp_set_num_threads(N1);
#pragma omp parallel for
    for (counter = 0; counter < N1; counter++)
    {
        int num = 0;
        int ID = omp_get_thread_num();
        init_prng(ID);
        for (int j = 0; j < N2; j++)
        {
            num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
        }
        // BUGFIX: index by the loop counter, not the thread id - thread ids
        // are not guaranteed to cover 0..N1-1, which left NUM entries
        // uninitialized before the reduction below.
        NUM[counter] = num;
    }
    printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
    printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);

    double sum = 0;
    double sum_temp = 1;
    for (int i = 0; i < N1; i++)
        sum += NUM[i];
    printf("sum = %f\n", sum);
    sum_temp = (double)(N1 * N2 * N3) / sum;
    printf("2^(-%f)\n\n", log(sum_temp) / log(2));
    printf("##########################\n");
    return sum;
}

// Parse a hex string into a packed state-difference array (block-size bytes).
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
    for (int i = 0; i < (versions[ver][0] >> 3); i++)
    {
        // BUGFIX: strtol requires a NUL-terminated string; the original
        // 2-byte buffer was not terminated (undefined behaviour).
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}

// Parse a hex string into a packed tweakey-difference array (key-size bytes).
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
    for (int i = 0; i < (versions[ver][1] >> 3); i++)
    {
        // BUGFIX: same NUL-termination fix as convert_hexstr_to_statearray.
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0';
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
    }
}

int main()
{
    // srand((unsigned)time(NULL)); // Initialization, should only be called once.
    // init_prng(1);
    unsigned char dp[16];
    unsigned char dc[16];
    unsigned char dk1[48];
    unsigned char dk2[48];
    // #######################################################################################################
    // #######################################################################################################
    // ############################## User must change only the following lines ##############################
    int n = 100; // Number of independent experiments
    int R = 6;   // Number of rounds
    int ver = 2; // Determine the version:
    // [0 = Skinny-64-64]
    // [1 = Skinny-64-128]
    // [2 = Skinny-64-192]
    // [3 = Skinny-128-128]
    // [4 = Skinny-128-256]
    // [5 = Skinny-128-384]
    char dp_str[] = "0000000000020000";
    char dc_str[] = "0000000000000000";
    char dk1_str[] = "00000000000000B000000000000000A00000000000000030";
    char dk2_str[] = "000000000004000000000000000B00000000000000090000";
    // char dp_str[] = "0000000000040000";
    // char dc_str[] = "0000000000000000";
    // char dk1_str[] = "000000000000007000000000000000500000000000000060";
    // char dk2_str[] = "000000000002000000000000000500000000000000040000";
    // #######################################################################################################
    // #######################################################################################################
    convert_hexstr_to_statearray(ver, dp_str, dp);
    convert_hexstr_to_statearray(ver, dc_str, dc);
    convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
    convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
    //########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
    int deg1 = 10;
    int deg2 = 12;
    int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg)
    int N3 = 1 << deg2; // Number of queries per bunch : N3
    //################### Number of total queries : N1*N2*N3 ###############
    double sum = 0;
    for (int i = 0; i < n; i++)
    {
        sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
    }
    printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum)) / log(2));
    // sum = (double)(n * N1 * N2 * N3) / sum;
    // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
    return 0;
}
estimate_dt_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela, Ruben Zorrilla // // #ifndef KRATOS_ESTIMATE_DT_UTILITIES_H #define KRATOS_ESTIMATE_DT_UTILITIES_H // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/node.h" #include "includes/element.h" #include "includes/model_part.h" #include "includes/kratos_parameters.h" #include "includes/serializer.h" #include "utilities/openmp_utils.h" #include "utilities/geometry_utilities.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Estimate the time step in a fluid problem to obtain a given Courant number. template< unsigned int TDim > class EstimateDtUtility { public: ///@name Life Cycle ///@{ /// Constructor /** * @param ModelPart The model part containing the problem mesh * @param CFL The user-defined Courant-Friedrichs-Lewy number * @param DtMin user-defined minimum time increment allowed * @param DtMax user-defined maximum time increment allowed */ EstimateDtUtility(ModelPart &ModelPart, const double CFL, const double DtMin, const double DtMax): mrModelPart(ModelPart) { mCFL = CFL; mDtMin = DtMin; mDtMax = DtMax; } /// Constructor with Kratos parameters /** * @param ModelPart The model part containing the problem mesh * @param rParameters Kratos parameters containing the CFL number and max time step */ EstimateDtUtility(ModelPart& ModelPart, Parameters& rParameters): mrModelPart(ModelPart) { Parameters defaultParameters(R"({ "automatic_time_step" : true, "CFL_number" : 1.0, "minimum_delta_time" : 1e-4, "maximum_delta_time" : 0.1 })"); rParameters.ValidateAndAssignDefaults(defaultParameters); mCFL = rParameters["CFL_number"].GetDouble(); mDtMin = rParameters["minimum_delta_time"].GetDouble(); mDtMax 
= rParameters["maximum_delta_time"].GetDouble(); } /// Destructor ~EstimateDtUtility() {} ///@} ///@name Operations ///@{ /// Set the CFL value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetCFL(const double CFL) { mCFL = CFL; } /// Set the maximum time step allowed value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetDtMin(const double DtMin) { mDtMin = DtMin; } /// Set the maximum time step allowed value. /** * @param CFL the user-defined CFL number used in the automatic time step computation */ void SetDtMax(const double DtMax) { mDtMax = DtMax; } /// Calculate the maximum time step that satisfies the Courant-Friedrichs-Lewy (CFL) condition. /** * @return A time step value that satisfies the CFL condition for the current mesh and velocity field */ double EstimateDt() { KRATOS_TRY; unsigned int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfElements(),NumThreads,ElementPartition); double CurrentDt = mrModelPart.GetProcessInfo().GetValue(DELTA_TIME); std::vector<double> MaxCFL(NumThreads,0.0); #pragma omp parallel shared(MaxCFL) { int k = OpenMPUtils::ThisThread(); ModelPart::ElementIterator ElemBegin = mrModelPart.ElementsBegin() + ElementPartition[k]; ModelPart::ElementIterator ElemEnd = mrModelPart.ElementsBegin() + ElementPartition[k+1]; GeometryDataContainer GeometryInfo; double MaxLocalCFL = 0.0; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { double ElementCFL = CalculateElementCFL(*itElem,GeometryInfo,CurrentDt); if (ElementCFL > MaxLocalCFL) { MaxLocalCFL = ElementCFL; } } MaxCFL[k] = MaxLocalCFL; } // Reduce to maximum the thread results // Note that MSVC14 does not support max reductions, which are part of OpenMP 3.1 double CurrentCFL = MaxCFL[0]; for (unsigned int k = 1; k < NumThreads; k++) { if (CurrentCFL > 
MaxCFL[k]) CurrentCFL = MaxCFL[k]; } double NewDt = 0.0; // Avoid division by 0 when the maximum CFL number is close to 0 (e.g. problem initialization) if (CurrentCFL < 1e-10) { KRATOS_INFO("EstimateDtUtility") << "Setting minimum delta time " << mDtMin << " as current time step." << std::endl; NewDt = mDtMin; } else { // Compute new Dt NewDt = mCFL * CurrentDt / CurrentCFL; // Limit max and min Dt if (NewDt > mDtMax) { NewDt = mDtMax; } else if (NewDt < mDtMin) { NewDt = mDtMin; } } // Perform MPI sync if needed NewDt = mrModelPart.GetCommunicator().GetDataCommunicator().MinAll(NewDt); return NewDt; KRATOS_CATCH("") } /// Calculate each element's CFL for the current time step for the given ModelPart. /** * The elemental CFL is stored in the CFL_NUMBER elemental variable. * To view it in the post-process file, remember to print CFL_NUMBER as a Gauss Point result. */ static void CalculateLocalCFL(ModelPart& rModelPart) { KRATOS_TRY; unsigned int NumThreads = OpenMPUtils::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(rModelPart.NumberOfElements(),NumThreads,ElementPartition); const double CurrentDt = rModelPart.GetProcessInfo().GetValue(DELTA_TIME); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementIterator ElemBegin = rModelPart.ElementsBegin() + ElementPartition[k]; ModelPart::ElementIterator ElemEnd = rModelPart.ElementsBegin() + ElementPartition[k+1]; GeometryDataContainer GeometryInfo; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { double ElementCFL = EstimateDtUtility<TDim>::CalculateElementCFL(*itElem,GeometryInfo,CurrentDt); itElem->SetValue(CFL_NUMBER,ElementCFL); } } KRATOS_CATCH("") } /// Calculate each element's CFL for the current time step. /** * The elemental CFL is stored in the CFL_NUMBER elemental variable. * To view it in the post-process file, remember to print CFL_NUMBER as a Gauss Point result. 
*/ void CalculateLocalCFL() { EstimateDtUtility<TDim>::CalculateLocalCFL(mrModelPart); } ///@} // Operators private: ///@name Auxiliary Data types ///@{ struct GeometryDataContainer { double Area; array_1d<double, TDim+1> N; BoundedMatrix<double, TDim+1, TDim> DN_DX; }; ///@} ///@name Member Variables ///@{ double mCFL; // User-defined CFL number double mDtMax; // User-defined maximum time increment allowed double mDtMin; // User-defined minimum time increment allowed ModelPart &mrModelPart; // The problem's model part ///@} // Member variables ///@name Private Operations ///@{ static double CalculateElementCFL(Element &rElement, GeometryDataContainer& rGeometryInfo, double Dt) { double Proj = 0.0; // Get the element's geometric parameters const auto& r_geometry = rElement.GetGeometry(); GeometryUtils::CalculateGeometryData(r_geometry, rGeometryInfo.DN_DX, rGeometryInfo.N, rGeometryInfo.Area); // Elemental Velocity array_1d<double,3> ElementVel = rGeometryInfo.N[0]*r_geometry[0].FastGetSolutionStepValue(VELOCITY); for (unsigned int i = 1; i < TDim+1; ++i) ElementVel += rGeometryInfo.N[i]*r_geometry[i].FastGetSolutionStepValue(VELOCITY); // Calculate u/h as the maximum projection of the velocity along element heights for (unsigned int i = 0; i < TDim+1; ++i) { for (unsigned int d = 0; d < TDim; ++d) Proj += ElementVel[d]*rGeometryInfo.DN_DX(i,d); Proj = fabs(Proj); } return Proj*Dt; } ///@} // Private Operations }; ///@} // Kratos classes ///@} } // namespace Kratos. #endif /* KRATOS_ESTIMATE_DT_UTILITIES_H */